author     Allan Sandfeld Jensen <allan.jensen@qt.io>   2021-09-03 13:32:17 +0200
committer  Allan Sandfeld Jensen <allan.jensen@qt.io>   2021-10-01 14:31:55 +0200
commit     21ba0c5d4bf8fba15dddd97cd693bad2358b77fd (patch)
tree       91be119f694044dfc1ff9fdc054459e925de9df0 /chromium/v8
parent     03c549e0392f92c02536d3f86d5e1d8dfa3435ac (diff)
download   qtwebengine-chromium-21ba0c5d4bf8fba15dddd97cd693bad2358b77fd.tar.gz
BASELINE: Update Chromium to 92.0.4515.166
Change-Id: I42a050486714e9e54fc271f2a8939223a02ae364
Diffstat (limited to 'chromium/v8')
-rw-r--r--chromium/v8/.gn12
-rw-r--r--chromium/v8/AUTHORS4
-rw-r--r--chromium/v8/BUILD.gn351
-rw-r--r--chromium/v8/DEPS38
-rw-r--r--chromium/v8/OWNERS2
-rw-r--r--chromium/v8/PRESUBMIT.py6
-rw-r--r--chromium/v8/RISCV_OWNERS2
-rw-r--r--chromium/v8/WATCHLISTS14
-rw-r--r--chromium/v8/gni/v8.gni5
-rw-r--r--chromium/v8/include/cppgc/allocation.h5
-rw-r--r--chromium/v8/include/cppgc/cross-thread-persistent.h20
-rw-r--r--chromium/v8/include/cppgc/explicit-management.h17
-rw-r--r--chromium/v8/include/cppgc/heap-statistics.h4
-rw-r--r--chromium/v8/include/cppgc/internal/compiler-specific.h4
-rw-r--r--chromium/v8/include/cppgc/internal/pointer-policies.h39
-rw-r--r--chromium/v8/include/cppgc/member.h31
-rw-r--r--chromium/v8/include/cppgc/persistent.h6
-rw-r--r--chromium/v8/include/cppgc/sentinel-pointer.h2
-rw-r--r--chromium/v8/include/cppgc/type-traits.h19
-rw-r--r--chromium/v8/include/js_protocol.pdl11
-rw-r--r--chromium/v8/include/v8-cppgc.h32
-rw-r--r--chromium/v8/include/v8-fast-api-calls.h50
-rw-r--r--chromium/v8/include/v8-inspector.h12
-rw-r--r--chromium/v8/include/v8-internal.h75
-rw-r--r--chromium/v8/include/v8-metrics.h29
-rw-r--r--chromium/v8/include/v8-profiler.h17
-rw-r--r--chromium/v8/include/v8-util.h2
-rw-r--r--chromium/v8/include/v8-version.h6
-rw-r--r--chromium/v8/include/v8.h193
-rw-r--r--chromium/v8/include/v8config.h14
-rw-r--r--chromium/v8/infra/mb/mb_config.pyl55
-rw-r--r--chromium/v8/infra/testing/builders.pyl536
-rw-r--r--chromium/v8/samples/OWNERS2
-rw-r--r--chromium/v8/samples/shell.cc18
-rw-r--r--chromium/v8/src/DEPS2
-rw-r--r--chromium/v8/src/api/api-arguments-inl.h44
-rw-r--r--chromium/v8/src/api/api-inl.h23
-rw-r--r--chromium/v8/src/api/api-macros.h10
-rw-r--r--chromium/v8/src/api/api-natives.cc102
-rw-r--r--chromium/v8/src/api/api.cc387
-rw-r--r--chromium/v8/src/ast/ast-value-factory.cc16
-rw-r--r--chromium/v8/src/ast/ast-value-factory.h20
-rw-r--r--chromium/v8/src/ast/ast.cc26
-rw-r--r--chromium/v8/src/ast/ast.h38
-rw-r--r--chromium/v8/src/ast/modules.cc16
-rw-r--r--chromium/v8/src/ast/modules.h12
-rw-r--r--chromium/v8/src/ast/scopes.cc15
-rw-r--r--chromium/v8/src/ast/scopes.h8
-rw-r--r--chromium/v8/src/ast/variables.cc7
-rw-r--r--chromium/v8/src/ast/variables.h5
-rw-r--r--chromium/v8/src/base/atomicops.h298
-rw-r--r--chromium/v8/src/base/atomicops_internals_portable.h216
-rw-r--r--chromium/v8/src/base/atomicops_internals_std.h224
-rw-r--r--chromium/v8/src/base/bit-field.h2
-rw-r--r--chromium/v8/src/base/bits.h4
-rw-r--r--chromium/v8/src/base/bounds.h2
-rw-r--r--chromium/v8/src/base/cpu.cc2
-rw-r--r--chromium/v8/src/base/enum-set.h2
-rw-r--r--chromium/v8/src/base/hashmap.h4
-rw-r--r--chromium/v8/src/base/logging.h6
-rw-r--r--chromium/v8/src/base/once.h16
-rw-r--r--chromium/v8/src/base/optional.h12
-rw-r--r--chromium/v8/src/base/platform/mutex.h4
-rw-r--r--chromium/v8/src/base/platform/platform-aix.cc2
-rw-r--r--chromium/v8/src/base/platform/platform-freebsd.cc2
-rw-r--r--chromium/v8/src/base/platform/platform-posix.cc11
-rw-r--r--chromium/v8/src/base/platform/platform-win32.cc2
-rw-r--r--chromium/v8/src/base/platform/semaphore.h4
-rw-r--r--chromium/v8/src/base/sanitizer/asan.h37
-rw-r--r--chromium/v8/src/base/sanitizer/lsan-page-allocator.cc (renamed from chromium/v8/src/sanitizer/lsan-page-allocator.cc)2
-rw-r--r--chromium/v8/src/base/sanitizer/lsan-page-allocator.h (renamed from chromium/v8/src/sanitizer/lsan-page-allocator.h)9
-rw-r--r--chromium/v8/src/base/sanitizer/lsan.h (renamed from chromium/v8/src/base/lsan.h)12
-rw-r--r--chromium/v8/src/base/sanitizer/msan.h40
-rw-r--r--chromium/v8/src/base/small-vector.h7
-rw-r--r--chromium/v8/src/base/template-utils.h9
-rw-r--r--chromium/v8/src/base/v8-fallthrough.h2
-rw-r--r--chromium/v8/src/base/vlq.h83
-rw-r--r--chromium/v8/src/baseline/arm/baseline-assembler-arm-inl.h12
-rw-r--r--chromium/v8/src/baseline/arm/baseline-compiler-arm-inl.h6
-rw-r--r--chromium/v8/src/baseline/arm64/baseline-assembler-arm64-inl.h4
-rw-r--r--chromium/v8/src/baseline/arm64/baseline-compiler-arm64-inl.h6
-rw-r--r--chromium/v8/src/baseline/baseline-assembler-inl.h7
-rw-r--r--chromium/v8/src/baseline/baseline-assembler.h5
-rw-r--r--chromium/v8/src/baseline/baseline-compiler.cc889
-rw-r--r--chromium/v8/src/baseline/baseline-compiler.h34
-rw-r--r--chromium/v8/src/baseline/baseline-osr-inl.h38
-rw-r--r--chromium/v8/src/baseline/baseline.cc38
-rw-r--r--chromium/v8/src/baseline/baseline.h2
-rw-r--r--chromium/v8/src/baseline/ia32/baseline-assembler-ia32-inl.h8
-rw-r--r--chromium/v8/src/baseline/ia32/baseline-compiler-ia32-inl.h6
-rw-r--r--chromium/v8/src/baseline/riscv64/baseline-assembler-riscv64-inl.h615
-rw-r--r--chromium/v8/src/baseline/riscv64/baseline-compiler-riscv64-inl.h112
-rw-r--r--chromium/v8/src/baseline/x64/baseline-assembler-x64-inl.h11
-rw-r--r--chromium/v8/src/baseline/x64/baseline-compiler-x64-inl.h6
-rw-r--r--chromium/v8/src/bigint/bigint-internal.cc43
-rw-r--r--chromium/v8/src/bigint/bigint-internal.h65
-rw-r--r--chromium/v8/src/bigint/bigint.h108
-rw-r--r--chromium/v8/src/bigint/digit-arithmetic.h87
-rw-r--r--chromium/v8/src/bigint/mul-schoolbook.cc99
-rw-r--r--chromium/v8/src/bigint/vector-arithmetic.cc2
-rw-r--r--chromium/v8/src/bigint/vector-arithmetic.h20
-rw-r--r--chromium/v8/src/builtins/accessors.cc34
-rw-r--r--chromium/v8/src/builtins/aggregate-error.tq7
-rw-r--r--chromium/v8/src/builtins/arm/builtins-arm.cc52
-rw-r--r--chromium/v8/src/builtins/arm64/builtins-arm64.cc58
-rw-r--r--chromium/v8/src/builtins/arraybuffer.tq105
-rw-r--r--chromium/v8/src/builtins/base.tq10
-rw-r--r--chromium/v8/src/builtins/builtins-api.cc6
-rw-r--r--chromium/v8/src/builtins/builtins-array-gen.cc1
-rw-r--r--chromium/v8/src/builtins/builtins-array.cc65
-rw-r--r--chromium/v8/src/builtins/builtins-arraybuffer.cc249
-rw-r--r--chromium/v8/src/builtins/builtins-async-function-gen.cc4
-rw-r--r--chromium/v8/src/builtins/builtins-async-gen.cc62
-rw-r--r--chromium/v8/src/builtins/builtins-async-gen.h6
-rw-r--r--chromium/v8/src/builtins/builtins-async-generator-gen.cc2
-rw-r--r--chromium/v8/src/builtins/builtins-call-gen.cc103
-rw-r--r--chromium/v8/src/builtins/builtins-call-gen.h7
-rw-r--r--chromium/v8/src/builtins/builtins-debug-gen.cc22
-rw-r--r--chromium/v8/src/builtins/builtins-definitions.h46
-rw-r--r--chromium/v8/src/builtins/builtins-error.cc5
-rw-r--r--chromium/v8/src/builtins/builtins-generator-gen.cc2
-rw-r--r--chromium/v8/src/builtins/builtins-handler-gen.cc55
-rw-r--r--chromium/v8/src/builtins/builtins-ic-gen.cc273
-rw-r--r--chromium/v8/src/builtins/builtins-internal-gen.cc40
-rw-r--r--chromium/v8/src/builtins/builtins-intl.cc43
-rw-r--r--chromium/v8/src/builtins/builtins-lazy-gen.cc15
-rw-r--r--chromium/v8/src/builtins/builtins-microtask-queue-gen.cc61
-rw-r--r--chromium/v8/src/builtins/builtins-regexp-gen.cc31
-rw-r--r--chromium/v8/src/builtins/builtins-trace.cc1
-rw-r--r--chromium/v8/src/builtins/builtins-typed-array-gen.cc69
-rw-r--r--chromium/v8/src/builtins/builtins-typed-array.cc3
-rw-r--r--chromium/v8/src/builtins/builtins-utils.h3
-rw-r--r--chromium/v8/src/builtins/builtins-wasm-gen.cc1
-rw-r--r--chromium/v8/src/builtins/cast.tq24
-rw-r--r--chromium/v8/src/builtins/constructor.tq11
-rw-r--r--chromium/v8/src/builtins/conversion.tq21
-rw-r--r--chromium/v8/src/builtins/ia32/builtins-ia32.cc65
-rw-r--r--chromium/v8/src/builtins/ic-callable.tq73
-rw-r--r--chromium/v8/src/builtins/ic.tq9
-rw-r--r--chromium/v8/src/builtins/iterator.tq12
-rw-r--r--chromium/v8/src/builtins/mips/builtins-mips.cc37
-rw-r--r--chromium/v8/src/builtins/mips64/builtins-mips64.cc49
-rw-r--r--chromium/v8/src/builtins/ppc/builtins-ppc.cc241
-rw-r--r--chromium/v8/src/builtins/promise-abstract-operations.tq15
-rw-r--r--chromium/v8/src/builtins/promise-all.tq3
-rw-r--r--chromium/v8/src/builtins/promise-constructor.tq7
-rw-r--r--chromium/v8/src/builtins/promise-jobs.tq3
-rw-r--r--chromium/v8/src/builtins/promise-misc.tq121
-rw-r--r--chromium/v8/src/builtins/promise-resolve.tq16
-rw-r--r--chromium/v8/src/builtins/riscv64/builtins-riscv64.cc460
-rw-r--r--chromium/v8/src/builtins/s390/builtins-s390.cc51
-rw-r--r--chromium/v8/src/builtins/setup-builtins-internal.cc16
-rw-r--r--chromium/v8/src/builtins/typed-array-createtypedarray.tq72
-rw-r--r--chromium/v8/src/builtins/wasm.tq5
-rw-r--r--chromium/v8/src/builtins/x64/builtins-x64.cc194
-rw-r--r--chromium/v8/src/codegen/arm/assembler-arm-inl.h2
-rw-r--r--chromium/v8/src/codegen/arm/assembler-arm.cc148
-rw-r--r--chromium/v8/src/codegen/arm/assembler-arm.h77
-rw-r--r--chromium/v8/src/codegen/arm/cpu-arm.cc2
-rw-r--r--chromium/v8/src/codegen/arm/interface-descriptors-arm-inl.h256
-rw-r--r--chromium/v8/src/codegen/arm/interface-descriptors-arm.cc306
-rw-r--r--chromium/v8/src/codegen/arm/macro-assembler-arm.cc98
-rw-r--r--chromium/v8/src/codegen/arm/macro-assembler-arm.h27
-rw-r--r--chromium/v8/src/codegen/arm64/assembler-arm64-inl.h8
-rw-r--r--chromium/v8/src/codegen/arm64/assembler-arm64.h6
-rw-r--r--chromium/v8/src/codegen/arm64/cpu-arm64.cc9
-rw-r--r--chromium/v8/src/codegen/arm64/interface-descriptors-arm64-inl.h265
-rw-r--r--chromium/v8/src/codegen/arm64/interface-descriptors-arm64.cc310
-rw-r--r--chromium/v8/src/codegen/arm64/macro-assembler-arm64-inl.h10
-rw-r--r--chromium/v8/src/codegen/arm64/macro-assembler-arm64.cc112
-rw-r--r--chromium/v8/src/codegen/arm64/macro-assembler-arm64.h27
-rw-r--r--chromium/v8/src/codegen/arm64/register-arm64.h8
-rw-r--r--chromium/v8/src/codegen/assembler.cc4
-rw-r--r--chromium/v8/src/codegen/assembler.h27
-rw-r--r--chromium/v8/src/codegen/bailout-reason.h1
-rw-r--r--chromium/v8/src/codegen/code-factory.cc48
-rw-r--r--chromium/v8/src/codegen/code-factory.h11
-rw-r--r--chromium/v8/src/codegen/code-stub-assembler.cc346
-rw-r--r--chromium/v8/src/codegen/code-stub-assembler.h93
-rw-r--r--chromium/v8/src/codegen/compilation-cache.cc68
-rw-r--r--chromium/v8/src/codegen/compilation-cache.h33
-rw-r--r--chromium/v8/src/codegen/compiler.cc325
-rw-r--r--chromium/v8/src/codegen/compiler.h9
-rw-r--r--chromium/v8/src/codegen/constants-arch.h18
-rw-r--r--chromium/v8/src/codegen/cpu-features.h3
-rw-r--r--chromium/v8/src/codegen/external-reference-table.cc131
-rw-r--r--chromium/v8/src/codegen/external-reference-table.h33
-rw-r--r--chromium/v8/src/codegen/external-reference.cc28
-rw-r--r--chromium/v8/src/codegen/external-reference.h16
-rw-r--r--chromium/v8/src/codegen/handler-table.h4
-rw-r--r--chromium/v8/src/codegen/ia32/assembler-ia32.cc77
-rw-r--r--chromium/v8/src/codegen/ia32/assembler-ia32.h17
-rw-r--r--chromium/v8/src/codegen/ia32/interface-descriptors-ia32-inl.h267
-rw-r--r--chromium/v8/src/codegen/ia32/interface-descriptors-ia32.cc318
-rw-r--r--chromium/v8/src/codegen/ia32/macro-assembler-ia32.cc144
-rw-r--r--chromium/v8/src/codegen/ia32/macro-assembler-ia32.h236
-rw-r--r--chromium/v8/src/codegen/interface-descriptors-inl.h484
-rw-r--r--chromium/v8/src/codegen/interface-descriptors.cc561
-rw-r--r--chromium/v8/src/codegen/interface-descriptors.h1384
-rw-r--r--chromium/v8/src/codegen/machine-type.cc2
-rw-r--r--chromium/v8/src/codegen/machine-type.h29
-rw-r--r--chromium/v8/src/codegen/macro-assembler.h6
-rw-r--r--chromium/v8/src/codegen/mips/assembler-mips.cc18
-rw-r--r--chromium/v8/src/codegen/mips/assembler-mips.h2
-rw-r--r--chromium/v8/src/codegen/mips/interface-descriptors-mips-inl.h258
-rw-r--r--chromium/v8/src/codegen/mips/interface-descriptors-mips.cc332
-rw-r--r--chromium/v8/src/codegen/mips/macro-assembler-mips.cc103
-rw-r--r--chromium/v8/src/codegen/mips/macro-assembler-mips.h27
-rw-r--r--chromium/v8/src/codegen/mips64/assembler-mips64.cc18
-rw-r--r--chromium/v8/src/codegen/mips64/interface-descriptors-mips64-inl.h258
-rw-r--r--chromium/v8/src/codegen/mips64/interface-descriptors-mips64.cc332
-rw-r--r--chromium/v8/src/codegen/mips64/macro-assembler-mips64.cc103
-rw-r--r--chromium/v8/src/codegen/mips64/macro-assembler-mips64.h27
-rw-r--r--chromium/v8/src/codegen/optimized-compilation-info.cc16
-rw-r--r--chromium/v8/src/codegen/optimized-compilation-info.h31
-rw-r--r--chromium/v8/src/codegen/ppc/assembler-ppc.cc22
-rw-r--r--chromium/v8/src/codegen/ppc/assembler-ppc.h7
-rw-r--r--chromium/v8/src/codegen/ppc/constants-ppc.h42
-rw-r--r--chromium/v8/src/codegen/ppc/interface-descriptors-ppc-inl.h256
-rw-r--r--chromium/v8/src/codegen/ppc/interface-descriptors-ppc.cc306
-rw-r--r--chromium/v8/src/codegen/ppc/macro-assembler-ppc.cc279
-rw-r--r--chromium/v8/src/codegen/ppc/macro-assembler-ppc.h72
-rw-r--r--chromium/v8/src/codegen/register-arch.h21
-rw-r--r--chromium/v8/src/codegen/register.h4
-rw-r--r--chromium/v8/src/codegen/reloc-info.cc39
-rw-r--r--chromium/v8/src/codegen/reloc-info.h4
-rw-r--r--chromium/v8/src/codegen/riscv64/assembler-riscv64-inl.h32
-rw-r--r--chromium/v8/src/codegen/riscv64/assembler-riscv64.cc146
-rw-r--r--chromium/v8/src/codegen/riscv64/assembler-riscv64.h80
-rw-r--r--chromium/v8/src/codegen/riscv64/constants-riscv64.cc5
-rw-r--r--chromium/v8/src/codegen/riscv64/constants-riscv64.h15
-rw-r--r--chromium/v8/src/codegen/riscv64/interface-descriptors-riscv64-inl.h265
-rw-r--r--chromium/v8/src/codegen/riscv64/interface-descriptors-riscv64.cc313
-rw-r--r--chromium/v8/src/codegen/riscv64/macro-assembler-riscv64.cc367
-rw-r--r--chromium/v8/src/codegen/riscv64/macro-assembler-riscv64.h59
-rw-r--r--chromium/v8/src/codegen/riscv64/register-riscv64.h2
-rw-r--r--chromium/v8/src/codegen/s390/assembler-s390.cc4
-rw-r--r--chromium/v8/src/codegen/s390/interface-descriptors-s390-inl.h256
-rw-r--r--chromium/v8/src/codegen/s390/interface-descriptors-s390.cc306
-rw-r--r--chromium/v8/src/codegen/s390/macro-assembler-s390.cc155
-rw-r--r--chromium/v8/src/codegen/s390/macro-assembler-s390.h41
-rw-r--r--chromium/v8/src/codegen/safepoint-table.cc58
-rw-r--r--chromium/v8/src/codegen/safepoint-table.h43
-rw-r--r--chromium/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc195
-rw-r--r--chromium/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h221
-rw-r--r--chromium/v8/src/codegen/signature.h8
-rw-r--r--chromium/v8/src/codegen/source-position-table.cc4
-rw-r--r--chromium/v8/src/codegen/source-position-table.h4
-rw-r--r--chromium/v8/src/codegen/string-constants.cc3
-rw-r--r--chromium/v8/src/codegen/turbo-assembler.cc7
-rw-r--r--chromium/v8/src/codegen/turbo-assembler.h33
-rw-r--r--chromium/v8/src/codegen/x64/assembler-x64.cc66
-rw-r--r--chromium/v8/src/codegen/x64/assembler-x64.h11
-rw-r--r--chromium/v8/src/codegen/x64/interface-descriptors-x64-inl.h258
-rw-r--r--chromium/v8/src/codegen/x64/interface-descriptors-x64.cc309
-rw-r--r--chromium/v8/src/codegen/x64/macro-assembler-x64.cc363
-rw-r--r--chromium/v8/src/codegen/x64/macro-assembler-x64.h178
-rw-r--r--chromium/v8/src/codegen/x64/register-x64.h6
-rw-r--r--chromium/v8/src/common/external-pointer-inl.h21
-rw-r--r--chromium/v8/src/common/external-pointer.h4
-rw-r--r--chromium/v8/src/common/globals.h41
-rw-r--r--chromium/v8/src/common/message-template.h5
-rw-r--r--chromium/v8/src/common/ptr-compr-inl.h24
-rw-r--r--chromium/v8/src/compiler-dispatcher/compiler-dispatcher.cc9
-rw-r--r--chromium/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc17
-rw-r--r--chromium/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.h3
-rw-r--r--chromium/v8/src/compiler/OWNERS6
-rw-r--r--chromium/v8/src/compiler/access-builder.cc14
-rw-r--r--chromium/v8/src/compiler/access-builder.h6
-rw-r--r--chromium/v8/src/compiler/access-info.cc260
-rw-r--r--chromium/v8/src/compiler/access-info.h32
-rw-r--r--chromium/v8/src/compiler/add-type-assertions-reducer.cc4
-rw-r--r--chromium/v8/src/compiler/backend/arm/code-generator-arm.cc27
-rw-r--r--chromium/v8/src/compiler/backend/arm/instruction-selector-arm.cc8
-rw-r--r--chromium/v8/src/compiler/backend/arm64/code-generator-arm64.cc37
-rw-r--r--chromium/v8/src/compiler/backend/arm64/instruction-codes-arm64.h2
-rw-r--r--chromium/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc2
-rw-r--r--chromium/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc26
-rw-r--r--chromium/v8/src/compiler/backend/code-generator.cc8
-rw-r--r--chromium/v8/src/compiler/backend/ia32/code-generator-ia32.cc215
-rw-r--r--chromium/v8/src/compiler/backend/ia32/instruction-codes-ia32.h6
-rw-r--r--chromium/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc6
-rw-r--r--chromium/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc85
-rw-r--r--chromium/v8/src/compiler/backend/instruction-scheduler.h6
-rw-r--r--chromium/v8/src/compiler/backend/instruction-selector-impl.h10
-rw-r--r--chromium/v8/src/compiler/backend/instruction-selector.cc30
-rw-r--r--chromium/v8/src/compiler/backend/instruction-selector.h3
-rw-r--r--chromium/v8/src/compiler/backend/instruction.cc7
-rw-r--r--chromium/v8/src/compiler/backend/instruction.h20
-rw-r--r--chromium/v8/src/compiler/backend/mid-tier-register-allocator.cc44
-rw-r--r--chromium/v8/src/compiler/backend/mips/code-generator-mips.cc85
-rw-r--r--chromium/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc4
-rw-r--r--chromium/v8/src/compiler/backend/mips/instruction-selector-mips.cc15
-rw-r--r--chromium/v8/src/compiler/backend/mips64/code-generator-mips64.cc61
-rw-r--r--chromium/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc4
-rw-r--r--chromium/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc19
-rw-r--r--chromium/v8/src/compiler/backend/ppc/OWNERS5
-rw-r--r--chromium/v8/src/compiler/backend/ppc/code-generator-ppc.cc207
-rw-r--r--chromium/v8/src/compiler/backend/ppc/instruction-codes-ppc.h6
-rw-r--r--chromium/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc6
-rw-r--r--chromium/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc8
-rw-r--r--chromium/v8/src/compiler/backend/register-allocation.h1
-rw-r--r--chromium/v8/src/compiler/backend/register-allocator-verifier.cc22
-rw-r--r--chromium/v8/src/compiler/backend/register-allocator-verifier.h2
-rw-r--r--chromium/v8/src/compiler/backend/register-allocator.cc48
-rw-r--r--chromium/v8/src/compiler/backend/register-allocator.h3
-rw-r--r--chromium/v8/src/compiler/backend/riscv64/code-generator-riscv64.cc112
-rw-r--r--chromium/v8/src/compiler/backend/riscv64/instruction-codes-riscv64.h1
-rw-r--r--chromium/v8/src/compiler/backend/riscv64/instruction-scheduler-riscv64.cc5
-rw-r--r--chromium/v8/src/compiler/backend/riscv64/instruction-selector-riscv64.cc22
-rw-r--r--chromium/v8/src/compiler/backend/s390/code-generator-s390.cc165
-rw-r--r--chromium/v8/src/compiler/backend/s390/instruction-selector-s390.cc11
-rw-r--r--chromium/v8/src/compiler/backend/x64/code-generator-x64.cc230
-rw-r--r--chromium/v8/src/compiler/backend/x64/instruction-codes-x64.h1
-rw-r--r--chromium/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc1
-rw-r--r--chromium/v8/src/compiler/backend/x64/instruction-selector-x64.cc91
-rw-r--r--chromium/v8/src/compiler/bytecode-graph-builder.cc136
-rw-r--r--chromium/v8/src/compiler/code-assembler.cc41
-rw-r--r--chromium/v8/src/compiler/code-assembler.h19
-rw-r--r--chromium/v8/src/compiler/common-operator-reducer.cc10
-rw-r--r--chromium/v8/src/compiler/common-operator.h17
-rw-r--r--chromium/v8/src/compiler/compilation-dependencies.cc22
-rw-r--r--chromium/v8/src/compiler/compilation-dependencies.h4
-rw-r--r--chromium/v8/src/compiler/constant-folding-reducer.cc3
-rw-r--r--chromium/v8/src/compiler/dead-code-elimination.cc8
-rw-r--r--chromium/v8/src/compiler/dead-code-elimination.h5
-rw-r--r--chromium/v8/src/compiler/decompression-optimizer.cc7
-rw-r--r--chromium/v8/src/compiler/effect-control-linearizer.cc127
-rw-r--r--chromium/v8/src/compiler/effect-control-linearizer.h15
-rw-r--r--chromium/v8/src/compiler/globals.h21
-rw-r--r--chromium/v8/src/compiler/graph-assembler.cc66
-rw-r--r--chromium/v8/src/compiler/graph-assembler.h24
-rw-r--r--chromium/v8/src/compiler/graph-reducer.cc4
-rw-r--r--chromium/v8/src/compiler/graph-visualizer.cc7
-rw-r--r--chromium/v8/src/compiler/graph-visualizer.h3
-rw-r--r--chromium/v8/src/compiler/graph.h2
-rw-r--r--chromium/v8/src/compiler/heap-refs.cc4594
-rw-r--r--chromium/v8/src/compiler/heap-refs.h297
-rw-r--r--chromium/v8/src/compiler/int64-lowering.cc33
-rw-r--r--chromium/v8/src/compiler/js-call-reducer.cc327
-rw-r--r--chromium/v8/src/compiler/js-context-specialization.cc25
-rw-r--r--chromium/v8/src/compiler/js-create-lowering.cc120
-rw-r--r--chromium/v8/src/compiler/js-create-lowering.h11
-rw-r--r--chromium/v8/src/compiler/js-generic-lowering.cc7
-rw-r--r--chromium/v8/src/compiler/js-graph.cc3
-rw-r--r--chromium/v8/src/compiler/js-graph.h7
-rw-r--r--chromium/v8/src/compiler/js-heap-broker.cc4620
-rw-r--r--chromium/v8/src/compiler/js-heap-broker.h173
-rw-r--r--chromium/v8/src/compiler/js-heap-copy-reducer.cc43
-rw-r--r--chromium/v8/src/compiler/js-inlining-heuristic.cc4
-rw-r--r--chromium/v8/src/compiler/js-inlining.cc7
-rw-r--r--chromium/v8/src/compiler/js-intrinsic-lowering.cc20
-rw-r--r--chromium/v8/src/compiler/js-intrinsic-lowering.h1
-rw-r--r--chromium/v8/src/compiler/js-native-context-specialization.cc132
-rw-r--r--chromium/v8/src/compiler/js-operator.cc10
-rw-r--r--chromium/v8/src/compiler/js-operator.h84
-rw-r--r--chromium/v8/src/compiler/js-typed-lowering.cc28
-rw-r--r--chromium/v8/src/compiler/load-elimination.cc3
-rw-r--r--chromium/v8/src/compiler/loop-analysis.cc25
-rw-r--r--chromium/v8/src/compiler/machine-graph-verifier.cc1
-rw-r--r--chromium/v8/src/compiler/machine-operator-reducer.cc57
-rw-r--r--chromium/v8/src/compiler/machine-operator.cc10
-rw-r--r--chromium/v8/src/compiler/machine-operator.h55
-rw-r--r--chromium/v8/src/compiler/map-inference.cc8
-rw-r--r--chromium/v8/src/compiler/memory-lowering.cc95
-rw-r--r--chromium/v8/src/compiler/memory-lowering.h1
-rw-r--r--chromium/v8/src/compiler/memory-optimizer.cc39
-rw-r--r--chromium/v8/src/compiler/memory-optimizer.h2
-rw-r--r--chromium/v8/src/compiler/node-matchers.h9
-rw-r--r--chromium/v8/src/compiler/node-observer.h2
-rw-r--r--chromium/v8/src/compiler/node-properties.h10
-rw-r--r--chromium/v8/src/compiler/opcodes.h14
-rw-r--r--chromium/v8/src/compiler/operation-typer.cc11
-rw-r--r--chromium/v8/src/compiler/operator.h2
-rw-r--r--chromium/v8/src/compiler/pipeline.cc204
-rw-r--r--chromium/v8/src/compiler/processed-feedback.h8
-rw-r--r--chromium/v8/src/compiler/property-access-builder.cc27
-rw-r--r--chromium/v8/src/compiler/raw-machine-assembler.cc7
-rw-r--r--chromium/v8/src/compiler/raw-machine-assembler.h47
-rw-r--r--chromium/v8/src/compiler/refs-map.cc4
-rw-r--r--chromium/v8/src/compiler/refs-map.h1
-rw-r--r--chromium/v8/src/compiler/representation-change.cc33
-rw-r--r--chromium/v8/src/compiler/representation-change.h11
-rw-r--r--chromium/v8/src/compiler/scheduled-machine-lowering.cc68
-rw-r--r--chromium/v8/src/compiler/scheduled-machine-lowering.h51
-rw-r--r--chromium/v8/src/compiler/scheduler.cc156
-rw-r--r--chromium/v8/src/compiler/scheduler.h7
-rw-r--r--chromium/v8/src/compiler/select-lowering.cc10
-rw-r--r--chromium/v8/src/compiler/serializer-for-background-compilation.cc216
-rw-r--r--chromium/v8/src/compiler/simd-scalar-lowering.cc2829
-rw-r--r--chromium/v8/src/compiler/simd-scalar-lowering.h150
-rw-r--r--chromium/v8/src/compiler/simplified-lowering.cc207
-rw-r--r--chromium/v8/src/compiler/simplified-operator-reducer.cc7
-rw-r--r--chromium/v8/src/compiler/simplified-operator.cc38
-rw-r--r--chromium/v8/src/compiler/simplified-operator.h44
-rw-r--r--chromium/v8/src/compiler/store-store-elimination.cc4
-rw-r--r--chromium/v8/src/compiler/type-cache.h18
-rw-r--r--chromium/v8/src/compiler/type-narrowing-reducer.cc8
-rw-r--r--chromium/v8/src/compiler/type-narrowing-reducer.h1
-rw-r--r--chromium/v8/src/compiler/typed-optimization.cc2
-rw-r--r--chromium/v8/src/compiler/typer.cc6
-rw-r--r--chromium/v8/src/compiler/types.cc13
-rw-r--r--chromium/v8/src/compiler/types.h6
-rw-r--r--chromium/v8/src/compiler/verifier.cc27
-rw-r--r--chromium/v8/src/compiler/wasm-compiler.cc404
-rw-r--r--chromium/v8/src/compiler/wasm-compiler.h5
-rw-r--r--chromium/v8/src/d8/async-hooks-wrapper.cc6
-rw-r--r--chromium/v8/src/d8/d8-console.cc5
-rw-r--r--chromium/v8/src/d8/d8-posix.cc132
-rw-r--r--chromium/v8/src/d8/d8-test.cc200
-rw-r--r--chromium/v8/src/d8/d8.cc239
-rw-r--r--chromium/v8/src/d8/d8.h14
-rw-r--r--chromium/v8/src/debug/arm/debug-arm.cc57
-rw-r--r--chromium/v8/src/debug/arm64/debug-arm64.cc58
-rw-r--r--chromium/v8/src/debug/debug-coverage.cc3
-rw-r--r--chromium/v8/src/debug/debug-evaluate.cc29
-rw-r--r--chromium/v8/src/debug/debug-evaluate.h4
-rw-r--r--chromium/v8/src/debug/debug-frames.cc17
-rw-r--r--chromium/v8/src/debug/debug-frames.h3
-rw-r--r--chromium/v8/src/debug/debug-interface.cc67
-rw-r--r--chromium/v8/src/debug/debug-interface.h31
-rw-r--r--chromium/v8/src/debug/debug-scopes.cc8
-rw-r--r--chromium/v8/src/debug/debug-stack-trace-iterator.cc8
-rw-r--r--chromium/v8/src/debug/debug-stack-trace-iterator.h1
-rw-r--r--chromium/v8/src/debug/debug-wasm-objects.cc117
-rw-r--r--chromium/v8/src/debug/debug-wasm-objects.h11
-rw-r--r--chromium/v8/src/debug/debug.cc65
-rw-r--r--chromium/v8/src/debug/debug.h32
-rw-r--r--chromium/v8/src/debug/ia32/debug-ia32.cc54
-rw-r--r--chromium/v8/src/debug/liveedit.cc289
-rw-r--r--chromium/v8/src/debug/liveedit.h7
-rw-r--r--chromium/v8/src/debug/mips/debug-mips.cc55
-rw-r--r--chromium/v8/src/debug/mips64/debug-mips64.cc55
-rw-r--r--chromium/v8/src/debug/ppc/OWNERS5
-rw-r--r--chromium/v8/src/debug/ppc/debug-ppc.cc53
-rw-r--r--chromium/v8/src/debug/riscv64/debug-riscv64.cc55
-rw-r--r--chromium/v8/src/debug/s390/debug-s390.cc55
-rw-r--r--chromium/v8/src/debug/x64/debug-x64.cc55
-rw-r--r--chromium/v8/src/deoptimizer/deoptimizer-cfi-builtins.cc8
-rw-r--r--chromium/v8/src/deoptimizer/deoptimizer.cc17
-rw-r--r--chromium/v8/src/deoptimizer/riscv64/deoptimizer-riscv64.cc6
-rw-r--r--chromium/v8/src/deoptimizer/translated-state.cc14
-rw-r--r--chromium/v8/src/diagnostics/compilation-statistics.cc5
-rw-r--r--chromium/v8/src/diagnostics/disassembler.cc13
-rw-r--r--chromium/v8/src/diagnostics/eh-frame.cc6
-rw-r--r--chromium/v8/src/diagnostics/eh-frame.h8
-rw-r--r--chromium/v8/src/diagnostics/ia32/disasm-ia32.cc5
-rw-r--r--chromium/v8/src/diagnostics/objects-debug.cc31
-rw-r--r--chromium/v8/src/diagnostics/objects-printer.cc341
-rw-r--r--chromium/v8/src/diagnostics/perf-jit.cc1
-rw-r--r--chromium/v8/src/diagnostics/ppc/disasm-ppc.cc36
-rw-r--r--chromium/v8/src/diagnostics/riscv64/disasm-riscv64.cc25
-rw-r--r--chromium/v8/src/diagnostics/system-jit-metadata-win.h3
-rw-r--r--chromium/v8/src/diagnostics/system-jit-win.cc4
-rw-r--r--chromium/v8/src/diagnostics/unwinding-info-win64.cc47
-rw-r--r--chromium/v8/src/execution/arguments.h45
-rw-r--r--chromium/v8/src/execution/arm/simulator-arm.cc696
-rw-r--r--chromium/v8/src/execution/arm/simulator-arm.h9
-rw-r--r--chromium/v8/src/execution/arm64/simulator-arm64.cc2
-rw-r--r--chromium/v8/src/execution/execution.cc16
-rw-r--r--chromium/v8/src/execution/external-pointer-table.h9
-rw-r--r--chromium/v8/src/execution/frame-constants.h18
-rw-r--r--chromium/v8/src/execution/frames.cc41
-rw-r--r--chromium/v8/src/execution/frames.h7
-rw-r--r--chromium/v8/src/execution/isolate-data.h32
-rw-r--r--chromium/v8/src/execution/isolate-utils-inl.h19
-rw-r--r--chromium/v8/src/execution/isolate.cc330
-rw-r--r--chromium/v8/src/execution/isolate.h183
-rw-r--r--chromium/v8/src/execution/local-isolate-inl.h2
-rw-r--r--chromium/v8/src/execution/local-isolate.h5
-rw-r--r--chromium/v8/src/execution/messages.cc45
-rw-r--r--chromium/v8/src/execution/messages.h7
-rw-r--r--chromium/v8/src/execution/microtask-queue.cc10
-rw-r--r--chromium/v8/src/execution/mips/simulator-mips.cc2
-rw-r--r--chromium/v8/src/execution/mips64/simulator-mips64.cc2
-rw-r--r--chromium/v8/src/execution/ppc/simulator-ppc.cc52
-rw-r--r--chromium/v8/src/execution/ppc/simulator-ppc.h12
-rw-r--r--chromium/v8/src/execution/protectors.h2
-rw-r--r--chromium/v8/src/execution/riscv64/simulator-riscv64.cc39
-rw-r--r--chromium/v8/src/execution/riscv64/simulator-riscv64.h1
-rw-r--r--chromium/v8/src/execution/runtime-profiler.cc15
-rw-r--r--chromium/v8/src/execution/s390/simulator-s390.cc96
-rw-r--r--chromium/v8/src/execution/shared-mutex-guard-if-off-thread.h2
-rw-r--r--chromium/v8/src/execution/stack-guard.cc1
-rw-r--r--chromium/v8/src/extensions/cputracemark-extension.cc5
-rw-r--r--chromium/v8/src/extensions/externalize-string-extension.cc21
-rw-r--r--chromium/v8/src/extensions/vtunedomain-support-extension.cc7
-rw-r--r--chromium/v8/src/flags/flag-definitions.h147
-rw-r--r--chromium/v8/src/flags/flags.cc2
-rw-r--r--chromium/v8/src/handles/global-handles.cc23
-rw-r--r--chromium/v8/src/handles/handles-inl.h2
-rw-r--r--chromium/v8/src/handles/local-handles-inl.h2
-rw-r--r--chromium/v8/src/heap/array-buffer-sweeper.cc55
-rw-r--r--chromium/v8/src/heap/array-buffer-sweeper.h9
-rw-r--r--chromium/v8/src/heap/base/asm/riscv64/push_registers_asm.cc34
-rw-r--r--chromium/v8/src/heap/base/asm/x64/push_registers_asm.cc16
-rw-r--r--chromium/v8/src/heap/base/asm/x64/push_registers_masm.S18
-rw-r--r--chromium/v8/src/heap/base/stack.cc17
-rw-r--r--chromium/v8/src/heap/base/stack.h14
-rw-r--r--chromium/v8/src/heap/basic-memory-chunk.h7
-rw-r--r--chromium/v8/src/heap/code-range.cc172
-rw-r--r--chromium/v8/src/heap/code-range.h147
-rw-r--r--chromium/v8/src/heap/collection-barrier.cc3
-rw-r--r--chromium/v8/src/heap/combined-heap.h3
-rw-r--r--chromium/v8/src/heap/concurrent-allocator-inl.h1
-rw-r--r--chromium/v8/src/heap/concurrent-marking.cc31
-rw-r--r--chromium/v8/src/heap/cppgc-js/cpp-heap.cc115
-rw-r--r--chromium/v8/src/heap/cppgc-js/cpp-heap.h9
-rw-r--r--chromium/v8/src/heap/cppgc-js/cpp-snapshot.cc10
-rw-r--r--chromium/v8/src/heap/cppgc-js/unified-heap-marking-verifier.cc11
-rw-r--r--chromium/v8/src/heap/cppgc-js/unified-heap-marking-verifier.h2
-rw-r--r--chromium/v8/src/heap/cppgc/caged-heap.cc23
-rw-r--r--chromium/v8/src/heap/cppgc/compactor.cc21
-rw-r--r--chromium/v8/src/heap/cppgc/concurrent-marker.cc2
-rw-r--r--chromium/v8/src/heap/cppgc/explicit-management.cc67
-rw-r--r--chromium/v8/src/heap/cppgc/free-list.cc12
-rw-r--r--chromium/v8/src/heap/cppgc/gc-info-table.cc5
-rw-r--r--chromium/v8/src/heap/cppgc/heap-base.cc32
-rw-r--r--chromium/v8/src/heap/cppgc/heap-base.h30
-rw-r--r--chromium/v8/src/heap/cppgc/heap-object-header.cc8
-rw-r--r--chromium/v8/src/heap/cppgc/heap-object-header.h43
-rw-r--r--chromium/v8/src/heap/cppgc/heap-page.cc5
-rw-r--r--chromium/v8/src/heap/cppgc/heap-page.h22
-rw-r--r--chromium/v8/src/heap/cppgc/heap-statistics-collector.cc2
-rw-r--r--chromium/v8/src/heap/cppgc/heap.cc4
-rw-r--r--chromium/v8/src/heap/cppgc/liveness-broker.cc2
-rw-r--r--chromium/v8/src/heap/cppgc/marker.cc42
-rw-r--r--chromium/v8/src/heap/cppgc/marking-state.h41
-rw-r--r--chromium/v8/src/heap/cppgc/marking-verifier.cc61
-rw-r--r--chromium/v8/src/heap/cppgc/marking-verifier.h14
-rw-r--r--chromium/v8/src/heap/cppgc/marking-visitor.cc2
-rw-r--r--chromium/v8/src/heap/cppgc/memory.cc22
-rw-r--r--chromium/v8/src/heap/cppgc/memory.h76
-rw-r--r--chromium/v8/src/heap/cppgc/metric-recorder.h2
-rw-r--r--chromium/v8/src/heap/cppgc/object-allocator.cc2
-rw-r--r--chromium/v8/src/heap/cppgc/object-allocator.h10
-rw-r--r--chromium/v8/src/heap/cppgc/object-poisoner.h4
-rw-r--r--chromium/v8/src/heap/cppgc/object-size-trait.cc7
-rw-r--r--chromium/v8/src/heap/cppgc/object-view.h54
-rw-r--r--chromium/v8/src/heap/cppgc/page-memory.cc2
-rw-r--r--chromium/v8/src/heap/cppgc/pointer-policies.cc59
-rw-r--r--chromium/v8/src/heap/cppgc/process-heap-statistics.h2
-rw-r--r--chromium/v8/src/heap/cppgc/process-heap.cc56
-rw-r--r--chromium/v8/src/heap/cppgc/process-heap.h35
-rw-r--r--chromium/v8/src/heap/cppgc/sanitizers.h82
-rw-r--r--chromium/v8/src/heap/cppgc/stats-collector.h4
-rw-r--r--chromium/v8/src/heap/cppgc/sweeper.cc81
-rw-r--r--chromium/v8/src/heap/cppgc/sweeper.h4
-rw-r--r--chromium/v8/src/heap/cppgc/trace-trait.cc7
-rw-r--r--chromium/v8/src/heap/cppgc/visitor.cc21
-rw-r--r--chromium/v8/src/heap/embedder-tracing.cc34
-rw-r--r--chromium/v8/src/heap/embedder-tracing.h42
-rw-r--r--chromium/v8/src/heap/factory-base-inl.h26
-rw-r--r--chromium/v8/src/heap/factory-base.cc121
-rw-r--r--chromium/v8/src/heap/factory-base.h9
-rw-r--r--chromium/v8/src/heap/factory.cc248
-rw-r--r--chromium/v8/src/heap/factory.h19
-rw-r--r--chromium/v8/src/heap/finalization-registry-cleanup-task.cc15
-rw-r--r--chromium/v8/src/heap/free-list.cc21
-rw-r--r--chromium/v8/src/heap/gc-tracer.cc32
-rw-r--r--chromium/v8/src/heap/gc-tracer.h6
-rw-r--r--chromium/v8/src/heap/heap-inl.h89
-rw-r--r--chromium/v8/src/heap/heap-write-barrier.cc11
-rw-r--r--chromium/v8/src/heap/heap.cc553
-rw-r--r--chromium/v8/src/heap/heap.h127
-rw-r--r--chromium/v8/src/heap/incremental-marking.cc43
-rw-r--r--chromium/v8/src/heap/large-spaces.cc6
-rw-r--r--chromium/v8/src/heap/large-spaces.h1
-rw-r--r--chromium/v8/src/heap/local-allocator.h2
-rw-r--r--chromium/v8/src/heap/local-heap-inl.h2
-rw-r--r--chromium/v8/src/heap/local-heap.cc2
-rw-r--r--chromium/v8/src/heap/mark-compact-inl.h8
-rw-r--r--chromium/v8/src/heap/mark-compact.cc164
-rw-r--r--chromium/v8/src/heap/mark-compact.h7
-rw-r--r--chromium/v8/src/heap/marking-barrier-inl.h10
-rw-r--r--chromium/v8/src/heap/marking-visitor-inl.h6
-rw-r--r--chromium/v8/src/heap/marking-visitor.h9
-rw-r--r--chromium/v8/src/heap/memory-allocator.cc112
-rw-r--r--chromium/v8/src/heap/memory-allocator.h68
-rw-r--r--chromium/v8/src/heap/memory-chunk.cc3
-rw-r--r--chromium/v8/src/heap/memory-measurement.cc2
-rw-r--r--chromium/v8/src/heap/new-spaces-inl.h8
-rw-r--r--chromium/v8/src/heap/new-spaces.cc9
-rw-r--r--chromium/v8/src/heap/object-stats.cc2
-rw-r--r--chromium/v8/src/heap/objects-visiting-inl.h7
-rw-r--r--chromium/v8/src/heap/objects-visiting.h7
-rw-r--r--chromium/v8/src/heap/paged-spaces-inl.h3
-rw-r--r--chromium/v8/src/heap/paged-spaces.cc6
-rw-r--r--chromium/v8/src/heap/read-only-heap.cc4
-rw-r--r--chromium/v8/src/heap/read-only-heap.h5
-rw-r--r--chromium/v8/src/heap/read-only-spaces.cc40
-rw-r--r--chromium/v8/src/heap/read-only-spaces.h3
-rw-r--r--chromium/v8/src/heap/scavenger-inl.h20
-rw-r--r--chromium/v8/src/heap/scavenger.cc18
-rw-r--r--chromium/v8/src/heap/setup-heap-internal.cc32
-rw-r--r--chromium/v8/src/heap/spaces-inl.h3
-rw-r--r--chromium/v8/src/heap/spaces.cc2
-rw-r--r--chromium/v8/src/heap/spaces.h3
-rw-r--r--chromium/v8/src/heap/sweeper.cc6
-rw-r--r--chromium/v8/src/heap/third-party/heap-api-stub.cc6
-rw-r--r--chromium/v8/src/heap/third-party/heap-api.h6
-rw-r--r--chromium/v8/src/heap/weak-object-worklists.cc24
-rw-r--r--chromium/v8/src/ic/accessor-assembler.cc265
-rw-r--r--chromium/v8/src/ic/accessor-assembler.h6
-rw-r--r--chromium/v8/src/ic/call-optimization.cc40
-rw-r--r--chromium/v8/src/ic/call-optimization.h29
-rw-r--r--chromium/v8/src/ic/ic.cc97
-rw-r--r--chromium/v8/src/ic/ic.h5
-rw-r--r--chromium/v8/src/ic/keyed-store-generic.cc6
-rw-r--r--chromium/v8/src/init/bootstrapper.cc317
-rw-r--r--chromium/v8/src/init/heap-symbols.h451
-rw-r--r--chromium/v8/src/init/isolate-allocator.cc238
-rw-r--r--chromium/v8/src/init/isolate-allocator.h39
-rw-r--r--chromium/v8/src/init/v8.cc23
-rw-r--r--chromium/v8/src/inspector/injected-script.cc3
-rw-r--r--chromium/v8/src/inspector/injected-script.h3
-rw-r--r--chromium/v8/src/inspector/string-16.h4
-rw-r--r--chromium/v8/src/inspector/v8-console-message.cc2
-rw-r--r--chromium/v8/src/inspector/v8-console.cc94
-rw-r--r--chromium/v8/src/inspector/v8-debugger-agent-impl.cc18
-rw-r--r--chromium/v8/src/inspector/v8-debugger.cc2
-rw-r--r--chromium/v8/src/inspector/v8-heap-profiler-agent-impl.cc15
-rw-r--r--chromium/v8/src/inspector/v8-heap-profiler-agent-impl.h9
-rw-r--r--chromium/v8/src/inspector/v8-inspector-session-impl.cc14
-rw-r--r--chromium/v8/src/inspector/v8-inspector-session-impl.h3
-rw-r--r--chromium/v8/src/inspector/v8-profiler-agent-impl.cc9
-rw-r--r--chromium/v8/src/inspector/v8-runtime-agent-impl.cc19
-rw-r--r--chromium/v8/src/inspector/v8-runtime-agent-impl.h3
-rw-r--r--chromium/v8/src/inspector/v8-stack-trace-impl.cc17
-rw-r--r--chromium/v8/src/inspector/v8-stack-trace-impl.h5
-rw-r--r--chromium/v8/src/inspector/v8-string-conversions.cc7
-rw-r--r--chromium/v8/src/inspector/value-mirror.cc69
-rw-r--r--chromium/v8/src/interpreter/bytecode-array-builder.cc57
-rw-r--r--chromium/v8/src/interpreter/bytecode-array-builder.h8
-rw-r--r--chromium/v8/src/interpreter/bytecode-array-iterator.cc8
-rw-r--r--chromium/v8/src/interpreter/bytecode-array-iterator.h8
-rw-r--r--chromium/v8/src/interpreter/bytecode-array-writer.cc8
-rw-r--r--chromium/v8/src/interpreter/bytecode-array-writer.h10
-rw-r--r--chromium/v8/src/interpreter/bytecode-generator.cc45
-rw-r--r--chromium/v8/src/interpreter/bytecode-generator.h16
-rw-r--r--chromium/v8/src/interpreter/bytecode-operands.h2
-rw-r--r--chromium/v8/src/interpreter/bytecodes.h75
-rw-r--r--chromium/v8/src/interpreter/constant-array-builder.cc17
-rw-r--r--chromium/v8/src/interpreter/constant-array-builder.h16
-rw-r--r--chromium/v8/src/interpreter/handler-table-builder.cc4
-rw-r--r--chromium/v8/src/interpreter/handler-table-builder.h4
-rw-r--r--chromium/v8/src/interpreter/interpreter-assembler.cc26
-rw-r--r--chromium/v8/src/interpreter/interpreter-assembler.h3
-rw-r--r--chromium/v8/src/interpreter/interpreter-generator.cc27
-rw-r--r--chromium/v8/src/interpreter/interpreter-intrinsics-generator.cc6
-rw-r--r--chromium/v8/src/interpreter/interpreter-intrinsics.h1
-rw-r--r--chromium/v8/src/interpreter/interpreter.cc36
-rw-r--r--chromium/v8/src/json/json-parser.cc16
-rw-r--r--chromium/v8/src/libplatform/tracing/recorder-default.cc25
-rw-r--r--chromium/v8/src/libplatform/tracing/recorder-mac.cc43
-rw-r--r--chromium/v8/src/libplatform/tracing/recorder.h23
-rw-r--r--chromium/v8/src/libplatform/tracing/trace-writer.cc5
-rw-r--r--chromium/v8/src/libplatform/tracing/trace-writer.h2
-rw-r--r--chromium/v8/src/logging/code-events.h6
-rw-r--r--chromium/v8/src/logging/counters-definitions.h5
-rw-r--r--chromium/v8/src/logging/counters-inl.h64
-rw-r--r--chromium/v8/src/logging/counters.cc370
-rw-r--r--chromium/v8/src/logging/counters.h687
-rw-r--r--chromium/v8/src/logging/log.cc10
-rw-r--r--chromium/v8/src/logging/log.h3
-rw-r--r--chromium/v8/src/logging/runtime-call-stats.cc370
-rw-r--r--chromium/v8/src/logging/runtime-call-stats.h763
-rw-r--r--chromium/v8/src/numbers/conversions.cc48
-rw-r--r--chromium/v8/src/numbers/conversions.h4
-rw-r--r--chromium/v8/src/numbers/dtoa.h11
-rw-r--r--chromium/v8/src/objects/all-objects-inl.h1
-rw-r--r--chromium/v8/src/objects/allocation-site-inl.h12
-rw-r--r--chromium/v8/src/objects/allocation-site.h10
-rw-r--r--chromium/v8/src/objects/api-callbacks.h2
-rw-r--r--chromium/v8/src/objects/arguments.tq2
-rw-r--r--chromium/v8/src/objects/backing-store.cc282
-rw-r--r--chromium/v8/src/objects/backing-store.h49
-rw-r--r--chromium/v8/src/objects/bigint.cc131
-rw-r--r--chromium/v8/src/objects/bigint.h16
-rw-r--r--chromium/v8/src/objects/code-inl.h6
-rw-r--r--chromium/v8/src/objects/code-kind.cc2
-rw-r--r--chromium/v8/src/objects/code-kind.h39
-rw-r--r--chromium/v8/src/objects/code.cc124
-rw-r--r--chromium/v8/src/objects/code.h3
-rw-r--r--chromium/v8/src/objects/compilation-cache-table.cc34
-rw-r--r--chromium/v8/src/objects/compilation-cache-table.h7
-rw-r--r--chromium/v8/src/objects/compressed-slots-inl.h21
-rw-r--r--chromium/v8/src/objects/compressed-slots.h4
-rw-r--r--chromium/v8/src/objects/contexts-inl.h87
-rw-r--r--chromium/v8/src/objects/contexts.cc55
-rw-r--r--chromium/v8/src/objects/contexts.h67
-rw-r--r--chromium/v8/src/objects/contexts.tq32
-rw-r--r--chromium/v8/src/objects/debug-objects-inl.h6
-rw-r--r--chromium/v8/src/objects/debug-objects.cc35
-rw-r--r--chromium/v8/src/objects/debug-objects.h8
-rw-r--r--chromium/v8/src/objects/debug-objects.tq6
-rw-r--r--chromium/v8/src/objects/descriptor-array.h4
-rw-r--r--chromium/v8/src/objects/dictionary.h12
-rw-r--r--chromium/v8/src/objects/elements-kind.cc12
-rw-r--r--chromium/v8/src/objects/elements-kind.h58
-rw-r--r--chromium/v8/src/objects/elements.cc870
-rw-r--r--chromium/v8/src/objects/elements.h43
-rw-r--r--chromium/v8/src/objects/embedder-data-slot-inl.h30
-rw-r--r--chromium/v8/src/objects/embedder-data-slot.h5
-rw-r--r--chromium/v8/src/objects/feedback-vector-inl.h14
-rw-r--r--chromium/v8/src/objects/feedback-vector.cc73
-rw-r--r--chromium/v8/src/objects/feedback-vector.h26
-rw-r--r--chromium/v8/src/objects/feedback-vector.tq2
-rw-r--r--chromium/v8/src/objects/fixed-array-inl.h23
-rw-r--r--chromium/v8/src/objects/fixed-array.h17
-rw-r--r--chromium/v8/src/objects/fixed-array.tq4
-rw-r--r--chromium/v8/src/objects/foreign-inl.h3
-rw-r--r--chromium/v8/src/objects/free-space-inl.h8
-rw-r--r--chromium/v8/src/objects/free-space.h3
-rw-r--r--chromium/v8/src/objects/function-kind.h8
-rw-r--r--chromium/v8/src/objects/hash-table-inl.h5
-rw-r--r--chromium/v8/src/objects/hash-table.h16
-rw-r--r--chromium/v8/src/objects/heap-object.h16
-rw-r--r--chromium/v8/src/objects/instance-type-inl.h4
-rw-r--r--chromium/v8/src/objects/instance-type.h7
-rw-r--r--chromium/v8/src/objects/internal-index.h3
-rw-r--r--chromium/v8/src/objects/intl-objects.cc94
-rw-r--r--chromium/v8/src/objects/intl-objects.h2
-rw-r--r--chromium/v8/src/objects/js-array-buffer-inl.h51
-rw-r--r--chromium/v8/src/objects/js-array-buffer.cc51
-rw-r--r--chromium/v8/src/objects/js-array-buffer.h37
-rw-r--r--chromium/v8/src/objects/js-array-buffer.tq32
-rw-r--r--chromium/v8/src/objects/js-array.h4
-rw-r--r--chromium/v8/src/objects/js-function-inl.h21
-rw-r--r--chromium/v8/src/objects/js-function.cc65
-rw-r--r--chromium/v8/src/objects/js-function.h12
-rw-r--r--chromium/v8/src/objects/js-locale.cc344
-rw-r--r--chromium/v8/src/objects/js-locale.h15
-rw-r--r--chromium/v8/src/objects/js-objects-inl.h29
-rw-r--r--chromium/v8/src/objects/js-objects.cc60
-rw-r--r--chromium/v8/src/objects/js-objects.h22
-rw-r--r--chromium/v8/src/objects/js-objects.tq7
-rw-r--r--chromium/v8/src/objects/js-regexp.cc21
-rw-r--r--chromium/v8/src/objects/js-regexp.h3
-rw-r--r--chromium/v8/src/objects/literal-objects.cc62
-rw-r--r--chromium/v8/src/objects/literal-objects.h12
-rw-r--r--chromium/v8/src/objects/lookup-inl.h5
-rw-r--r--chromium/v8/src/objects/lookup.cc35
-rw-r--r--chromium/v8/src/objects/map-inl.h140
-rw-r--r--chromium/v8/src/objects/map-updater.cc280
-rw-r--r--chromium/v8/src/objects/map-updater.h26
-rw-r--r--chromium/v8/src/objects/map.cc317
-rw-r--r--chromium/v8/src/objects/map.h61
-rw-r--r--chromium/v8/src/objects/map.tq3
-rw-r--r--chromium/v8/src/objects/megadom-handler-inl.h26
-rw-r--r--chromium/v8/src/objects/megadom-handler.h32
-rw-r--r--chromium/v8/src/objects/megadom-handler.tq11
-rw-r--r--chromium/v8/src/objects/object-list-macros.h4
-rw-r--r--chromium/v8/src/objects/object-macros-undef.h6
-rw-r--r--chromium/v8/src/objects/object-macros.h27
-rw-r--r--chromium/v8/src/objects/objects-body-descriptors-inl.h62
-rw-r--r--chromium/v8/src/objects/objects-definitions.h6
-rw-r--r--chromium/v8/src/objects/objects-inl.h81
-rw-r--r--chromium/v8/src/objects/objects.cc282
-rw-r--r--chromium/v8/src/objects/objects.h26
-rw-r--r--chromium/v8/src/objects/ordered-hash-table.cc44
-rw-r--r--chromium/v8/src/objects/ordered-hash-table.h50
-rw-r--r--chromium/v8/src/objects/property-array-inl.h7
-rw-r--r--chromium/v8/src/objects/property-array.h6
-rw-r--r--chromium/v8/src/objects/scope-info.cc17
-rw-r--r--chromium/v8/src/objects/scope-info.h13
-rw-r--r--chromium/v8/src/objects/script-inl.h46
-rw-r--r--chromium/v8/src/objects/script.h36
-rw-r--r--chromium/v8/src/objects/script.tq8
-rw-r--r--chromium/v8/src/objects/shared-function-info-inl.h32
-rw-r--r--chromium/v8/src/objects/shared-function-info.cc10
-rw-r--r--chromium/v8/src/objects/shared-function-info.h34
-rw-r--r--chromium/v8/src/objects/shared-function-info.tq1
-rw-r--r--chromium/v8/src/objects/slots-inl.h21
-rw-r--r--chromium/v8/src/objects/slots.h4
-rw-r--r--chromium/v8/src/objects/smi.h10
-rw-r--r--chromium/v8/src/objects/source-text-module.cc6
-rw-r--r--chromium/v8/src/objects/source-text-module.h13
-rw-r--r--chromium/v8/src/objects/source-text-module.tq2
-rw-r--r--chromium/v8/src/objects/string-inl.h17
-rw-r--r--chromium/v8/src/objects/string-table.cc25
-rw-r--r--chromium/v8/src/objects/string-table.h4
-rw-r--r--chromium/v8/src/objects/string.cc14
-rw-r--r--chromium/v8/src/objects/string.h13
-rw-r--r--chromium/v8/src/objects/struct-inl.h7
-rw-r--r--chromium/v8/src/objects/struct.h1
-rw-r--r--chromium/v8/src/objects/swiss-name-dictionary-inl.h43
-rw-r--r--chromium/v8/src/objects/swiss-name-dictionary.cc5
-rw-r--r--chromium/v8/src/objects/swiss-name-dictionary.h26
-rw-r--r--chromium/v8/src/objects/tagged-field-inl.h62
-rw-r--r--chromium/v8/src/objects/tagged-field.h9
-rw-r--r--chromium/v8/src/objects/tagged-impl.h3
-rw-r--r--chromium/v8/src/objects/tagged-index.h4
-rw-r--r--chromium/v8/src/objects/templates-inl.h33
-rw-r--r--chromium/v8/src/objects/templates.h51
-rw-r--r--chromium/v8/src/objects/templates.tq8
-rw-r--r--chromium/v8/src/objects/torque-defined-classes.h1
-rw-r--r--chromium/v8/src/objects/transitions.cc10
-rw-r--r--chromium/v8/src/objects/transitions.h8
-rw-r--r--chromium/v8/src/objects/value-serializer.cc48
-rw-r--r--chromium/v8/src/objects/visitors.h5
-rw-r--r--chromium/v8/src/parsing/literal-buffer.cc4
-rw-r--r--chromium/v8/src/parsing/literal-buffer.h4
-rw-r--r--chromium/v8/src/parsing/parse-info.cc4
-rw-r--r--chromium/v8/src/parsing/parse-info.h4
-rw-r--r--chromium/v8/src/parsing/parser-base.h49
-rw-r--r--chromium/v8/src/parsing/parser.cc62
-rw-r--r--chromium/v8/src/parsing/parser.h13
-rw-r--r--chromium/v8/src/parsing/pending-compilation-error-handler.cc12
-rw-r--r--chromium/v8/src/parsing/pending-compilation-error-handler.h12
-rw-r--r--chromium/v8/src/parsing/preparser.cc7
-rw-r--r--chromium/v8/src/parsing/rewriter.cc7
-rw-r--r--chromium/v8/src/parsing/scanner-character-streams.cc6
-rw-r--r--chromium/v8/src/parsing/scanner.cc8
-rw-r--r--chromium/v8/src/parsing/scanner.h8
-rw-r--r--chromium/v8/src/profiler/cpu-profiler.cc21
-rw-r--r--chromium/v8/src/profiler/cpu-profiler.h20
-rw-r--r--chromium/v8/src/profiler/heap-profiler.cc5
-rw-r--r--chromium/v8/src/profiler/heap-profiler.h3
-rw-r--r--chromium/v8/src/profiler/heap-snapshot-generator.cc148
-rw-r--r--chromium/v8/src/profiler/heap-snapshot-generator.h31
-rw-r--r--chromium/v8/src/profiler/profile-generator.cc33
-rw-r--r--chromium/v8/src/profiler/profile-generator.h33
-rw-r--r--chromium/v8/src/profiler/profiler-listener.cc7
-rw-r--r--chromium/v8/src/profiler/profiler-listener.h1
-rw-r--r--chromium/v8/src/profiler/sampling-heap-profiler.cc2
-rw-r--r--chromium/v8/src/profiler/tick-sample.cc20
-rw-r--r--chromium/v8/src/profiler/tick-sample.h1
-rw-r--r--chromium/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc14
-rw-r--r--chromium/v8/src/regexp/ppc/OWNERS5
-rw-r--r--chromium/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc98
-rw-r--r--chromium/v8/src/regexp/regexp-ast.cc4
-rw-r--r--chromium/v8/src/regexp/regexp-ast.h3
-rw-r--r--chromium/v8/src/regexp/regexp-bytecodes.h4
-rw-r--r--chromium/v8/src/regexp/regexp-compiler.cc34
-rw-r--r--chromium/v8/src/regexp/regexp-dotprinter.cc3
-rw-r--r--chromium/v8/src/regexp/regexp-macro-assembler.cc7
-rw-r--r--chromium/v8/src/regexp/regexp-nodes.h5
-rw-r--r--chromium/v8/src/regexp/x64/regexp-macro-assembler-x64.cc15
-rw-r--r--chromium/v8/src/roots/roots.cc4
-rw-r--r--chromium/v8/src/roots/roots.h9
-rw-r--r--chromium/v8/src/runtime/runtime-array.cc15
-rw-r--r--chromium/v8/src/runtime/runtime-classes.cc7
-rw-r--r--chromium/v8/src/runtime/runtime-compiler.cc64
-rw-r--r--chromium/v8/src/runtime/runtime-debug.cc367
-rw-r--r--chromium/v8/src/runtime/runtime-internal.cc28
-rw-r--r--chromium/v8/src/runtime/runtime-literals.cc3
-rw-r--r--chromium/v8/src/runtime/runtime-object.cc117
-rw-r--r--chromium/v8/src/runtime/runtime-promise.cc25
-rw-r--r--chromium/v8/src/runtime/runtime-regexp.cc10
-rw-r--r--chromium/v8/src/runtime/runtime-scopes.cc4
-rw-r--r--chromium/v8/src/runtime/runtime-strings.cc2
-rw-r--r--chromium/v8/src/runtime/runtime-test-wasm.cc9
-rw-r--r--chromium/v8/src/runtime/runtime-test.cc69
-rw-r--r--chromium/v8/src/runtime/runtime-typedarray.cc24
-rw-r--r--chromium/v8/src/runtime/runtime.cc3
-rw-r--r--chromium/v8/src/runtime/runtime.h260
-rw-r--r--chromium/v8/src/sanitizer/asan.h28
-rw-r--r--chromium/v8/src/sanitizer/msan.h36
-rw-r--r--chromium/v8/src/sanitizer/tsan.h47
-rw-r--r--chromium/v8/src/snapshot/code-serializer.cc3
-rw-r--r--chromium/v8/src/snapshot/context-deserializer.cc4
-rw-r--r--chromium/v8/src/snapshot/deserializer.cc3
-rw-r--r--chromium/v8/src/snapshot/embedded/embedded-data.cc21
-rw-r--r--chromium/v8/src/snapshot/embedded/embedded-data.h8
-rw-r--r--chromium/v8/src/snapshot/mksnapshot.cc4
-rw-r--r--chromium/v8/src/snapshot/object-deserializer.cc4
-rw-r--r--chromium/v8/src/snapshot/read-only-serializer.cc17
-rw-r--r--chromium/v8/src/snapshot/read-only-serializer.h2
-rw-r--r--chromium/v8/src/snapshot/serializer-deserializer.h9
-rw-r--r--chromium/v8/src/snapshot/serializer.cc2
-rw-r--r--chromium/v8/src/snapshot/serializer.h4
-rw-r--r--chromium/v8/src/snapshot/snapshot-utils.cc2
-rw-r--r--chromium/v8/src/snapshot/snapshot.cc26
-rw-r--r--chromium/v8/src/snapshot/snapshot.h9
-rw-r--r--chromium/v8/src/strings/unicode.cc4461
-rw-r--r--chromium/v8/src/third_party/vtune/BUILD.gn9
-rw-r--r--chromium/v8/src/torque/class-debug-reader-generator.cc5
-rw-r--r--chromium/v8/src/torque/constants.h18
-rw-r--r--chromium/v8/src/torque/global-context.h2
-rw-r--r--chromium/v8/src/torque/implementation-visitor.cc248
-rw-r--r--chromium/v8/src/torque/torque-parser.cc19
-rw-r--r--chromium/v8/src/torque/torque.cc5
-rw-r--r--chromium/v8/src/tracing/trace-event.cc4
-rw-r--r--chromium/v8/src/tracing/trace-event.h9
-rw-r--r--chromium/v8/src/trap-handler/DEPS19
-rw-r--r--chromium/v8/src/trap-handler/handler-inside-posix.h1
-rw-r--r--chromium/v8/src/trap-handler/handler-inside-win.h4
-rw-r--r--chromium/v8/src/trap-handler/handler-inside.cc7
-rw-r--r--chromium/v8/src/trap-handler/handler-outside-posix.cc6
-rw-r--r--chromium/v8/src/trap-handler/handler-outside-win.cc2
-rw-r--r--chromium/v8/src/trap-handler/handler-outside.cc35
-rw-r--r--chromium/v8/src/trap-handler/handler-shared.cc2
-rw-r--r--chromium/v8/src/trap-handler/trap-handler-internal.h2
-rw-r--r--chromium/v8/src/trap-handler/trap-handler.h72
-rw-r--r--chromium/v8/src/utils/allocation.cc138
-rw-r--r--chromium/v8/src/utils/allocation.h105
-rw-r--r--chromium/v8/src/utils/ostreams.cc2
-rw-r--r--chromium/v8/src/utils/ostreams.h10
-rw-r--r--chromium/v8/src/utils/v8dll-main.cc2
-rw-r--r--chromium/v8/src/utils/vector.h2
-rw-r--r--chromium/v8/src/wasm/OWNERS1
-rw-r--r--chromium/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h8
-rw-r--r--chromium/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h97
-rw-r--r--chromium/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h158
-rw-r--r--chromium/v8/src/wasm/baseline/liftoff-assembler.cc32
-rw-r--r--chromium/v8/src/wasm/baseline/liftoff-assembler.h9
-rw-r--r--chromium/v8/src/wasm/baseline/liftoff-compiler.cc325
-rw-r--r--chromium/v8/src/wasm/baseline/liftoff-register.h21
-rw-r--r--chromium/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h12
-rw-r--r--chromium/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h12
-rw-r--r--chromium/v8/src/wasm/baseline/ppc/OWNERS5
-rw-r--r--chromium/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h165
-rw-r--r--chromium/v8/src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h303
-rw-r--r--chromium/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h350
-rw-r--r--chromium/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h147
-rw-r--r--chromium/v8/src/wasm/branch-hint-map.h46
-rw-r--r--chromium/v8/src/wasm/c-api.cc46
-rw-r--r--chromium/v8/src/wasm/compilation-environment.h12
-rw-r--r--chromium/v8/src/wasm/function-body-decoder-impl.h681
-rw-r--r--chromium/v8/src/wasm/function-compiler.cc17
-rw-r--r--chromium/v8/src/wasm/function-compiler.h9
-rw-r--r--chromium/v8/src/wasm/graph-builder-interface.cc138
-rw-r--r--chromium/v8/src/wasm/graph-builder-interface.h3
-rw-r--r--chromium/v8/src/wasm/memory-protection-key.cc189
-rw-r--r--chromium/v8/src/wasm/memory-protection-key.h90
-rw-r--r--chromium/v8/src/wasm/memory-tracing.cc2
-rw-r--r--chromium/v8/src/wasm/module-compiler.cc65
-rw-r--r--chromium/v8/src/wasm/module-decoder.cc360
-rw-r--r--chromium/v8/src/wasm/module-decoder.h10
-rw-r--r--chromium/v8/src/wasm/module-instantiate.cc48
-rw-r--r--chromium/v8/src/wasm/value-type.h39
-rw-r--r--chromium/v8/src/wasm/wasm-code-manager.cc439
-rw-r--r--chromium/v8/src/wasm/wasm-code-manager.h119
-rw-r--r--chromium/v8/src/wasm/wasm-constants.h7
-rw-r--r--chromium/v8/src/wasm/wasm-debug.cc10
-rw-r--r--chromium/v8/src/wasm/wasm-engine.cc78
-rw-r--r--chromium/v8/src/wasm/wasm-engine.h12
-rw-r--r--chromium/v8/src/wasm/wasm-feature-flags.h14
-rw-r--r--chromium/v8/src/wasm/wasm-init-expr.cc57
-rw-r--r--chromium/v8/src/wasm/wasm-init-expr.h150
-rw-r--r--chromium/v8/src/wasm/wasm-js.cc35
-rw-r--r--chromium/v8/src/wasm/wasm-limits.h5
-rw-r--r--chromium/v8/src/wasm/wasm-module-builder.cc12
-rw-r--r--chromium/v8/src/wasm/wasm-module-builder.h2
-rw-r--r--chromium/v8/src/wasm/wasm-module.cc14
-rw-r--r--chromium/v8/src/wasm/wasm-module.h32
-rw-r--r--chromium/v8/src/wasm/wasm-objects-inl.h38
-rw-r--r--chromium/v8/src/wasm/wasm-objects.cc130
-rw-r--r--chromium/v8/src/wasm/wasm-objects.h43
-rw-r--r--chromium/v8/src/wasm/wasm-objects.tq22
-rw-r--r--chromium/v8/src/wasm/wasm-opcodes-inl.h7
-rw-r--r--chromium/v8/src/wasm/wasm-opcodes.cc3
-rw-r--r--chromium/v8/src/wasm/wasm-opcodes.h184
-rw-r--r--chromium/v8/src/wasm/wasm-serialization.cc6
-rw-r--r--chromium/v8/src/wasm/wasm-subtyping.cc71
-rw-r--r--chromium/v8/src/wasm/wasm-subtyping.h14
-rw-r--r--chromium/v8/src/wasm/wasm-value.h2
-rw-r--r--chromium/v8/src/web-snapshot/web-snapshot.cc189
-rw-r--r--chromium/v8/src/web-snapshot/web-snapshot.h15
-rw-r--r--chromium/v8/src/zone/zone-segment.cc2
-rw-r--r--chromium/v8/src/zone/zone.cc2
-rw-r--r--chromium/v8/test/benchmarks/cpp/BUILD.gn5
-rw-r--r--chromium/v8/test/cctest/BUILD.gn4
-rw-r--r--chromium/v8/test/mjsunit/BUILD.gn8
-rw-r--r--chromium/v8/test/unittests/BUILD.gn9
-rw-r--r--chromium/v8/third_party/google_benchmark/BUILD.gn8
-rw-r--r--chromium/v8/third_party/googletest/BUILD.gn38
-rw-r--r--chromium/v8/third_party/inspector_protocol/BUILD.gn6
-rw-r--r--chromium/v8/third_party/inspector_protocol/README.v82
-rw-r--r--chromium/v8/third_party/inspector_protocol/crdtp/serializable.cc5
-rw-r--r--chromium/v8/third_party/inspector_protocol/crdtp/status.cc2
-rw-r--r--chromium/v8/third_party/inspector_protocol/crdtp/status.h1
-rw-r--r--chromium/v8/third_party/inspector_protocol/lib/ValueConversions_cpp.template4
-rw-r--r--chromium/v8/third_party/inspector_protocol/lib/base_string_adapter_cc.template5
-rw-r--r--chromium/v8/third_party/inspector_protocol/pdl.py3
-rw-r--r--chromium/v8/third_party/jsoncpp/BUILD.gn2
-rwxr-xr-xchromium/v8/tools/clusterfuzz/v8_foozzie.py87
-rwxr-xr-xchromium/v8/tools/clusterfuzz/v8_foozzie_test.py32
-rw-r--r--chromium/v8/tools/clusterfuzz/v8_mock.js12
-rw-r--r--chromium/v8/tools/codemap.mjs11
-rwxr-xr-xchromium/v8/tools/cppgc/gen_cmake.py4
-rw-r--r--chromium/v8/tools/debug_helper/get-object-properties.cc14
-rwxr-xr-xchromium/v8/tools/dev/gm.py7
-rwxr-xr-xchromium/v8/tools/dev/v8gen.py10
-rwxr-xr-xchromium/v8/tools/generate-header-include-checks.py2
-rwxr-xr-xchromium/v8/tools/mb/mb.py17
-rw-r--r--chromium/v8/tools/profile.mjs13
-rw-r--r--chromium/v8/tools/profview/profile-utils.js63
-rw-r--r--chromium/v8/tools/profview/profview.js166
-rwxr-xr-xchromium/v8/tools/release/auto_roll.py4
-rwxr-xr-xchromium/v8/tools/release/test_scripts.py4
-rw-r--r--chromium/v8/tools/system-analyzer/index.css2
-rw-r--r--chromium/v8/tools/system-analyzer/index.mjs14
-rw-r--r--chromium/v8/tools/system-analyzer/processor.mjs24
-rw-r--r--chromium/v8/tools/system-analyzer/view/code-panel-template.html4
-rw-r--r--chromium/v8/tools/system-analyzer/view/code-panel.mjs13
-rw-r--r--chromium/v8/tools/system-analyzer/view/helper.mjs95
-rw-r--r--chromium/v8/tools/system-analyzer/view/list-panel-template.html4
-rw-r--r--chromium/v8/tools/system-analyzer/view/list-panel.mjs20
-rw-r--r--chromium/v8/tools/system-analyzer/view/map-panel-template.html2
-rw-r--r--chromium/v8/tools/system-analyzer/view/map-panel.mjs197
-rw-r--r--chromium/v8/tools/system-analyzer/view/map-panel/map-details.mjs2
-rw-r--r--chromium/v8/tools/system-analyzer/view/map-panel/map-transitions.mjs2
-rw-r--r--chromium/v8/tools/system-analyzer/view/script-panel-template.html10
-rw-r--r--chromium/v8/tools/system-analyzer/view/script-panel.mjs6
-rw-r--r--chromium/v8/tools/system-analyzer/view/timeline/timeline-track.mjs4
-rw-r--r--chromium/v8/tools/system-analyzer/view/tool-tip-template.html1
-rw-r--r--chromium/v8/tools/system-analyzer/view/tool-tip.mjs7
-rw-r--r--chromium/v8/tools/testrunner/base_runner.py28
-rw-r--r--chromium/v8/tools/testrunner/local/statusfile.py2
-rw-r--r--chromium/v8/tools/testrunner/local/utils.py52
-rw-r--r--chromium/v8/tools/testrunner/local/variants.py20
-rw-r--r--chromium/v8/tools/testrunner/outproc/message.py8
-rwxr-xr-xchromium/v8/tools/testrunner/standard_runner.py2
-rw-r--r--chromium/v8/tools/testrunner/testproc/fuzzer.py1
-rw-r--r--chromium/v8/tools/tickprocessor-driver.mjs8
-rw-r--r--chromium/v8/tools/tickprocessor.mjs25
-rw-r--r--chromium/v8/tools/ubsan/ignorelist.txt (renamed from chromium/v8/tools/ubsan/blacklist.txt)2
-rw-r--r--chromium/v8/tools/ubsan/vptr_ignorelist.txt (renamed from chromium/v8/tools/ubsan/vptr_blacklist.txt)6
-rwxr-xr-xchromium/v8/tools/unittests/run_tests_test.py3
-rwxr-xr-xchromium/v8/tools/v8_presubmit.py3
-rw-r--r--chromium/v8/tools/v8heapconst.py650
-rw-r--r--chromium/v8/tools/v8windbg/BUILD.gn5
-rw-r--r--chromium/v8/tools/v8windbg/README.md4
-rw-r--r--chromium/v8/tools/v8windbg/src/list-chunks.cc238
-rw-r--r--chromium/v8/tools/v8windbg/src/list-chunks.h100
-rw-r--r--chromium/v8/tools/v8windbg/src/v8windbg-extension.cc4
-rw-r--r--chromium/v8/tools/whitespace.txt4
1029 files changed, 38982 insertions, 32916 deletions
diff --git a/chromium/v8/.gn b/chromium/v8/.gn
index d4ad9599548..a691fa339b0 100644
--- a/chromium/v8/.gn
+++ b/chromium/v8/.gn
@@ -7,11 +7,21 @@ import("//build/dotfile_settings.gni")
# The location of the build configuration file.
buildconfig = "//build/config/BUILDCONFIG.gn"
+# The python interpreter to use by default. On Windows, this will look
+# for python3.exe and python3.bat.
+script_executable = "python3"
+
# These are the targets to check headers for by default. The files in targets
# matching these patterns (see "gn help label_pattern" for format) will have
# their includes checked for proper dependencies when you run either
# "gn check" or "gn gen --check".
-check_targets = []
+no_check_targets = [
+ "//:cppgc_base",
+ "//:v8_internal_headers",
+ "//src/inspector:inspector",
+ "//test/cctest:cctest_sources",
+ "//third_party/icu:*",
+]
# These are the list of GN files that run exec_script. This whitelist exists
# to force additional review for new uses of exec_script, which is strongly
diff --git a/chromium/v8/AUTHORS b/chromium/v8/AUTHORS
index 07644af9d18..15909406cc6 100644
--- a/chromium/v8/AUTHORS
+++ b/chromium/v8/AUTHORS
@@ -84,6 +84,7 @@ Colin Ihrig <cjihrig@gmail.com>
Cong Zuo <zckevinzc@gmail.com>
Daniel Andersson <kodandersson@gmail.com>
Daniel Bevenius <daniel.bevenius@gmail.com>
+Daniel Dromboski <dandromb@gmail.com>
Daniel James <dnljms@gmail.com>
David Carlier <devnexen@gmail.com>
David Manouchehri <david@davidmanouchehri.com>
@@ -211,6 +212,7 @@ Seo Sanghyeon <sanxiyn@gmail.com>
Shawn Anastasio <shawnanastasio@gmail.com>
Shawn Presser <shawnpresser@gmail.com>
Stefan Penner <stefan.penner@gmail.com>
+Stephen Belanger <stephen.belanger@datadoghq.com>
Sylvestre Ledru <sledru@mozilla.com>
Taketoshi Aono <brn@b6n.ch>
Tao Liqiang <taolq@outlook.com>
@@ -228,6 +230,7 @@ Vladimir Krivosheev <develar@gmail.com>
Vladimir Shutoff <vovan@shutoff.ru>
Wei Wu <lazyparser@gmail.com>
Wenlu Wang <kingwenlu@gmail.com>
+Wenyu Zhao <wenyu.zhao@anu.edu.au>
Wiktor Garbacz <wiktor.garbacz@gmail.com>
Wouter Vermeiren <wouter.vermeiren@essensium.com>
Xiaofang Zou <zouxiaofang@iscas.ac.cn>
@@ -239,6 +242,7 @@ Yong Wang <ccyongwang@tencent.com>
Youfeng Hao <ajihyf@gmail.com>
Yu Yin <xwafish@gmail.com>
Yusif Khudhur <yusif.khudhur@gmail.com>
+Yuri Iozzelli <yuri@leaningtech.com>
Zac Hansen <xaxxon@gmail.com>
Zeynep Cankara <zeynepcankara402@gmail.com>
Zhao Jiazhong <kyslie3100@gmail.com>
diff --git a/chromium/v8/BUILD.gn b/chromium/v8/BUILD.gn
index d2bfb6129dc..5e3045bfdc1 100644
--- a/chromium/v8/BUILD.gn
+++ b/chromium/v8/BUILD.gn
@@ -41,7 +41,7 @@ declare_args() {
v8_enable_future = false
# Sets -DSYSTEM_INSTRUMENTATION. Enables OS-dependent event tracing
- v8_enable_system_instrumentation = true
+ v8_enable_system_instrumentation = is_win || is_mac
# Sets the GUID for the ETW provider
v8_etw_guid = ""
@@ -95,8 +95,19 @@ declare_args() {
v8_win64_unwinding_info = true
# Enable code comments for builtins in the snapshot (impacts performance).
+ # This also enables v8_code_comments.
v8_enable_snapshot_code_comments = false
+ # Allow runtime-enabled code comments (with --code-comments). Enabled by
+ # default in debug builds.
+ # Sets -dV8_CODE_COMMENTS
+ v8_code_comments = ""
+
+ # Allow runtime-enabled debug code (with --debug-code). Enabled by default in
+ # debug builds.
+ # Sets -dV8_ENABLE_DEBUG_CODE
+ v8_enable_debug_code = ""
+
# Enable native counters from the snapshot (impacts performance, sets
# -dV8_SNAPSHOT_NATIVE_CODE_COUNTERS).
# This option will generate extra code in the snapshot to increment counters,
@@ -200,10 +211,6 @@ declare_args() {
(is_linux || is_chromeos || is_mac)) ||
(v8_current_cpu == "ppc64" && (is_linux || is_chromeos))
- # Temporary flag to allow embedders to update their microtasks scopes
- # while rolling in a new version of V8.
- v8_check_microtasks_scopes_consistency = ""
-
# Enable mitigations for executing untrusted code.
# Disabled by default on ia32 due to conflicting requirements with embedded
# builtins. Enabled by default on Android since it doesn't support
@@ -272,6 +279,10 @@ declare_args() {
# Enable heap reservation of size 4GB. Only possible for 64bit archs.
cppgc_enable_caged_heap = v8_current_cpu == "x64" || v8_current_cpu == "arm64"
+ # Enable verification of live bytes in the marking verifier.
+ # TODO(v8:11785): Enable by default when running with the verifier.
+ cppgc_enable_verify_live_bytes = false
+
# Enable young generation in cppgc.
cppgc_enable_young_generation = false
@@ -300,6 +311,12 @@ declare_args() {
# meaning that they are not switched to fast mode.
# Sets -DV8_DICT_PROPERTY_CONST_TRACKING
v8_dict_property_const_tracking = false
+
+ # Enable map packing & unpacking (sets -dV8_MAP_PACKING).
+ v8_enable_map_packing = false
+
+ # Allow for JS promise hooks (instead of just C++).
+ v8_allow_javascript_in_promise_hooks = false
}
# Derived defaults.
@@ -321,9 +338,15 @@ if (v8_enable_test_features == "") {
if (v8_enable_v8_checks == "") {
v8_enable_v8_checks = v8_enable_debugging_features
}
-if (v8_check_microtasks_scopes_consistency == "") {
- v8_check_microtasks_scopes_consistency =
- v8_enable_debugging_features || dcheck_always_on
+if (v8_enable_snapshot_code_comments) {
+ assert(v8_code_comments == true || v8_code_comments == "",
+ "v8_enable_snapshot_code_comments conflicts with v8_code_comments.")
+ v8_code_comments = true
+} else if (v8_code_comments == "") {
+ v8_code_comments = v8_enable_debugging_features
+}
+if (v8_enable_debug_code == "") {
+ v8_enable_debug_code = v8_enable_debugging_features
}
if (v8_enable_snapshot_native_code_counters == "") {
v8_enable_snapshot_native_code_counters = v8_enable_debugging_features
@@ -333,7 +356,7 @@ if (v8_enable_pointer_compression == "") {
v8_current_cpu == "arm64" || v8_current_cpu == "x64"
}
if (v8_enable_pointer_compression_shared_cage == "") {
- v8_enable_pointer_compression_shared_cage = false
+ v8_enable_pointer_compression_shared_cage = v8_enable_pointer_compression
}
if (v8_enable_fast_torque == "") {
v8_enable_fast_torque = v8_enable_fast_mksnapshot
@@ -357,6 +380,13 @@ if (v8_enable_atomic_object_field_writes == "") {
if (v8_enable_atomic_marking_state == "") {
v8_enable_atomic_marking_state = v8_enable_concurrent_marking
}
+if (v8_enable_third_party_heap) {
+ v8_disable_write_barriers = true
+ v8_enable_single_generation = true
+ v8_enable_shared_ro_heap = false
+ v8_enable_pointer_compression = false
+ v8_enable_pointer_compression_shared_cage = false
+}
assert(!v8_enable_concurrent_marking || v8_enable_atomic_object_field_writes,
"Concurrent marking requires atomic object field writes.")
assert(!v8_enable_concurrent_marking || v8_enable_atomic_marking_state,
@@ -386,7 +416,8 @@ if (v8_enable_short_builtin_calls &&
v8_enable_short_builtin_calls = false
}
if (v8_enable_shared_ro_heap == "") {
- v8_enable_shared_ro_heap = !v8_enable_pointer_compression
+ v8_enable_shared_ro_heap = !v8_enable_pointer_compression ||
+ v8_enable_pointer_compression_shared_cage
}
assert(!v8_disable_write_barriers || v8_enable_single_generation,
@@ -398,15 +429,18 @@ assert(v8_current_cpu != "x86" || !v8_untrusted_code_mitigations,
assert(v8_current_cpu == "arm64" || !v8_control_flow_integrity,
"Control-flow integrity is only supported on arm64")
-if (v8_enable_shared_ro_heap && v8_enable_pointer_compression) {
+if (v8_enable_shared_ro_heap && v8_enable_pointer_compression &&
+ !v8_enable_pointer_compression_shared_cage) {
assert(
is_linux || is_chromeos || is_android,
"Sharing read-only heap with pointer compression is only supported on Linux or Android")
}
-assert(
- !v8_enable_pointer_compression_shared_cage || !v8_enable_shared_ro_heap,
- "Sharing read-only heap is not yet supported when sharing a pointer compression cage")
+assert(!v8_enable_map_packing || !v8_enable_pointer_compression,
+ "Map packing does not support pointer compression")
+
+assert(!v8_enable_map_packing || v8_current_cpu == "x64",
+ "Map packing is only supported on x64")
assert(!v8_use_multi_snapshots || !v8_control_flow_integrity,
"Control-flow integrity does not support multisnapshots")
@@ -418,6 +452,10 @@ assert(
!v8_enable_pointer_compression_shared_cage || v8_enable_pointer_compression,
"Can't share a pointer compression cage if pointers aren't compressed")
+assert(!v8_enable_pointer_compression_shared_cage || v8_current_cpu == "x64" ||
+ v8_current_cpu == "arm64",
+ "Sharing a pointer compression cage is only supported on x64 and arm64")
+
assert(!v8_enable_unconditional_write_barriers || !v8_disable_write_barriers,
"Write barriers can't be both enabled and disabled")
@@ -566,6 +604,7 @@ external_v8_defines = [
"V8_IMMINENT_DEPRECATION_WARNINGS",
"V8_NO_ARGUMENTS_ADAPTOR",
"V8_USE_PERFETTO",
+ "V8_MAP_PACKING",
]
enabled_external_v8_defines = []
@@ -575,11 +614,11 @@ if (v8_enable_v8_checks) {
}
if (v8_enable_pointer_compression) {
enabled_external_v8_defines += [ "V8_COMPRESS_POINTERS" ]
-}
-if (v8_enable_pointer_compression_shared_cage) {
- enabled_external_v8_defines += [ "V8_COMPRESS_POINTERS_IN_SHARED_CAGE" ]
-} else if (v8_enable_pointer_compression) {
- enabled_external_v8_defines += [ "V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE" ]
+ if (v8_enable_pointer_compression_shared_cage) {
+ enabled_external_v8_defines += [ "V8_COMPRESS_POINTERS_IN_SHARED_CAGE" ]
+ } else {
+ enabled_external_v8_defines += [ "V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE" ]
+ }
}
if (v8_enable_pointer_compression || v8_enable_31bit_smis_on_64bit_arch) {
enabled_external_v8_defines += [ "V8_31BIT_SMIS_ON_64BIT_ARCH" ]
@@ -599,6 +638,9 @@ if (v8_imminent_deprecation_warnings) {
if (v8_use_perfetto) {
enabled_external_v8_defines += [ "V8_USE_PERFETTO" ]
}
+if (v8_enable_map_packing) {
+ enabled_external_v8_defines += [ "V8_MAP_PACKING" ]
+}
disabled_external_v8_defines = external_v8_defines - enabled_external_v8_defines
@@ -665,6 +707,10 @@ config("features") {
":cppgc_header_features",
]
+ if (cppgc_enable_verify_live_bytes) {
+ defines += [ "CPPGC_VERIFY_LIVE_BYTES" ]
+ }
+
if (v8_embedder_string != "") {
defines += [ "V8_EMBEDDER_STRING=\"$v8_embedder_string\"" ]
}
@@ -729,6 +775,12 @@ config("features") {
if (v8_enable_handle_zapping) {
defines += [ "ENABLE_HANDLE_ZAPPING" ]
}
+ if (v8_code_comments == true) {
+ defines += [ "V8_CODE_COMMENTS" ]
+ }
+ if (v8_enable_debug_code) {
+ defines += [ "V8_ENABLE_DEBUG_CODE" ]
+ }
if (v8_enable_snapshot_native_code_counters) {
defines += [ "V8_SNAPSHOT_NATIVE_CODE_COUNTERS" ]
}
@@ -756,9 +808,6 @@ config("features") {
if (v8_enable_lazy_source_positions) {
defines += [ "V8_ENABLE_LAZY_SOURCE_POSITIONS" ]
}
- if (v8_check_microtasks_scopes_consistency) {
- defines += [ "V8_CHECK_MICROTASKS_SCOPES_CONSISTENCY" ]
- }
if (v8_use_multi_snapshots) {
defines += [ "V8_MULTI_SNAPSHOTS" ]
}
@@ -807,6 +856,9 @@ config("features") {
if (v8_dict_property_const_tracking) {
defines += [ "V8_DICT_PROPERTY_CONST_TRACKING" ]
}
+ if (v8_allow_javascript_in_promise_hooks) {
+ defines += [ "V8_ALLOW_JAVASCRIPT_IN_PROMISE_HOOKS" ]
+ }
}
config("toolchain") {
@@ -1038,6 +1090,10 @@ config("toolchain") {
defines += [ "ENABLE_VERIFY_CSA" ]
}
+ if (v8_enable_runtime_call_stats) {
+ defines += [ "V8_RUNTIME_CALL_STATS" ]
+ }
+
if (!v8_untrusted_code_mitigations) {
defines += [ "DISABLE_UNTRUSTED_CODE_MITIGATIONS" ]
}
@@ -1244,6 +1300,8 @@ action("postmortem-metadata") {
"src/objects/map.cc",
"src/objects/map.h",
"src/objects/map-inl.h",
+ "src/objects/megadom-handler.h",
+ "src/objects/megadom-handler-inl.h",
"src/objects/name.h",
"src/objects/name-inl.h",
"src/objects/objects.h",
@@ -1432,6 +1490,7 @@ torque_files = [
"src/objects/js-weak-refs.tq",
"src/objects/literal-objects.tq",
"src/objects/map.tq",
+ "src/objects/megadom-handler.tq",
"src/objects/microtask.tq",
"src/objects/module.tq",
"src/objects/name.tq",
@@ -1515,44 +1574,37 @@ template("run_torque") {
destination_folder = "$target_gen_dir/torque-generated$suffix"
- files = [
- "$target_gen_dir/torque-generated/bit-fields.h",
- "$target_gen_dir/torque-generated/builtin-definitions.h",
- "$target_gen_dir/torque-generated/class-debug-readers.cc",
- "$target_gen_dir/torque-generated/class-debug-readers.h",
- "$target_gen_dir/torque-generated/class-forward-declarations.h",
- "$target_gen_dir/torque-generated/class-verifiers.cc",
- "$target_gen_dir/torque-generated/class-verifiers.h",
- "$target_gen_dir/torque-generated/csa-types.h",
- "$target_gen_dir/torque-generated/debug-macros.cc",
- "$target_gen_dir/torque-generated/debug-macros.h",
- "$target_gen_dir/torque-generated/enum-verifiers.cc",
- "$target_gen_dir/torque-generated/exported-macros-assembler.cc",
- "$target_gen_dir/torque-generated/exported-macros-assembler.h",
- "$target_gen_dir/torque-generated/factory.cc",
- "$target_gen_dir/torque-generated/factory.inc",
- "$target_gen_dir/torque-generated/field-offsets.h",
- "$target_gen_dir/torque-generated/instance-types.h",
- "$target_gen_dir/torque-generated/interface-descriptors.inc",
- "$target_gen_dir/torque-generated/objects-body-descriptors-inl.inc",
- "$target_gen_dir/torque-generated/objects-printer.cc",
+ outputs = [
+ "$destination_folder/bit-fields.h",
+ "$destination_folder/builtin-definitions.h",
+ "$destination_folder/class-debug-readers.cc",
+ "$destination_folder/class-debug-readers.h",
+ "$destination_folder/class-forward-declarations.h",
+ "$destination_folder/class-verifiers.cc",
+ "$destination_folder/class-verifiers.h",
+ "$destination_folder/csa-types.h",
+ "$destination_folder/debug-macros.cc",
+ "$destination_folder/debug-macros.h",
+ "$destination_folder/enum-verifiers.cc",
+ "$destination_folder/exported-macros-assembler.cc",
+ "$destination_folder/exported-macros-assembler.h",
+ "$destination_folder/factory.cc",
+ "$destination_folder/factory.inc",
+ "$destination_folder/field-offsets.h",
+ "$destination_folder/instance-types.h",
+ "$destination_folder/interface-descriptors.inc",
+ "$destination_folder/objects-body-descriptors-inl.inc",
+ "$destination_folder/objects-printer.cc",
]
- outputs = []
- foreach(file, files) {
- outputs += [ string_replace(file,
- "$target_gen_dir/torque-generated",
- destination_folder) ]
- }
-
foreach(file, torque_files) {
filetq = string_replace(file, ".tq", "-tq")
outputs += [
- "$target_gen_dir/torque-generated/$filetq-csa.cc",
- "$target_gen_dir/torque-generated/$filetq-csa.h",
- "$target_gen_dir/torque-generated/$filetq-inl.inc",
- "$target_gen_dir/torque-generated/$filetq.cc",
- "$target_gen_dir/torque-generated/$filetq.inc",
+ "$destination_folder/$filetq-csa.cc",
+ "$destination_folder/$filetq-csa.h",
+ "$destination_folder/$filetq-inl.inc",
+ "$destination_folder/$filetq.cc",
+ "$destination_folder/$filetq.inc",
]
}
@@ -1892,11 +1944,16 @@ action("v8_dump_build_config") {
"v8_enable_atomic_object_field_writes=" +
"$v8_enable_atomic_object_field_writes",
"v8_enable_concurrent_marking=$v8_enable_concurrent_marking",
+ "v8_enable_single_generation=$v8_enable_single_generation",
"v8_enable_i18n_support=$v8_enable_i18n_support",
"v8_enable_verify_predictable=$v8_enable_verify_predictable",
"v8_enable_verify_csa=$v8_enable_verify_csa",
"v8_enable_lite_mode=$v8_enable_lite_mode",
+ "v8_enable_runtime_call_stats=$v8_enable_runtime_call_stats",
"v8_enable_pointer_compression=$v8_enable_pointer_compression",
+ "v8_enable_pointer_compression_shared_cage=" +
+ "$v8_enable_pointer_compression_shared_cage",
+ "v8_enable_third_party_heap=$v8_enable_third_party_heap",
"v8_enable_webassembly=$v8_enable_webassembly",
"v8_control_flow_integrity=$v8_control_flow_integrity",
"v8_target_cpu=\"$v8_target_cpu\"",
@@ -1918,7 +1975,10 @@ action("v8_dump_build_config") {
v8_source_set("v8_snapshot") {
visibility = [ ":*" ] # Targets in this file can depend on this.
- deps = []
+ deps = [
+ ":v8_internal_headers",
+ ":v8_libbase",
+ ]
public_deps = [
# This should be public so downstream targets can declare the snapshot
# output file as their inputs.
@@ -2000,7 +2060,6 @@ v8_source_set("v8_initializers") {
"src/builtins/builtins-conversion-gen.cc",
"src/builtins/builtins-data-view-gen.h",
"src/builtins/builtins-date-gen.cc",
- "src/builtins/builtins-debug-gen.cc",
"src/builtins/builtins-generator-gen.cc",
"src/builtins/builtins-global-gen.cc",
"src/builtins/builtins-handler-gen.cc",
@@ -2226,9 +2285,8 @@ if (v8_generate_external_defines_header) {
v8_header_set("v8_shared_internal_headers") {
visibility = [
":*",
- "test/cctest:*",
- "test/unittests:*",
- "tools/debug_helper/:*",
+ "test/*",
+ "tools/*",
]
configs = [ ":internal_config" ]
@@ -2245,7 +2303,10 @@ v8_header_set("v8_shared_internal_headers") {
}
v8_header_set("v8_flags") {
- visibility = [ ":*" ]
+ visibility = [
+ ":*",
+ "tools/*",
+ ]
configs = [ ":internal_config" ]
@@ -2273,6 +2334,7 @@ v8_header_set("v8_internal_headers") {
"src/api/api-arguments-inl.h",
"src/api/api-arguments.h",
"src/api/api-inl.h",
+ "src/api/api-macros-undef.h",
"src/api/api-macros.h",
"src/api/api-natives.h",
"src/api/api.h",
@@ -2289,6 +2351,7 @@ v8_header_set("v8_internal_headers") {
"src/baseline/baseline-assembler-inl.h",
"src/baseline/baseline-assembler.h",
"src/baseline/baseline-compiler.h",
+ "src/baseline/baseline-osr-inl.h",
"src/baseline/baseline.h",
"src/baseline/bytecode-offset-iterator.h",
"src/builtins/accessors.h",
@@ -2321,6 +2384,7 @@ v8_header_set("v8_internal_headers") {
"src/codegen/external-reference.h",
"src/codegen/flush-instruction-cache.h",
"src/codegen/handler-table.h",
+ "src/codegen/interface-descriptors-inl.h",
"src/codegen/interface-descriptors.h",
"src/codegen/label.h",
"src/codegen/machine-type.h",
@@ -2382,6 +2446,7 @@ v8_header_set("v8_internal_headers") {
"src/compiler/common-operator-reducer.h",
"src/compiler/common-operator.h",
"src/compiler/compilation-dependencies.h",
+ "src/compiler/compilation-dependency.h",
"src/compiler/compiler-source-position-table.h",
"src/compiler/constant-folding-reducer.h",
"src/compiler/control-equivalence.h",
@@ -2448,18 +2513,17 @@ v8_header_set("v8_internal_headers") {
"src/compiler/persistent-map.h",
"src/compiler/pipeline-statistics.h",
"src/compiler/pipeline.h",
+ "src/compiler/processed-feedback.h",
"src/compiler/property-access-builder.h",
"src/compiler/raw-machine-assembler.h",
"src/compiler/redundancy-elimination.h",
"src/compiler/refs-map.h",
"src/compiler/representation-change.h",
"src/compiler/schedule.h",
- "src/compiler/scheduled-machine-lowering.h",
"src/compiler/scheduler.h",
"src/compiler/select-lowering.h",
"src/compiler/serializer-for-background-compilation.h",
"src/compiler/serializer-hints.h",
- "src/compiler/simd-scalar-lowering.h",
"src/compiler/simplified-lowering.h",
"src/compiler/simplified-operator-reducer.h",
"src/compiler/simplified-operator.h",
@@ -2517,6 +2581,7 @@ v8_header_set("v8_internal_headers") {
"src/execution/interrupts-scope.h",
"src/execution/isolate-data.h",
"src/execution/isolate-inl.h",
+ "src/execution/isolate-utils-inl.h",
"src/execution/isolate-utils.h",
"src/execution/isolate.h",
"src/execution/local-isolate-inl.h",
@@ -2557,6 +2622,7 @@ v8_header_set("v8_internal_headers") {
"src/heap/base-space.h",
"src/heap/basic-memory-chunk.h",
"src/heap/code-object-registry.h",
+ "src/heap/code-range.h",
"src/heap/code-stats.h",
"src/heap/collection-barrier.h",
"src/heap/combined-heap.h",
@@ -2592,11 +2658,13 @@ v8_header_set("v8_internal_headers") {
"src/heap/list.h",
"src/heap/local-allocator-inl.h",
"src/heap/local-allocator.h",
+ "src/heap/local-factory-inl.h",
"src/heap/local-factory.h",
"src/heap/local-heap-inl.h",
"src/heap/local-heap.h",
"src/heap/mark-compact-inl.h",
"src/heap/mark-compact.h",
+ "src/heap/marking-barrier-inl.h",
"src/heap/marking-barrier.h",
"src/heap/marking-visitor-inl.h",
"src/heap/marking-visitor.h",
@@ -2679,13 +2747,13 @@ v8_header_set("v8_internal_headers") {
"src/libsampler/sampler.h",
"src/logging/code-events.h",
"src/logging/counters-definitions.h",
- "src/logging/counters-inl.h",
"src/logging/counters.h",
"src/logging/local-logger.h",
"src/logging/log-inl.h",
"src/logging/log-utils.h",
"src/logging/log.h",
"src/logging/metrics.h",
+ "src/logging/runtime-call-stats.h",
"src/logging/tracing-flags.h",
"src/numbers/bignum-dtoa.h",
"src/numbers/bignum.h",
@@ -2766,6 +2834,7 @@ v8_header_set("v8_internal_headers") {
"src/objects/js-array-inl.h",
"src/objects/js-array.h",
"src/objects/js-collection-inl.h",
+ "src/objects/js-collection-iterator-inl.h",
"src/objects/js-collection-iterator.h",
"src/objects/js-collection.h",
"src/objects/js-function-inl.h",
@@ -2782,6 +2851,8 @@ v8_header_set("v8_internal_headers") {
"src/objects/js-regexp-string-iterator-inl.h",
"src/objects/js-regexp-string-iterator.h",
"src/objects/js-regexp.h",
+ "src/objects/js-segments-inl.h",
+ "src/objects/js-segments.h",
"src/objects/js-weak-refs-inl.h",
"src/objects/js-weak-refs.h",
"src/objects/keys.h",
@@ -2797,6 +2868,8 @@ v8_header_set("v8_internal_headers") {
"src/objects/map.h",
"src/objects/maybe-object-inl.h",
"src/objects/maybe-object.h",
+ "src/objects/megadom-handler-inl.h",
+ "src/objects/megadom-handler.h",
"src/objects/microtask-inl.h",
"src/objects/microtask.h",
"src/objects/module-inl.h",
@@ -2831,6 +2904,7 @@ v8_header_set("v8_internal_headers") {
"src/objects/property.h",
"src/objects/prototype-info-inl.h",
"src/objects/prototype-info.h",
+ "src/objects/prototype-inl.h",
"src/objects/prototype.h",
"src/objects/regexp-match-info.h",
"src/objects/scope-info-inl.h",
@@ -2842,6 +2916,7 @@ v8_header_set("v8_internal_headers") {
"src/objects/slots-atomic-inl.h",
"src/objects/slots-inl.h",
"src/objects/slots.h",
+ "src/objects/source-text-module-inl.h",
"src/objects/source-text-module.h",
"src/objects/stack-frame-info-inl.h",
"src/objects/stack-frame-info.h",
@@ -2880,6 +2955,7 @@ v8_header_set("v8_internal_headers") {
"src/parsing/expression-scope.h",
"src/parsing/func-name-inferrer.h",
"src/parsing/import-assertions.h",
+ "src/parsing/keywords-gen.h",
"src/parsing/literal-buffer.h",
"src/parsing/parse-info.h",
"src/parsing/parser-base.h",
@@ -2892,6 +2968,7 @@ v8_header_set("v8_internal_headers") {
"src/parsing/preparser.h",
"src/parsing/rewriter.h",
"src/parsing/scanner-character-streams.h",
+ "src/parsing/scanner-inl.h",
"src/parsing/scanner.h",
"src/parsing/token.h",
"src/profiler/allocation-tracker.h",
@@ -2939,10 +3016,6 @@ v8_header_set("v8_internal_headers") {
"src/roots/roots.h",
"src/runtime/runtime-utils.h",
"src/runtime/runtime.h",
- "src/sanitizer/asan.h",
- "src/sanitizer/lsan-page-allocator.h",
- "src/sanitizer/msan.h",
- "src/sanitizer/tsan.h",
"src/snapshot/code-serializer.h",
"src/snapshot/context-deserializer.h",
"src/snapshot/context-serializer.h",
@@ -2980,6 +3053,7 @@ v8_header_set("v8_internal_headers") {
"src/tasks/task-utils.h",
"src/third_party/siphash/halfsiphash.h",
"src/third_party/utf8-decoder/utf8-decoder.h",
+ "src/torque/runtime-macro-shims.h",
"src/tracing/trace-event.h",
"src/tracing/traced-value.h",
"src/tracing/tracing-category-observer.h",
@@ -3046,6 +3120,7 @@ v8_header_set("v8_internal_headers") {
"src/wasm/jump-table-assembler.h",
"src/wasm/leb-helper.h",
"src/wasm/local-decl-encoder.h",
+ "src/wasm/memory-protection-key.h",
"src/wasm/memory-tracing.h",
"src/wasm/module-compiler.h",
"src/wasm/module-decoder.h",
@@ -3063,6 +3138,7 @@ v8_header_set("v8_internal_headers") {
"src/wasm/wasm-feature-flags.h",
"src/wasm/wasm-features.h",
"src/wasm/wasm-import-wrapper-cache.h",
+ "src/wasm/wasm-init-expr.h",
"src/wasm/wasm-js.h",
"src/wasm/wasm-linkage.h",
"src/wasm/wasm-module-builder.h",
@@ -3070,6 +3146,7 @@ v8_header_set("v8_internal_headers") {
"src/wasm/wasm-module.h",
"src/wasm/wasm-objects-inl.h",
"src/wasm/wasm-objects.h",
+ "src/wasm/wasm-opcodes-inl.h",
"src/wasm/wasm-opcodes.h",
"src/wasm/wasm-result.h",
"src/wasm/wasm-serialization.h",
@@ -3140,6 +3217,7 @@ v8_header_set("v8_internal_headers") {
"src/codegen/ia32/assembler-ia32-inl.h",
"src/codegen/ia32/assembler-ia32.h",
"src/codegen/ia32/constants-ia32.h",
+ "src/codegen/ia32/interface-descriptors-ia32-inl.h",
"src/codegen/ia32/macro-assembler-ia32.h",
"src/codegen/ia32/register-ia32.h",
"src/codegen/ia32/sse-instr.h",
@@ -3158,6 +3236,7 @@ v8_header_set("v8_internal_headers") {
"src/codegen/x64/assembler-x64.h",
"src/codegen/x64/constants-x64.h",
"src/codegen/x64/fma-instr.h",
+ "src/codegen/x64/interface-descriptors-x64-inl.h",
"src/codegen/x64/macro-assembler-x64.h",
"src/codegen/x64/register-x64.h",
"src/codegen/x64/sse-instr.h",
@@ -3187,6 +3266,7 @@ v8_header_set("v8_internal_headers") {
"src/codegen/arm/assembler-arm-inl.h",
"src/codegen/arm/assembler-arm.h",
"src/codegen/arm/constants-arm.h",
+ "src/codegen/arm/interface-descriptors-arm-inl.h",
"src/codegen/arm/macro-assembler-arm.h",
"src/codegen/arm/register-arm.h",
"src/compiler/backend/arm/instruction-codes-arm.h",
@@ -3206,6 +3286,7 @@ v8_header_set("v8_internal_headers") {
"src/codegen/arm64/decoder-arm64-inl.h",
"src/codegen/arm64/decoder-arm64.h",
"src/codegen/arm64/instructions-arm64.h",
+ "src/codegen/arm64/interface-descriptors-arm64-inl.h",
"src/codegen/arm64/macro-assembler-arm64-inl.h",
"src/codegen/arm64/macro-assembler-arm64.h",
"src/codegen/arm64/register-arm64.h",
@@ -3259,11 +3340,10 @@ v8_header_set("v8_internal_headers") {
]
} else if (v8_current_cpu == "ppc") {
sources += [ ### gcmole(arch:ppc) ###
- "src/baseline/ppc/baseline-assembler-ppc-inl.h",
- "src/baseline/ppc/baseline-compiler-ppc-inl.h",
"src/codegen/ppc/assembler-ppc-inl.h",
"src/codegen/ppc/assembler-ppc.h",
"src/codegen/ppc/constants-ppc.h",
+ "src/codegen/ppc/interface-descriptors-ppc-inl.h",
"src/codegen/ppc/macro-assembler-ppc.h",
"src/codegen/ppc/register-ppc.h",
"src/compiler/backend/ppc/instruction-codes-ppc.h",
@@ -3275,11 +3355,10 @@ v8_header_set("v8_internal_headers") {
]
} else if (v8_current_cpu == "ppc64") {
sources += [ ### gcmole(arch:ppc64) ###
- "src/baseline/ppc/baseline-assembler-ppc-inl.h",
- "src/baseline/ppc/baseline-compiler-ppc-inl.h",
"src/codegen/ppc/assembler-ppc-inl.h",
"src/codegen/ppc/assembler-ppc.h",
"src/codegen/ppc/constants-ppc.h",
+ "src/codegen/ppc/interface-descriptors-ppc-inl.h",
"src/codegen/ppc/macro-assembler-ppc.h",
"src/codegen/ppc/register-ppc.h",
"src/compiler/backend/ppc/instruction-codes-ppc.h",
@@ -3296,6 +3375,7 @@ v8_header_set("v8_internal_headers") {
"src/codegen/s390/assembler-s390-inl.h",
"src/codegen/s390/assembler-s390.h",
"src/codegen/s390/constants-s390.h",
+ "src/codegen/s390/interface-descriptors-s390-inl.h",
"src/codegen/s390/macro-assembler-s390.h",
"src/codegen/s390/register-s390.h",
"src/compiler/backend/s390/instruction-codes-s390.h",
@@ -3384,6 +3464,7 @@ v8_compiler_sources = [
"src/compiler/graph-trimmer.cc",
"src/compiler/graph-visualizer.cc",
"src/compiler/graph.cc",
+ "src/compiler/heap-refs.cc",
"src/compiler/js-call-reducer.cc",
"src/compiler/js-context-specialization.cc",
"src/compiler/js-create-lowering.cc",
@@ -3430,7 +3511,6 @@ v8_compiler_sources = [
"src/compiler/refs-map.cc",
"src/compiler/representation-change.cc",
"src/compiler/schedule.cc",
- "src/compiler/scheduled-machine-lowering.cc",
"src/compiler/scheduler.cc",
"src/compiler/select-lowering.cc",
"src/compiler/serializer-for-background-compilation.cc",
@@ -3452,7 +3532,6 @@ v8_compiler_sources = [
if (v8_enable_webassembly) {
v8_compiler_sources += [
"src/compiler/int64-lowering.cc",
- "src/compiler/simd-scalar-lowering.cc",
"src/compiler/wasm-compiler.cc",
]
}
@@ -3676,6 +3755,7 @@ v8_source_set("v8_base_without_compiler") {
"src/heap/base-space.cc",
"src/heap/basic-memory-chunk.cc",
"src/heap/code-object-registry.cc",
+ "src/heap/code-range.cc",
"src/heap/code-stats.cc",
"src/heap/collection-barrier.cc",
"src/heap/combined-heap.cc",
@@ -3763,6 +3843,7 @@ v8_source_set("v8_base_without_compiler") {
"src/logging/log-utils.cc",
"src/logging/log.cc",
"src/logging/metrics.cc",
+ "src/logging/runtime-call-stats.cc",
"src/logging/tracing-flags.cc",
"src/numbers/bignum-dtoa.cc",
"src/numbers/bignum.cc",
@@ -3908,7 +3989,6 @@ v8_source_set("v8_base_without_compiler") {
"src/runtime/runtime-typedarray.cc",
"src/runtime/runtime-weak-refs.cc",
"src/runtime/runtime.cc",
- "src/sanitizer/lsan-page-allocator.cc",
"src/snapshot/code-serializer.cc",
"src/snapshot/context-deserializer.cc",
"src/snapshot/context-serializer.cc",
@@ -3977,6 +4057,7 @@ v8_source_set("v8_base_without_compiler") {
"src/wasm/graph-builder-interface.cc",
"src/wasm/jump-table-assembler.cc",
"src/wasm/local-decl-encoder.cc",
+ "src/wasm/memory-protection-key.cc",
"src/wasm/memory-tracing.cc",
"src/wasm/module-compiler.cc",
"src/wasm/module-decoder.cc",
@@ -3988,10 +4069,12 @@ v8_source_set("v8_base_without_compiler") {
"src/wasm/value-type.cc",
"src/wasm/wasm-code-manager.cc",
"src/wasm/wasm-debug.cc",
+ "src/wasm/wasm-debug.h",
"src/wasm/wasm-engine.cc",
"src/wasm/wasm-external-refs.cc",
"src/wasm/wasm-features.cc",
"src/wasm/wasm-import-wrapper-cache.cc",
+ "src/wasm/wasm-init-expr.cc",
"src/wasm/wasm-js.cc",
"src/wasm/wasm-module-builder.cc",
"src/wasm/wasm-module-sourcemap.cc",
@@ -4007,7 +4090,10 @@ v8_source_set("v8_base_without_compiler") {
if (v8_enable_third_party_heap) {
sources += v8_third_party_heap_files
} else {
- sources += [ "src/heap/third-party/heap-api-stub.cc" ]
+ sources += [
+ "src/heap/third-party/heap-api-stub.cc",
+ "src/heap/third-party/heap-api.h",
+ ]
}
if (v8_enable_conservative_stack_scanning) {
@@ -4027,24 +4113,15 @@ v8_source_set("v8_base_without_compiler") {
]
}
- if (v8_check_header_includes) {
- # This file will be generated by tools/generate-header-include-checks.py
- # if the "check_v8_header_includes" gclient variable is set.
- import("check-header-includes/sources.gni")
- sources += check_header_includes_sources
- }
-
if (v8_current_cpu == "x86") {
sources += [ ### gcmole(arch:ia32) ###
"src/codegen/ia32/assembler-ia32.cc",
"src/codegen/ia32/cpu-ia32.cc",
- "src/codegen/ia32/interface-descriptors-ia32.cc",
"src/codegen/ia32/macro-assembler-ia32.cc",
"src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc",
"src/compiler/backend/ia32/code-generator-ia32.cc",
"src/compiler/backend/ia32/instruction-scheduler-ia32.cc",
"src/compiler/backend/ia32/instruction-selector-ia32.cc",
- "src/debug/ia32/debug-ia32.cc",
"src/deoptimizer/ia32/deoptimizer-ia32.cc",
"src/diagnostics/ia32/disasm-ia32.cc",
"src/diagnostics/ia32/unwinder-ia32.cc",
@@ -4056,13 +4133,11 @@ v8_source_set("v8_base_without_compiler") {
"src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc",
"src/codegen/x64/assembler-x64.cc",
"src/codegen/x64/cpu-x64.cc",
- "src/codegen/x64/interface-descriptors-x64.cc",
"src/codegen/x64/macro-assembler-x64.cc",
"src/compiler/backend/x64/code-generator-x64.cc",
"src/compiler/backend/x64/instruction-scheduler-x64.cc",
"src/compiler/backend/x64/instruction-selector-x64.cc",
"src/compiler/backend/x64/unwinding-info-writer-x64.cc",
- "src/debug/x64/debug-x64.cc",
"src/deoptimizer/x64/deoptimizer-x64.cc",
"src/diagnostics/x64/disasm-x64.cc",
"src/diagnostics/x64/eh-frame-x64.cc",
@@ -4091,13 +4166,11 @@ v8_source_set("v8_base_without_compiler") {
"src/codegen/arm/assembler-arm.cc",
"src/codegen/arm/constants-arm.cc",
"src/codegen/arm/cpu-arm.cc",
- "src/codegen/arm/interface-descriptors-arm.cc",
"src/codegen/arm/macro-assembler-arm.cc",
"src/compiler/backend/arm/code-generator-arm.cc",
"src/compiler/backend/arm/instruction-scheduler-arm.cc",
"src/compiler/backend/arm/instruction-selector-arm.cc",
"src/compiler/backend/arm/unwinding-info-writer-arm.cc",
- "src/debug/arm/debug-arm.cc",
"src/deoptimizer/arm/deoptimizer-arm.cc",
"src/diagnostics/arm/disasm-arm.cc",
"src/diagnostics/arm/eh-frame-arm.cc",
@@ -4113,7 +4186,6 @@ v8_source_set("v8_base_without_compiler") {
"src/codegen/arm64/decoder-arm64.cc",
"src/codegen/arm64/instructions-arm64-constants.cc",
"src/codegen/arm64/instructions-arm64.cc",
- "src/codegen/arm64/interface-descriptors-arm64.cc",
"src/codegen/arm64/macro-assembler-arm64.cc",
"src/codegen/arm64/register-arm64.cc",
"src/codegen/arm64/utils-arm64.cc",
@@ -4121,7 +4193,6 @@ v8_source_set("v8_base_without_compiler") {
"src/compiler/backend/arm64/instruction-scheduler-arm64.cc",
"src/compiler/backend/arm64/instruction-selector-arm64.cc",
"src/compiler/backend/arm64/unwinding-info-writer-arm64.cc",
- "src/debug/arm64/debug-arm64.cc",
"src/deoptimizer/arm64/deoptimizer-arm64.cc",
"src/diagnostics/arm64/disasm-arm64.cc",
"src/diagnostics/arm64/eh-frame-arm64.cc",
@@ -4146,12 +4217,11 @@ v8_source_set("v8_base_without_compiler") {
"src/codegen/mips/assembler-mips.cc",
"src/codegen/mips/constants-mips.cc",
"src/codegen/mips/cpu-mips.cc",
- "src/codegen/mips/interface-descriptors-mips.cc",
+ "src/codegen/mips/interface-descriptors-mips-inl.h",
"src/codegen/mips/macro-assembler-mips.cc",
"src/compiler/backend/mips/code-generator-mips.cc",
"src/compiler/backend/mips/instruction-scheduler-mips.cc",
"src/compiler/backend/mips/instruction-selector-mips.cc",
- "src/debug/mips/debug-mips.cc",
"src/deoptimizer/mips/deoptimizer-mips.cc",
"src/diagnostics/mips/disasm-mips.cc",
"src/diagnostics/mips/unwinder-mips.cc",
@@ -4164,12 +4234,11 @@ v8_source_set("v8_base_without_compiler") {
"src/codegen/mips64/assembler-mips64.cc",
"src/codegen/mips64/constants-mips64.cc",
"src/codegen/mips64/cpu-mips64.cc",
- "src/codegen/mips64/interface-descriptors-mips64.cc",
+ "src/codegen/mips64/interface-descriptors-mips64-inl.h",
"src/codegen/mips64/macro-assembler-mips64.cc",
"src/compiler/backend/mips64/code-generator-mips64.cc",
"src/compiler/backend/mips64/instruction-scheduler-mips64.cc",
"src/compiler/backend/mips64/instruction-selector-mips64.cc",
- "src/debug/mips64/debug-mips64.cc",
"src/deoptimizer/mips64/deoptimizer-mips64.cc",
"src/diagnostics/mips64/disasm-mips64.cc",
"src/diagnostics/mips64/unwinder-mips64.cc",
@@ -4182,13 +4251,11 @@ v8_source_set("v8_base_without_compiler") {
"src/codegen/ppc/assembler-ppc.cc",
"src/codegen/ppc/constants-ppc.cc",
"src/codegen/ppc/cpu-ppc.cc",
- "src/codegen/ppc/interface-descriptors-ppc.cc",
"src/codegen/ppc/macro-assembler-ppc.cc",
"src/compiler/backend/ppc/code-generator-ppc.cc",
"src/compiler/backend/ppc/instruction-scheduler-ppc.cc",
"src/compiler/backend/ppc/instruction-selector-ppc.cc",
"src/compiler/backend/ppc/unwinding-info-writer-ppc.cc",
- "src/debug/ppc/debug-ppc.cc",
"src/deoptimizer/ppc/deoptimizer-ppc.cc",
"src/diagnostics/ppc/disasm-ppc.cc",
"src/diagnostics/ppc/eh-frame-ppc.cc",
@@ -4202,13 +4269,11 @@ v8_source_set("v8_base_without_compiler") {
"src/codegen/ppc/assembler-ppc.cc",
"src/codegen/ppc/constants-ppc.cc",
"src/codegen/ppc/cpu-ppc.cc",
- "src/codegen/ppc/interface-descriptors-ppc.cc",
"src/codegen/ppc/macro-assembler-ppc.cc",
"src/compiler/backend/ppc/code-generator-ppc.cc",
"src/compiler/backend/ppc/instruction-scheduler-ppc.cc",
"src/compiler/backend/ppc/instruction-selector-ppc.cc",
"src/compiler/backend/ppc/unwinding-info-writer-ppc.cc",
- "src/debug/ppc/debug-ppc.cc",
"src/deoptimizer/ppc/deoptimizer-ppc.cc",
"src/diagnostics/ppc/disasm-ppc.cc",
"src/diagnostics/ppc/eh-frame-ppc.cc",
@@ -4222,13 +4287,11 @@ v8_source_set("v8_base_without_compiler") {
"src/codegen/s390/assembler-s390.cc",
"src/codegen/s390/constants-s390.cc",
"src/codegen/s390/cpu-s390.cc",
- "src/codegen/s390/interface-descriptors-s390.cc",
"src/codegen/s390/macro-assembler-s390.cc",
"src/compiler/backend/s390/code-generator-s390.cc",
"src/compiler/backend/s390/instruction-scheduler-s390.cc",
"src/compiler/backend/s390/instruction-selector-s390.cc",
"src/compiler/backend/s390/unwinding-info-writer-s390.cc",
- "src/debug/s390/debug-s390.cc",
"src/deoptimizer/s390/deoptimizer-s390.cc",
"src/diagnostics/s390/disasm-s390.cc",
"src/diagnostics/s390/eh-frame-s390.cc",
@@ -4239,15 +4302,17 @@ v8_source_set("v8_base_without_compiler") {
]
} else if (v8_current_cpu == "riscv64") {
sources += [ ### gcmole(arch:riscv64) ###
+ "src/baseline/riscv64/baseline-assembler-riscv64-inl.h",
+ "src/baseline/riscv64/baseline-compiler-riscv64-inl.h",
+ "src/codegen/riscv64/assembler-riscv64-inl.h",
"src/codegen/riscv64/assembler-riscv64.cc",
"src/codegen/riscv64/constants-riscv64.cc",
"src/codegen/riscv64/cpu-riscv64.cc",
- "src/codegen/riscv64/interface-descriptors-riscv64.cc",
+ "src/codegen/riscv64/interface-descriptors-riscv64-inl.h",
"src/codegen/riscv64/macro-assembler-riscv64.cc",
"src/compiler/backend/riscv64/code-generator-riscv64.cc",
"src/compiler/backend/riscv64/instruction-scheduler-riscv64.cc",
"src/compiler/backend/riscv64/instruction-selector-riscv64.cc",
- "src/debug/riscv64/debug-riscv64.cc",
"src/deoptimizer/riscv64/deoptimizer-riscv64.cc",
"src/diagnostics/riscv64/disasm-riscv64.cc",
"src/diagnostics/riscv64/unwinder-riscv64.cc",
@@ -4497,8 +4562,6 @@ v8_component("v8_libbase") {
"src/base/atomic-utils.h",
"src/base/atomicops.h",
"src/base/atomicops_internals_atomicword_compat.h",
- "src/base/atomicops_internals_portable.h",
- "src/base/atomicops_internals_std.h",
"src/base/base-export.h",
"src/base/bit-field.h",
"src/base/bits-iterator.h",
@@ -4532,7 +4595,6 @@ v8_component("v8_libbase") {
"src/base/lazy-instance.h",
"src/base/logging.cc",
"src/base/logging.h",
- "src/base/lsan.h",
"src/base/macros.h",
"src/base/memory.h",
"src/base/once.cc",
@@ -4558,6 +4620,11 @@ v8_component("v8_libbase") {
"src/base/safe_conversions.h",
"src/base/safe_conversions_arm_impl.h",
"src/base/safe_conversions_impl.h",
+ "src/base/sanitizer/asan.h",
+ "src/base/sanitizer/lsan-page-allocator.cc",
+ "src/base/sanitizer/lsan-page-allocator.h",
+ "src/base/sanitizer/lsan.h",
+ "src/base/sanitizer/msan.h",
"src/base/small-vector.h",
"src/base/sys-info.cc",
"src/base/sys-info.h",
@@ -4694,9 +4761,11 @@ v8_component("v8_libbase") {
if (is_tsan && !build_with_chromium) {
data += [ "tools/sanitizers/tsan_suppressions.txt" ]
+ }
- # llvm-symbolizer uses libstdc++ from the clang package.
- data += [ "//third_party/llvm-build/Release+Asserts/lib/libstdc++.so.6" ]
+ if (using_sanitizer && !build_with_chromium) {
+ data_deps +=
+ [ "//build/config/clang:llvm-symbolizer_data($host_toolchain)" ]
}
# TODO(jochen): Add support for qnx, freebsd, openbsd, netbsd, and solaris.
@@ -4720,8 +4789,6 @@ v8_component("v8_libplatform") {
"src/libplatform/delayed-task-queue.h",
"src/libplatform/task-queue.cc",
"src/libplatform/task-queue.h",
- "src/libplatform/tracing/recorder-default.cc",
- "src/libplatform/tracing/recorder.h",
"src/libplatform/tracing/trace-buffer.cc",
"src/libplatform/tracing/trace-buffer.h",
"src/libplatform/tracing/trace-config.cc",
@@ -4752,8 +4819,6 @@ v8_component("v8_libplatform") {
if (v8_use_perfetto) {
sources -= [
"//base/trace_event/common/trace_event_common.h",
- "src/libplatform/tracing/recorder-default.cc",
- "src/libplatform/tracing/recorder.h",
"src/libplatform/tracing/trace-buffer.cc",
"src/libplatform/tracing/trace-buffer.h",
"src/libplatform/tracing/trace-object.cc",
@@ -4768,9 +4833,15 @@ v8_component("v8_libplatform") {
# TODO(skyostil): Switch TraceEventListener to protozero.
"//third_party/perfetto/protos/perfetto/trace:lite",
]
- } else if (is_win) {
- sources -= [ "src/libplatform/tracing/recorder-default.cc" ]
- sources += [ "src/libplatform/tracing/recorder-win.cc" ]
+ }
+
+ if (v8_enable_system_instrumentation) {
+ sources += [ "src/libplatform/tracing/recorder.h" ]
+ if (is_mac) {
+ sources += [ "src/libplatform/tracing/recorder-mac.cc" ]
+ } else if (is_win) {
+ sources += [ "src/libplatform/tracing/recorder-win.cc" ]
+ }
}
}
@@ -4794,8 +4865,13 @@ v8_source_set("fuzzer_support") {
v8_source_set("v8_bigint") {
sources = [
+ "src/bigint/bigint-internal.cc",
+ "src/bigint/bigint-internal.h",
"src/bigint/bigint.h",
+ "src/bigint/digit-arithmetic.h",
+ "src/bigint/mul-schoolbook.cc",
"src/bigint/vector-arithmetic.cc",
+ "src/bigint/vector-arithmetic.h",
]
configs = [ ":internal_config" ]
@@ -4807,7 +4883,6 @@ v8_source_set("v8_cppgc_shared") {
"src/heap/base/stack.h",
"src/heap/base/worklist.cc",
"src/heap/base/worklist.h",
- "src/heap/cppgc/sanitizers.h",
]
if (is_clang || !is_win) {
@@ -4954,6 +5029,8 @@ v8_source_set("cppgc_base") {
"src/heap/cppgc/marking-visitor.h",
"src/heap/cppgc/marking-worklists.cc",
"src/heap/cppgc/marking-worklists.h",
+ "src/heap/cppgc/memory.cc",
+ "src/heap/cppgc/memory.h",
"src/heap/cppgc/metric-recorder.h",
"src/heap/cppgc/name-trait.cc",
"src/heap/cppgc/object-allocator.cc",
@@ -4961,6 +5038,7 @@ v8_source_set("cppgc_base") {
"src/heap/cppgc/object-poisoner.h",
"src/heap/cppgc/object-size-trait.cc",
"src/heap/cppgc/object-start-bitmap.h",
+ "src/heap/cppgc/object-view.h",
"src/heap/cppgc/page-memory.cc",
"src/heap/cppgc/page-memory.h",
"src/heap/cppgc/persistent-node.cc",
@@ -5032,6 +5110,35 @@ v8_source_set("cppgc_base_for_testing") {
public_deps = [ ":cppgc_base" ]
}
+if (v8_check_header_includes) {
+ # This file will be generated by tools/generate-header-include-checks.py
+ # if the "check_v8_header_includes" gclient variable is set.
+ import("check-header-includes/sources.gni")
+ v8_source_set("check_headers") {
+ configs = [ ":internal_config" ]
+ sources = check_header_includes_sources
+
+ # Any rules that contain headers files should be added here either directly
+ # or indirectly by including something that has it transitively in its
+ # public_deps.
+ deps = [
+ ":d8",
+ ":mksnapshot",
+ ":torque_base",
+ ":torque_ls_base",
+ ":v8_base_without_compiler",
+ ":v8_bigint",
+ ":v8_initializers",
+ ":v8_internal_headers",
+ ":v8_libbase",
+ ":v8_maybe_icu",
+ ":wee8",
+ "src/inspector:inspector",
+ "src/inspector:inspector_string_conversions",
+ ]
+ }
+}
+
###############################################################################
# Produce a single static library for embedders
#
@@ -5284,6 +5391,10 @@ group("gn_all") {
if (want_v8_shell) {
deps += [ ":v8_shell" ]
}
+
+ if (v8_check_header_includes) {
+ deps += [ ":check_headers" ]
+ }
}
group("v8_python_base") {
@@ -6199,9 +6310,7 @@ if (!build_with_chromium && v8_use_perfetto) {
configs = [ ":v8_tracing_config" ]
public_configs = [ "//third_party/perfetto/gn:public_config" ]
deps = [
- "//third_party/perfetto/src/trace_processor:export_json",
"//third_party/perfetto/src/trace_processor:storage_minimal",
- "//third_party/perfetto/src/tracing:client_api",
"//third_party/perfetto/src/tracing/core",
# TODO(skyostil): Support non-POSIX platforms.
@@ -6210,5 +6319,11 @@ if (!build_with_chromium && v8_use_perfetto) {
"//third_party/perfetto/src/tracing:in_process_backend",
"//third_party/perfetto/src/tracing:platform_impl",
]
+
+ public_deps = [
+ "//third_party/perfetto/include/perfetto/trace_processor",
+ "//third_party/perfetto/src/trace_processor:export_json",
+ "//third_party/perfetto/src/tracing:client_api",
+ ]
}
} # if (!build_with_chromium && v8_use_perfetto)
diff --git a/chromium/v8/DEPS b/chromium/v8/DEPS
index b27a4e8e8fa..d3de2c5ddc9 100644
--- a/chromium/v8/DEPS
+++ b/chromium/v8/DEPS
@@ -9,7 +9,6 @@ gclient_gn_args = [
# TODO(https://crbug.com/1137662, https://crbug.com/1080854)
# Remove when migration is complete.
'checkout_fuchsia_for_arm64_host',
- 'checkout_google_benchmark',
]
vars = {
@@ -44,13 +43,11 @@ vars = {
'download_jsfunfuzz': False,
'check_v8_header_includes': False,
- 'checkout_google_benchmark' : False,
-
# GN CIPD package version.
- 'gn_version': 'git_revision:dba01723a441c358d843a575cb7720d54ddcdf92',
+ 'gn_version': 'git_revision:39a87c0b36310bdf06b692c098f199a0d97fc810',
# luci-go CIPD package version.
- 'luci_go': 'git_revision:d6d24b11ecded4d89f3dfd1b2e5a0072a3d4ab15',
+ 'luci_go': 'git_revision:22d464e2f8f3bd2bd33f69fe819326d63f881008',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling android_sdk_build-tools_version
@@ -88,15 +85,15 @@ vars = {
deps = {
'build':
- Var('chromium_url') + '/chromium/src/build.git' + '@' + '77edba11e25386aa719d4f08c3ce2d8c4f868c15',
+ Var('chromium_url') + '/chromium/src/build.git' + '@' + '4036cf1b17581f5668b487a25e252d56e0321a7f',
'third_party/depot_tools':
- Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + '98a52e2e312dd10d7fcf281e322039a6b706b86b',
+ Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + '6b0a611c2c692684f94c0c3629f793feebd16b39',
'third_party/icu':
- Var('chromium_url') + '/chromium/deps/icu.git' + '@' + '81d656878ec611cb0b42d52c82e9dae93920d9ba',
+ Var('chromium_url') + '/chromium/deps/icu.git' + '@' + 'f022e298b4f4a782486bb6d5ce6589c998b51fe2',
'third_party/instrumented_libraries':
- Var('chromium_url') + '/chromium/src/third_party/instrumented_libraries.git' + '@' + '084aee04777db574038af9e9d33ca5caed577462',
+ Var('chromium_url') + '/chromium/src/third_party/instrumented_libraries.git' + '@' + '4ae2535e8e894c3cd81d46aacdaf151b5df30709',
'buildtools':
- Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + '5dbd89c9d9c0b0ff47cefdc2bc421b8c9a1c5a21',
+ Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + '20b1d0fc13ebaa263a1248f08814f523a86e6bed',
'buildtools/clang_format/script':
Var('chromium_url') + '/external/github.com/llvm/llvm-project/clang/tools/clang-format.git' + '@' + '99803d74e35962f63a775f29477882afd4d57d94',
'buildtools/linux64': {
@@ -122,9 +119,9 @@ deps = {
'buildtools/third_party/libc++/trunk':
Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxx.git' + '@' + '8fa87946779682841e21e2da977eccfb6cb3bded',
'buildtools/third_party/libc++abi/trunk':
- Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxxabi.git' + '@' + 'd0f33885a2ffa7d5af74af6065b60eb48e3c70f5',
+ Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxxabi.git' + '@' + '767de317f97343db64af048e3d198ab8b10fee5d',
'buildtools/third_party/libunwind/trunk':
- Var('chromium_url') + '/external/github.com/llvm/llvm-project/libunwind.git' + '@' + '08f35c8514a74817103121def05351186830d4b7',
+ Var('chromium_url') + '/external/github.com/llvm/llvm-project/libunwind.git' + '@' + '7846d256355e40273f7cc192c8f5893e8665a1f9',
'buildtools/win': {
'packages': [
{
@@ -136,7 +133,7 @@ deps = {
'condition': 'host_os == "win"',
},
'base/trace_event/common':
- Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + 'cab90cbdaaf4444d67aef6ce3cef09fc5fdeb560',
+ Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + 'd5bb24e5d9802c8c917fcaa4375d5239a586c168',
'third_party/android_ndk': {
'url': Var('chromium_url') + '/android_ndk.git' + '@' + '401019bf85744311b26c88ced255cd53401af8b7',
'condition': 'checkout_android',
@@ -184,7 +181,7 @@ deps = {
'dep_type': 'cipd',
},
'third_party/catapult': {
- 'url': Var('chromium_url') + '/catapult.git' + '@' + '41a5e5e465ad93d6e08224613d3544334a6278bc',
+ 'url': Var('chromium_url') + '/catapult.git' + '@' + 'c1e1d559b46476584ec0eb1d83bd7f43fa5a1b36',
'condition': 'checkout_android',
},
'third_party/colorama/src': {
@@ -196,10 +193,9 @@ deps = {
'condition': 'checkout_fuchsia',
},
'third_party/googletest/src':
- Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + '07f4869221012b16b7f9ee685d94856e1fc9f361',
+ Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + '23ef29555ef4789f555f1ba8c51b4c52975f0907',
'third_party/google_benchmark/src': {
- 'url': Var('chromium_url') + '/external/github.com/google/benchmark.git' + '@' + '7f27afe83b82f3a98baf58ef595814b9d42a5b2b',
- 'condition': 'checkout_google_benchmark',
+ 'url': Var('chromium_url') + '/external/github.com/google/benchmark.git' + '@' + '7d0d9061d83b663ce05d9de5da3d5865a3845b79',
},
'third_party/jinja2':
Var('chromium_url') + '/chromium/src/third_party/jinja2.git' + '@' + '11b6b3e5971d760bd2d310f77643f55a818a6d25',
@@ -212,7 +208,7 @@ deps = {
'test/mozilla/data':
Var('chromium_url') + '/v8/deps/third_party/mozilla-tests.git' + '@' + 'f6c578a10ea707b1a8ab0b88943fe5115ce2b9be',
'test/test262/data':
- Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + '31126581e7290f9233c29cefd93f66c6ac78f1c9',
+ Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + '6d353a4436747e2de8820efac27ae5ef7e601b60',
'test/test262/harness':
Var('chromium_url') + '/external/github.com/test262-utils/test262-harness-py.git' + '@' + '278bcfaed0dcaa13936831fb1769d15e7c1e3b2b',
'third_party/qemu-linux-x64': {
@@ -239,7 +235,7 @@ deps = {
'packages': [
{
'package': 'fuchsia/third_party/aemu/linux-amd64',
- 'version': 'SeLS6a0f6IL-PCOUKbMTN5LYgjjJbDSnb3DGf5q9pwsC'
+ 'version': '-Sz2gSN_5yVSHDlitjxUlmZpHuz-F2kFDW6TnmggCZoC'
},
],
'condition': 'host_os == "linux" and checkout_fuchsia',
@@ -256,7 +252,7 @@ deps = {
'dep_type': 'cipd',
},
'tools/clang':
- Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + 'a387faa2a6741f565e45d78804a49a0e55de5909',
+ Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + 'a38f01b956e091d5e698d2af484c81cd4e9a2a2d',
'tools/luci-go': {
'packages': [
{
@@ -290,7 +286,7 @@ deps = {
'third_party/protobuf':
Var('chromium_url') + '/external/github.com/google/protobuf'+ '@' + '6a59a2ad1f61d9696092f79b6d74368b4d7970a3',
'third_party/zlib':
- Var('chromium_url') + '/chromium/src/third_party/zlib.git'+ '@' + '09490503d0f201b81e03f5ca0ab8ba8ee76d4a8e',
+ Var('chromium_url') + '/chromium/src/third_party/zlib.git'+ '@' + '5b8d433953beb2a75a755ba321a3076b95f7cdb9',
'third_party/jsoncpp/source':
Var('chromium_url') + '/external/github.com/open-source-parsers/jsoncpp.git'+ '@' + '9059f5cad030ba11d37818847443a53918c327b1',
'third_party/ittapi': {
diff --git a/chromium/v8/OWNERS b/chromium/v8/OWNERS
index 2a478dbdc5c..2ad1949b515 100644
--- a/chromium/v8/OWNERS
+++ b/chromium/v8/OWNERS
@@ -21,7 +21,7 @@ per-file PRESUBMIT.py=file:INFRA_OWNERS
per-file codereview.settings=file:INFRA_OWNERS
per-file AUTHORS=file:COMMON_OWNERS
-per-file WATCHLIST=file:COMMON_OWNERS
+per-file WATCHLISTS=file:COMMON_OWNERS
per-file *-mips*=file:MIPS_OWNERS
per-file *-mips64*=file:MIPS_OWNERS
diff --git a/chromium/v8/PRESUBMIT.py b/chromium/v8/PRESUBMIT.py
index 2ee14d545ee..61963c62f62 100644
--- a/chromium/v8/PRESUBMIT.py
+++ b/chromium/v8/PRESUBMIT.py
@@ -279,7 +279,7 @@ def _CheckHeadersHaveIncludeGuards(input_api, output_api):
for line in f.NewContents():
for i in range(len(guard_patterns)):
if guard_patterns[i].match(line):
- found_patterns[i] = True
+ found_patterns[i] = True
if skip_check_pattern.match(line):
file_omitted = True
break
@@ -485,7 +485,9 @@ def _CheckNoexceptAnnotations(input_api, output_api):
files_to_check=(r'src[\\\/].*', r'test[\\\/].*'),
# Skip api.cc since we cannot easily add the 'noexcept' annotation to
# public methods.
- files_to_skip=(r'src[\\\/]api[\\\/]api\.cc',))
+ # Skip src/bigint/ because it's meant to be V8-independent.
+ files_to_skip=(r'src[\\\/]api[\\\/]api\.cc',
+ r'src[\\\/]bigint[\\\/].*'))
# matches any class name.
class_name = r'\b([A-Z][A-Za-z0-9_:]*)(?:::\1)?'
diff --git a/chromium/v8/RISCV_OWNERS b/chromium/v8/RISCV_OWNERS
index f3240b500b1..8f8e15a40a0 100644
--- a/chromium/v8/RISCV_OWNERS
+++ b/chromium/v8/RISCV_OWNERS
@@ -1,3 +1,3 @@
brice.dobry@futurewei.com
-lazyparser@gmail.com
peng.w@rioslab.org
+qiuji@iscas.ac.cn
diff --git a/chromium/v8/WATCHLISTS b/chromium/v8/WATCHLISTS
index fa95f144cb7..c54f15ad797 100644
--- a/chromium/v8/WATCHLISTS
+++ b/chromium/v8/WATCHLISTS
@@ -51,6 +51,9 @@
'|test/cctest/interpreter/' \
'|test/unittests/interpreter/',
},
+ 'baseline': {
+ 'filepath': 'src/baseline/'
+ },
'feature_shipping_status': {
'filepath': 'src/flags/flag-definitions.h',
},
@@ -91,6 +94,9 @@
'filepath': 'src/base/ieee754\.(cc|h)' \
'|src/base/overflowing-math.h' \
'|LICENSE.fdlibm',
+ },
+ 'regexp': {
+ 'filepath': 'src/.*regexp',
}
},
@@ -110,6 +116,10 @@
'interpreter': [
'rmcilroy@chromium.org',
],
+ 'baseline': [
+ 'leszeks+watch@chromium.org',
+ 'verwaest+watch@chromium.org',
+ ],
'feature_shipping_status': [
'hablich@chromium.org',
],
@@ -142,5 +152,9 @@
'rtoy+watch@chromium.org',
'hongchan+watch@chromium.org'
],
+ 'regexp': [
+ 'jgruber+watch@chromium.org',
+ 'pthier+watch@chromium.org'
+ ],
},
}
diff --git a/chromium/v8/gni/v8.gni b/chromium/v8/gni/v8.gni
index 9325baf996e..8741e86c087 100644
--- a/chromium/v8/gni/v8.gni
+++ b/chromium/v8/gni/v8.gni
@@ -75,13 +75,16 @@ declare_args() {
# executed as standard JavaScript instead.
v8_enable_webassembly = ""
+ # Enable runtime call stats.
+ v8_enable_runtime_call_stats = true
+
# Add fuzzilli fuzzer support.
v8_fuzzilli = false
# Scan the call stack conservatively during garbage collection.
v8_enable_conservative_stack_scanning = false
- v8_enable_google_benchmark = checkout_google_benchmark
+ v8_enable_google_benchmark = false
cppgc_is_standalone = false
}
diff --git a/chromium/v8/include/cppgc/allocation.h b/chromium/v8/include/cppgc/allocation.h
index f4f0e72bd51..7a803cf2cc4 100644
--- a/chromium/v8/include/cppgc/allocation.h
+++ b/chromium/v8/include/cppgc/allocation.h
@@ -8,6 +8,7 @@
#include <stdint.h>
#include <atomic>
+#include <type_traits>
#include "cppgc/custom-space.h"
#include "cppgc/garbage-collected.h"
@@ -103,6 +104,10 @@ class MakeGarbageCollectedTraitBase
* \returns the memory to construct an object of type T on.
*/
V8_INLINE static void* Allocate(AllocationHandle& handle, size_t size) {
+ static_assert(
+ std::is_base_of<typename T::ParentMostGarbageCollectedType, T>::value,
+ "U of GarbageCollected<U> must be a base of T. Check "
+ "GarbageCollected<T> base class inheritance.");
return SpacePolicy<
typename internal::GCInfoFolding<
T, typename T::ParentMostGarbageCollectedType>::ResultType,
diff --git a/chromium/v8/include/cppgc/cross-thread-persistent.h b/chromium/v8/include/cppgc/cross-thread-persistent.h
index 9cfcd23fdf8..fe61e9acbc3 100644
--- a/chromium/v8/include/cppgc/cross-thread-persistent.h
+++ b/chromium/v8/include/cppgc/cross-thread-persistent.h
@@ -28,19 +28,19 @@ class BasicCrossThreadPersistent final : public PersistentBase,
~BasicCrossThreadPersistent() { Clear(); }
- BasicCrossThreadPersistent( // NOLINT
+ BasicCrossThreadPersistent(
const SourceLocation& loc = SourceLocation::Current())
: LocationPolicy(loc) {}
- BasicCrossThreadPersistent( // NOLINT
+ BasicCrossThreadPersistent(
std::nullptr_t, const SourceLocation& loc = SourceLocation::Current())
: LocationPolicy(loc) {}
- BasicCrossThreadPersistent( // NOLINT
+ BasicCrossThreadPersistent(
SentinelPointer s, const SourceLocation& loc = SourceLocation::Current())
: PersistentBase(s), LocationPolicy(loc) {}
- BasicCrossThreadPersistent( // NOLINT
+ BasicCrossThreadPersistent(
T* raw, const SourceLocation& loc = SourceLocation::Current())
: PersistentBase(raw), LocationPolicy(loc) {
if (!IsValid(raw)) return;
@@ -58,7 +58,7 @@ class BasicCrossThreadPersistent final : public PersistentBase,
friend class BasicCrossThreadPersistent;
};
- BasicCrossThreadPersistent( // NOLINT
+ BasicCrossThreadPersistent(
UnsafeCtorTag, T* raw,
const SourceLocation& loc = SourceLocation::Current())
: PersistentBase(raw), LocationPolicy(loc) {
@@ -68,14 +68,14 @@ class BasicCrossThreadPersistent final : public PersistentBase,
this->CheckPointer(raw);
}
- BasicCrossThreadPersistent( // NOLINT
+ BasicCrossThreadPersistent(
T& raw, const SourceLocation& loc = SourceLocation::Current())
: BasicCrossThreadPersistent(&raw, loc) {}
template <typename U, typename MemberBarrierPolicy,
typename MemberWeaknessTag, typename MemberCheckingPolicy,
typename = std::enable_if_t<std::is_base_of<T, U>::value>>
- BasicCrossThreadPersistent( // NOLINT
+ BasicCrossThreadPersistent(
internal::BasicMember<U, MemberBarrierPolicy, MemberWeaknessTag,
MemberCheckingPolicy>
member,
@@ -94,7 +94,7 @@ class BasicCrossThreadPersistent final : public PersistentBase,
template <typename U, typename OtherWeaknessPolicy,
typename OtherLocationPolicy, typename OtherCheckingPolicy,
typename = std::enable_if_t<std::is_base_of<T, U>::value>>
- BasicCrossThreadPersistent( // NOLINT
+ BasicCrossThreadPersistent(
const BasicCrossThreadPersistent<U, OtherWeaknessPolicy,
OtherLocationPolicy,
OtherCheckingPolicy>& other,
@@ -139,7 +139,7 @@ class BasicCrossThreadPersistent final : public PersistentBase,
GetNode()->UpdateOwner(this);
other.SetValue(nullptr);
other.SetNode(nullptr);
- this->CheckPointer(GetValue());
+ this->CheckPointer(Get());
return *this;
}
@@ -236,7 +236,7 @@ class BasicCrossThreadPersistent final : public PersistentBase,
*
* \returns the object.
*/
- operator T*() const { return Get(); } // NOLINT
+ operator T*() const { return Get(); }
/**
* Dereferences the stored object.
diff --git a/chromium/v8/include/cppgc/explicit-management.h b/chromium/v8/include/cppgc/explicit-management.h
index 8fb321c08ca..cdb6af48586 100644
--- a/chromium/v8/include/cppgc/explicit-management.h
+++ b/chromium/v8/include/cppgc/explicit-management.h
@@ -12,9 +12,12 @@
#include "cppgc/type-traits.h"
namespace cppgc {
+
+class HeapHandle;
+
namespace internal {
-V8_EXPORT void FreeUnreferencedObject(void*);
+V8_EXPORT void FreeUnreferencedObject(HeapHandle&, void*);
V8_EXPORT bool Resize(void*, size_t);
} // namespace internal
@@ -30,15 +33,19 @@ namespace subtle {
* to `object` after calling `FreeUnreferencedObject()`. In case such a
 * reference exists, its use results in a use-after-free.
*
+ * To aid in using the API, `FreeUnreferencedObject()` may be called from
+ * destructors on objects that would be reclaimed in the same garbage collection
+ * cycle.
+ *
+ * \param heap_handle The corresponding heap.
* \param object Reference to an object that is of type `GarbageCollected` and
* should be immediately reclaimed.
*/
template <typename T>
-void FreeUnreferencedObject(T* object) {
+void FreeUnreferencedObject(HeapHandle& heap_handle, T& object) {
static_assert(IsGarbageCollectedTypeV<T>,
"Object must be of type GarbageCollected.");
- if (!object) return;
- internal::FreeUnreferencedObject(object);
+ internal::FreeUnreferencedObject(heap_handle, &object);
}
/**
@@ -53,6 +60,8 @@ void FreeUnreferencedObject(T* object) {
* object down, the reclaimed area is not used anymore. Any subsequent use
* results in a use-after-free.
*
+ * The `object` must be live when calling `Resize()`.
+ *
* \param object Reference to an object that is of type `GarbageCollected` and
* should be resized.
* \param additional_bytes Bytes in addition to sizeof(T) that the object should
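A minimal sketch of how the reworked subtle API might be driven from embedder code; it assumes the embedder owns a cppgc::Heap and reaches the handles through cppgc::Heap::GetAllocationHandle() and GetHeapHandle(), with Doc standing in for a real embedder type:

    #include "cppgc/allocation.h"
    #include "cppgc/explicit-management.h"
    #include "cppgc/garbage-collected.h"
    #include "cppgc/heap.h"
    #include "cppgc/visitor.h"

    class Doc final : public cppgc::GarbageCollected<Doc> {
     public:
      void Trace(cppgc::Visitor*) const {}
    };

    void DropImmediately(cppgc::Heap* heap) {
      Doc* doc = cppgc::MakeGarbageCollected<Doc>(heap->GetAllocationHandle());
      // The heap handle is now passed explicitly and the object is taken by
      // reference, so the old nullptr early-return no longer exists.
      cppgc::subtle::FreeUnreferencedObject(heap->GetHeapHandle(), *doc);
    }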
diff --git a/chromium/v8/include/cppgc/heap-statistics.h b/chromium/v8/include/cppgc/heap-statistics.h
index cf8d6633cc2..2fe6e1ae58a 100644
--- a/chromium/v8/include/cppgc/heap-statistics.h
+++ b/chromium/v8/include/cppgc/heap-statistics.h
@@ -57,7 +57,7 @@ struct HeapStatistics final {
};
/**
- * Stastistics of the freelist (used only in non-large object spaces). For
+ * Statistics of the freelist (used only in non-large object spaces). For
* each bucket in the freelist the statistics record the bucket size, the
* number of freelist entries in the bucket, and the overall allocated memory
* consumed by these freelist entries.
@@ -67,7 +67,7 @@ struct HeapStatistics final {
std::vector<size_t> bucket_size;
/** number of freelist entries per bucket. */
std::vector<size_t> free_count;
- /** memory size concumed by freelist entries per size. */
+ /** memory size consumed by freelist entries per size. */
std::vector<size_t> free_size;
};
diff --git a/chromium/v8/include/cppgc/internal/compiler-specific.h b/chromium/v8/include/cppgc/internal/compiler-specific.h
index c580894b35d..595b6398cb7 100644
--- a/chromium/v8/include/cppgc/internal/compiler-specific.h
+++ b/chromium/v8/include/cppgc/internal/compiler-specific.h
@@ -21,13 +21,13 @@ namespace cppgc {
// [[no_unique_address]] comes in C++20 but is supported in clang with -std >=
// c++11.
-#if CPPGC_HAS_CPP_ATTRIBUTE(no_unique_address) // NOLINTNEXTLINE
+#if CPPGC_HAS_CPP_ATTRIBUTE(no_unique_address)
#define CPPGC_NO_UNIQUE_ADDRESS [[no_unique_address]]
#else
#define CPPGC_NO_UNIQUE_ADDRESS
#endif
-#if CPPGC_HAS_ATTRIBUTE(unused) // NOLINTNEXTLINE
+#if CPPGC_HAS_ATTRIBUTE(unused)
#define CPPGC_UNUSED __attribute__((unused))
#else
#define CPPGC_UNUSED
diff --git a/chromium/v8/include/cppgc/internal/pointer-policies.h b/chromium/v8/include/cppgc/internal/pointer-policies.h
index ceb002f02d5..e09b86199f4 100644
--- a/chromium/v8/include/cppgc/internal/pointer-policies.h
+++ b/chromium/v8/include/cppgc/internal/pointer-policies.h
@@ -9,12 +9,15 @@
#include <type_traits>
#include "cppgc/internal/write-barrier.h"
+#include "cppgc/sentinel-pointer.h"
#include "cppgc/source-location.h"
+#include "cppgc/type-traits.h"
#include "v8config.h" // NOLINT(build/include_directory)
namespace cppgc {
namespace internal {
+class HeapBase;
class PersistentRegion;
class CrossThreadPersistentRegion;
@@ -50,11 +53,31 @@ struct NoWriteBarrierPolicy {
class V8_EXPORT EnabledCheckingPolicy {
protected:
- EnabledCheckingPolicy();
- void CheckPointer(const void* ptr);
+ template <typename T>
+ void CheckPointer(const T* ptr) {
+ if (!ptr || (kSentinelPointer == ptr)) return;
+
+ CheckPointersImplTrampoline<T>::Call(this, ptr);
+ }
private:
- void* impl_;
+ void CheckPointerImpl(const void* ptr, bool points_to_payload);
+
+ template <typename T, bool = IsCompleteV<T>>
+ struct CheckPointersImplTrampoline {
+ static void Call(EnabledCheckingPolicy* policy, const T* ptr) {
+ policy->CheckPointerImpl(ptr, false);
+ }
+ };
+
+ template <typename T>
+ struct CheckPointersImplTrampoline<T, true> {
+ static void Call(EnabledCheckingPolicy* policy, const T* ptr) {
+ policy->CheckPointerImpl(ptr, IsGarbageCollectedTypeV<T>);
+ }
+ };
+
+ const HeapBase* heap_ = nullptr;
};
class DisabledCheckingPolicy {
@@ -63,9 +86,11 @@ class DisabledCheckingPolicy {
};
#if V8_ENABLE_CHECKS
-using DefaultCheckingPolicy = EnabledCheckingPolicy;
+using DefaultMemberCheckingPolicy = EnabledCheckingPolicy;
+using DefaultPersistentCheckingPolicy = EnabledCheckingPolicy;
#else
-using DefaultCheckingPolicy = DisabledCheckingPolicy;
+using DefaultMemberCheckingPolicy = DisabledCheckingPolicy;
+using DefaultPersistentCheckingPolicy = DisabledCheckingPolicy;
#endif
class KeepLocationPolicy {
@@ -133,10 +158,10 @@ template <typename T, typename WeaknessPolicy,
class BasicCrossThreadPersistent;
template <typename T, typename WeaknessPolicy,
typename LocationPolicy = DefaultLocationPolicy,
- typename CheckingPolicy = DefaultCheckingPolicy>
+ typename CheckingPolicy = DefaultPersistentCheckingPolicy>
class BasicPersistent;
template <typename T, typename WeaknessTag, typename WriteBarrierPolicy,
- typename CheckingPolicy = DefaultCheckingPolicy>
+ typename CheckingPolicy = DefaultMemberCheckingPolicy>
class BasicMember;
} // namespace internal
diff --git a/chromium/v8/include/cppgc/member.h b/chromium/v8/include/cppgc/member.h
index 7b76bc4f755..16aed060226 100644
--- a/chromium/v8/include/cppgc/member.h
+++ b/chromium/v8/include/cppgc/member.h
@@ -24,8 +24,11 @@ namespace internal {
// BasicMember on casting to the right type as needed.
class MemberBase {
protected:
+ struct AtomicInitializerTag {};
+
MemberBase() = default;
explicit MemberBase(const void* value) : raw_(value) {}
+ MemberBase(const void* value, AtomicInitializerTag) { SetRawAtomic(value); }
const void** GetRawSlot() const { return &raw_; }
const void* GetRaw() const { return raw_; }
@@ -61,6 +64,20 @@ class BasicMember final : private MemberBase, private CheckingPolicy {
this->CheckPointer(Get());
}
BasicMember(T& raw) : BasicMember(&raw) {} // NOLINT
+ // Atomic ctor. Using the AtomicInitializerTag forces BasicMember to
+ // initialize using atomic assignments. This is required for preventing
+ // data races with concurrent marking.
+ using AtomicInitializerTag = MemberBase::AtomicInitializerTag;
+ BasicMember(std::nullptr_t, AtomicInitializerTag atomic)
+ : MemberBase(nullptr, atomic) {}
+ BasicMember(SentinelPointer s, AtomicInitializerTag atomic)
+ : MemberBase(s, atomic) {}
+ BasicMember(T* raw, AtomicInitializerTag atomic) : MemberBase(raw, atomic) {
+ InitializingWriteBarrier();
+ this->CheckPointer(Get());
+ }
+ BasicMember(T& raw, AtomicInitializerTag atomic)
+ : BasicMember(&raw, atomic) {}
// Copy ctor.
BasicMember(const BasicMember& other) : BasicMember(other.Get()) {}
// Allow heterogeneous construction.
@@ -79,9 +96,8 @@ class BasicMember final : private MemberBase, private CheckingPolicy {
template <typename U, typename OtherBarrierPolicy, typename OtherWeaknessTag,
typename OtherCheckingPolicy,
typename = std::enable_if_t<std::is_base_of<T, U>::value>>
- BasicMember( // NOLINT
- BasicMember<U, OtherWeaknessTag, OtherBarrierPolicy,
- OtherCheckingPolicy>&& other) noexcept
+ BasicMember(BasicMember<U, OtherWeaknessTag, OtherBarrierPolicy,
+ OtherCheckingPolicy>&& other) noexcept
: BasicMember(other.Get()) {
other.Clear();
}
@@ -90,10 +106,9 @@ class BasicMember final : private MemberBase, private CheckingPolicy {
typename PersistentLocationPolicy,
typename PersistentCheckingPolicy,
typename = std::enable_if_t<std::is_base_of<T, U>::value>>
- BasicMember( // NOLINT
- const BasicPersistent<U, PersistentWeaknessPolicy,
- PersistentLocationPolicy, PersistentCheckingPolicy>&
- p)
+ BasicMember(const BasicPersistent<U, PersistentWeaknessPolicy,
+ PersistentLocationPolicy,
+ PersistentCheckingPolicy>& p)
: BasicMember(p.Get()) {}
// Copy assignment.
@@ -161,7 +176,7 @@ class BasicMember final : private MemberBase, private CheckingPolicy {
}
explicit operator bool() const { return Get(); }
- operator T*() const { return Get(); } // NOLINT
+ operator T*() const { return Get(); }
T* operator->() const { return Get(); }
T& operator*() const { return *Get(); }
diff --git a/chromium/v8/include/cppgc/persistent.h b/chromium/v8/include/cppgc/persistent.h
index d7aac723c0d..22cda7c6e8f 100644
--- a/chromium/v8/include/cppgc/persistent.h
+++ b/chromium/v8/include/cppgc/persistent.h
@@ -95,7 +95,7 @@ class BasicPersistent final : public PersistentBase,
template <typename U, typename OtherWeaknessPolicy,
typename OtherLocationPolicy, typename OtherCheckingPolicy,
typename = std::enable_if_t<std::is_base_of<T, U>::value>>
- BasicPersistent( // NOLINT
+ BasicPersistent(
const BasicPersistent<U, OtherWeaknessPolicy, OtherLocationPolicy,
OtherCheckingPolicy>& other,
const SourceLocation& loc = SourceLocation::Current())
@@ -118,7 +118,7 @@ class BasicPersistent final : public PersistentBase,
template <typename U, typename MemberBarrierPolicy,
typename MemberWeaknessTag, typename MemberCheckingPolicy,
typename = std::enable_if_t<std::is_base_of<T, U>::value>>
- BasicPersistent(internal::BasicMember<U, MemberBarrierPolicy, // NOLINT
+ BasicPersistent(internal::BasicMember<U, MemberBarrierPolicy,
MemberWeaknessTag, MemberCheckingPolicy>
member,
const SourceLocation& loc = SourceLocation::Current())
@@ -181,7 +181,7 @@ class BasicPersistent final : public PersistentBase,
}
explicit operator bool() const { return Get(); }
- operator T*() const { return Get(); } // NOLINT
+ operator T*() const { return Get(); }
T* operator->() const { return Get(); }
T& operator*() const { return *Get(); }
diff --git a/chromium/v8/include/cppgc/sentinel-pointer.h b/chromium/v8/include/cppgc/sentinel-pointer.h
index f7915834e5a..b049d1a2b34 100644
--- a/chromium/v8/include/cppgc/sentinel-pointer.h
+++ b/chromium/v8/include/cppgc/sentinel-pointer.h
@@ -14,7 +14,7 @@ namespace internal {
// sentinel is defined by the embedder.
struct SentinelPointer {
template <typename T>
- operator T*() const { // NOLINT
+ operator T*() const {
static constexpr intptr_t kSentinelValue = 1;
return reinterpret_cast<T*>(kSentinelValue);
}
diff --git a/chromium/v8/include/cppgc/type-traits.h b/chromium/v8/include/cppgc/type-traits.h
index 2b50a2164b2..56cd55d61e2 100644
--- a/chromium/v8/include/cppgc/type-traits.h
+++ b/chromium/v8/include/cppgc/type-traits.h
@@ -7,6 +7,7 @@
// This file should stay with minimal dependencies to allow embedder to check
// against Oilpan types without including any other parts.
+#include <cstddef>
#include <type_traits>
namespace cppgc {
@@ -164,6 +165,18 @@ struct IsUntracedMemberType : std::false_type {};
template <typename T>
struct IsUntracedMemberType<T, true> : std::true_type {};
+template <typename T>
+struct IsComplete {
+ private:
+ template <typename U, size_t = sizeof(U)>
+ static std::true_type IsSizeOfKnown(U*);
+ static std::false_type IsSizeOfKnown(...);
+
+ public:
+ static constexpr bool value =
+ decltype(IsSizeOfKnown(std::declval<T*>()))::value;
+};
+
} // namespace internal
/**
@@ -223,6 +236,12 @@ constexpr bool IsWeakMemberTypeV = internal::IsWeakMemberType<T>::value;
template <typename T>
constexpr bool IsWeakV = internal::IsWeak<T>::value;
+/**
+ * Value is true for types that are complete, and false otherwise.
+ */
+template <typename T>
+constexpr bool IsCompleteV = internal::IsComplete<T>::value;
+
} // namespace cppgc
#endif // INCLUDE_CPPGC_TYPE_TRAITS_H_
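The new trait can be checked at compile time; a small sketch with illustrative types:

    #include "cppgc/type-traits.h"

    class OnlyDeclared;   // forward declaration only, incomplete here
    class Defined {};     // complete type

    static_assert(!cppgc::IsCompleteV<OnlyDeclared>,
                  "sizeof() is unknown, so the type reports as incomplete");
    static_assert(cppgc::IsCompleteV<Defined>,
                  "a defined type reports as complete");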
diff --git a/chromium/v8/include/js_protocol.pdl b/chromium/v8/include/js_protocol.pdl
index 666952f27b9..9c0483ae705 100644
--- a/chromium/v8/include/js_protocol.pdl
+++ b/chromium/v8/include/js_protocol.pdl
@@ -267,7 +267,7 @@ domain Debugger
BreakpointId breakpointId
# Restarts particular call frame from the beginning.
- command restartFrame
+ deprecated command restartFrame
parameters
# Call frame identifier to evaluate on.
CallFrameId callFrameId
@@ -707,6 +707,8 @@ experimental domain HeapProfiler
# when the tracking is stopped.
optional boolean reportProgress
optional boolean treatGlobalObjectsAsRoots
+ # If true, numerical values are included in the snapshot
+ optional boolean captureNumericValue
command takeHeapSnapshot
parameters
@@ -714,6 +716,8 @@ experimental domain HeapProfiler
optional boolean reportProgress
    # If true, a raw snapshot without artificial roots will be generated
optional boolean treatGlobalObjectsAsRoots
+ # If true, numerical values are included in the snapshot
+ optional boolean captureNumericValue
event addHeapSnapshotChunk
parameters
@@ -1563,7 +1567,10 @@ domain Runtime
# execution context. If omitted and `executionContextName` is not set,
# the binding is exposed to all execution contexts of the target.
# This parameter is mutually exclusive with `executionContextName`.
- optional ExecutionContextId executionContextId
+ # Deprecated in favor of `executionContextName` due to an unclear use case
+ # and bugs in implementation (crbug.com/1169639). `executionContextId` will be
+ # removed in the future.
+ deprecated optional ExecutionContextId executionContextId
# If specified, the binding is exposed to the executionContext with
# matching name, even for contexts created after the binding is added.
# See also `ExecutionContext.name` and `worldName` parameter to
diff --git a/chromium/v8/include/v8-cppgc.h b/chromium/v8/include/v8-cppgc.h
index fba35f71c9a..745fb04347e 100644
--- a/chromium/v8/include/v8-cppgc.h
+++ b/chromium/v8/include/v8-cppgc.h
@@ -28,6 +28,8 @@ namespace internal {
class CppHeap;
} // namespace internal
+class CustomSpaceStatisticsReceiver;
+
/**
* Describes how V8 wrapper objects maintain references to garbage-collected C++
* objects.
@@ -120,6 +122,16 @@ class V8_EXPORT CppHeap {
cppgc::HeapStatistics::DetailLevel detail_level);
/**
+ * Collects statistics for the given spaces and reports them to the receiver.
+ *
+   * \param custom_spaces a collection of custom space indices.
+ * \param receiver an object that gets the results.
+ */
+ void CollectCustomSpaceStatisticsAtLastGC(
+ std::vector<cppgc::CustomSpaceIndex> custom_spaces,
+ std::unique_ptr<CustomSpaceStatisticsReceiver> receiver);
+
+ /**
* Enables a detached mode that allows testing garbage collection using
* `cppgc::testing` APIs. Once used, the heap cannot be attached to an
* `Isolate` anymore.
@@ -277,6 +289,26 @@ class V8_EXPORT JSHeapConsistency final {
const TracedReferenceBase& ref);
};
+/**
+ * Provided as input to `CppHeap::CollectCustomSpaceStatisticsAtLastGC()`.
+ *
+ * Its method is invoked with the results of the statistics collection.
+ */
+class CustomSpaceStatisticsReceiver {
+ public:
+ virtual ~CustomSpaceStatisticsReceiver() = default;
+ /**
+ * Reports the size of a space at the last GC. It is called for each space
+ * that was requested in `CollectCustomSpaceStatisticsAtLastGC()`.
+ *
+ * \param space_index The index of the space.
+ * \param bytes The total size of live objects in the space at the last GC.
+ * It is zero if there was no GC yet.
+ */
+ virtual void AllocatedBytes(cppgc::CustomSpaceIndex space_index,
+ size_t bytes) = 0;
+};
+
} // namespace v8
namespace cppgc {
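A hedged sketch of the new statistics API from the embedder side, assuming a CppHeap that is already attached and a single custom space registered at index 0; LogReceiver is illustrative, not part of the patch:

    #include <memory>
    #include <vector>
    #include "v8-cppgc.h"

    class LogReceiver final : public v8::CustomSpaceStatisticsReceiver {
     public:
      void AllocatedBytes(cppgc::CustomSpaceIndex space_index,
                          size_t bytes) override {
        // Forward the per-space live size at the last GC to the embedder's
        // metrics; bytes is zero if no GC has happened yet.
      }
    };

    void ReportCustomSpaceUsage(v8::CppHeap* cpp_heap) {
      cpp_heap->CollectCustomSpaceStatisticsAtLastGC(
          {cppgc::CustomSpaceIndex(0)}, std::make_unique<LogReceiver>());
    }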
diff --git a/chromium/v8/include/v8-fast-api-calls.h b/chromium/v8/include/v8-fast-api-calls.h
index f8b5acb0934..cdf67decf67 100644
--- a/chromium/v8/include/v8-fast-api-calls.h
+++ b/chromium/v8/include/v8-fast-api-calls.h
@@ -70,8 +70,7 @@
* return GetInternalField<CustomEmbedderType,
* kV8EmbedderWrapperObjectIndex>(wrapper);
* }
- * static void FastMethod(v8::ApiObject receiver_obj, int param) {
- * v8::Object* v8_object = reinterpret_cast<v8::Object*>(&api_object);
+ * static void FastMethod(v8::Local<v8::Object> receiver_obj, int param) {
* CustomEmbedderType* receiver = static_cast<CustomEmbedderType*>(
* receiver_obj->GetAlignedPointerFromInternalField(
* kV8EmbedderWrapperObjectIndex));
@@ -190,10 +189,13 @@
#include <tuple>
#include <type_traits>
+#include "v8.h" // NOLINT(build/include_directory)
#include "v8config.h" // NOLINT(build/include_directory)
namespace v8 {
+class Isolate;
+
class CTypeInfo {
public:
enum class Type : uint8_t {
@@ -206,6 +208,8 @@ class CTypeInfo {
kFloat32,
kFloat64,
kV8Value,
+ kApiObject, // This will be deprecated once all users have
+ // migrated from v8::ApiObject to v8::Local<v8::Value>.
};
// kCallbackOptionsType is not part of the Type enum
@@ -310,7 +314,7 @@ class V8_EXPORT CFunction {
};
};
-struct ApiObject {
+struct V8_DEPRECATE_SOON("Use v8::Local<v8::Value> instead.") ApiObject {
uintptr_t address;
};
@@ -322,6 +326,14 @@ struct ApiObject {
*/
struct FastApiCallbackOptions {
/**
+   * Creates a new instance of FastApiCallbackOptions for testing purposes. The
+ * returned instance may be filled with mock data.
+ */
+ static FastApiCallbackOptions CreateForTesting(Isolate* isolate) {
+ return {false, {0}};
+ }
+
+ /**
* If the callback wants to signal an error condition or to perform an
* allocation, it must set options.fallback to true and do an early return
* from the fast method. Then V8 checks the value of options.fallback and if
@@ -336,8 +348,12 @@ struct FastApiCallbackOptions {
/**
* The `data` passed to the FunctionTemplate constructor, or `undefined`.
+ * `data_ptr` allows for default constructing FastApiCallbackOptions.
*/
- const ApiObject data;
+ union {
+ uintptr_t data_ptr;
+ v8::Value data;
+ };
};
namespace internal {
@@ -398,16 +414,22 @@ struct TypeInfoHelper {
static constexpr CTypeInfo::Type Type() { return CTypeInfo::Type::Enum; } \
};
-#define BASIC_C_TYPES(V) \
- V(void, kVoid) \
- V(bool, kBool) \
- V(int32_t, kInt32) \
- V(uint32_t, kUint32) \
- V(int64_t, kInt64) \
- V(uint64_t, kUint64) \
- V(float, kFloat32) \
- V(double, kFloat64) \
- V(ApiObject, kV8Value)
+#define BASIC_C_TYPES(V) \
+ V(void, kVoid) \
+ V(bool, kBool) \
+ V(int32_t, kInt32) \
+ V(uint32_t, kUint32) \
+ V(int64_t, kInt64) \
+ V(uint64_t, kUint64) \
+ V(float, kFloat32) \
+ V(double, kFloat64) \
+ V(ApiObject, kApiObject) \
+ V(v8::Local<v8::Value>, kV8Value) \
+ V(v8::Local<v8::Object>, kV8Value)
+
+// ApiObject was a temporary solution to wrap the pointer to the v8::Value.
+// Please use v8::Local<v8::Value> in new code for the arguments and
+// v8::Local<v8::Object> for the receiver, as ApiObject will be deprecated.
BASIC_C_TYPES(SPECIALIZE_GET_TYPE_INFO_HELPER_FOR)
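A short sketch of a fast callback written against the new receiver type; EmbedderCounter and the use of internal field 0 are assumptions for illustration, not part of the patch:

    #include "v8-fast-api-calls.h"

    struct EmbedderCounter { int value = 0; };  // illustrative embedder type

    static void FastIncrement(v8::Local<v8::Object> receiver_obj, int by) {
      // Assumes the embedder stored its native pointer in internal field 0.
      auto* counter = static_cast<EmbedderCounter*>(
          receiver_obj->GetAlignedPointerFromInternalField(0));
      counter->value += by;
    }

    static const v8::CFunction kFastIncrement =
        v8::CFunction::Make(FastIncrement);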
diff --git a/chromium/v8/include/v8-inspector.h b/chromium/v8/include/v8-inspector.h
index a55518e4593..852b39d7252 100644
--- a/chromium/v8/include/v8-inspector.h
+++ b/chromium/v8/include/v8-inspector.h
@@ -105,8 +105,9 @@ class V8_EXPORT V8StackTrace {
virtual StringView topSourceURL() const = 0;
virtual int topLineNumber() const = 0;
virtual int topColumnNumber() const = 0;
- virtual StringView topScriptId() const = 0;
- virtual int topScriptIdAsInteger() const = 0;
+ virtual int topScriptId() const = 0;
+ V8_DEPRECATE_SOON("Use V8::StackTrace::topScriptId() instead.")
+ int topScriptIdAsInteger() const { return topScriptId(); }
virtual StringView topFunctionName() const = 0;
virtual ~V8StackTrace() = default;
@@ -130,6 +131,10 @@ class V8_EXPORT V8InspectorSession {
virtual v8::Local<v8::Value> get(v8::Local<v8::Context>) = 0;
virtual ~Inspectable() = default;
};
+ class V8_EXPORT CommandLineAPIScope {
+ public:
+ virtual ~CommandLineAPIScope() = default;
+ };
virtual void addInspectedObject(std::unique_ptr<Inspectable>) = 0;
// Dispatching protocol messages.
@@ -139,6 +144,9 @@ class V8_EXPORT V8InspectorSession {
virtual std::vector<std::unique_ptr<protocol::Schema::API::Domain>>
supportedDomains() = 0;
+ virtual std::unique_ptr<V8InspectorSession::CommandLineAPIScope>
+ initializeCommandLineAPIScope(int executionContextId) = 0;
+
// Debugger actions.
virtual void schedulePauseOnNextStatement(StringView breakReason,
StringView breakDetails) = 0;
diff --git a/chromium/v8/include/v8-internal.h b/chromium/v8/include/v8-internal.h
index eb18f76504d..f289149d8d0 100644
--- a/chromium/v8/include/v8-internal.h
+++ b/chromium/v8/include/v8-internal.h
@@ -33,6 +33,7 @@ const int kApiSystemPointerSize = sizeof(void*);
const int kApiDoubleSize = sizeof(double);
const int kApiInt32Size = sizeof(int32_t);
const int kApiInt64Size = sizeof(int64_t);
+const int kApiSizetSize = sizeof(size_t);
// Tag information for HeapObject.
const int kHeapObjectTag = 1;
@@ -40,6 +41,13 @@ const int kWeakHeapObjectTag = 3;
const int kHeapObjectTagSize = 2;
const intptr_t kHeapObjectTagMask = (1 << kHeapObjectTagSize) - 1;
+// Tag information for forwarding pointers stored in object headers.
+// 0b00 at the lowest 2 bits in the header indicates that the map word is a
+// forwarding pointer.
+const int kForwardingTag = 0;
+const int kForwardingTagSize = 2;
+const intptr_t kForwardingTagMask = (1 << kForwardingTagSize) - 1;
+
// Tag information for Smi.
const int kSmiTag = 0;
const int kSmiTagSize = 1;
@@ -120,23 +128,28 @@ constexpr bool HeapSandboxIsEnabled() {
using ExternalPointer_t = Address;
-// If the heap sandbox is enabled, these tag values will be XORed with the
+// If the heap sandbox is enabled, these tag values will be ORed with the
// external pointers in the external pointer table to prevent use of pointers of
-// the wrong type.
-enum ExternalPointerTag : Address {
- kExternalPointerNullTag = static_cast<Address>(0ULL),
- kArrayBufferBackingStoreTag = static_cast<Address>(1ULL << 48),
- kTypedArrayExternalPointerTag = static_cast<Address>(2ULL << 48),
- kDataViewDataPointerTag = static_cast<Address>(3ULL << 48),
- kExternalStringResourceTag = static_cast<Address>(4ULL << 48),
- kExternalStringResourceDataTag = static_cast<Address>(5ULL << 48),
- kForeignForeignAddressTag = static_cast<Address>(6ULL << 48),
- kNativeContextMicrotaskQueueTag = static_cast<Address>(7ULL << 48),
- // TODO(v8:10391, saelo): Currently has to be zero so that raw zero values are
- // also nullptr
- kEmbedderDataSlotPayloadTag = static_cast<Address>(0ULL << 48),
+// the wrong type. When a pointer is loaded, it is ANDed with the inverse of the
+// expected type's tag. The tags are constructed in a way that guarantees that a
+// failed type check will result in one or more of the top bits of the pointer
+// being set, rendering the pointer inaccessible. This construction allows
+// performing the type check and removing GC marking bits from the pointer at
+// the same time.
+enum ExternalPointerTag : uint64_t {
+ kExternalPointerNullTag = 0x0000000000000000,
+ kArrayBufferBackingStoreTag = 0x00ff000000000000, // 0b000000011111111
+ kTypedArrayExternalPointerTag = 0x017f000000000000, // 0b000000101111111
+ kDataViewDataPointerTag = 0x01bf000000000000, // 0b000000110111111
+ kExternalStringResourceTag = 0x01df000000000000, // 0b000000111011111
+ kExternalStringResourceDataTag = 0x01ef000000000000, // 0b000000111101111
+ kForeignForeignAddressTag = 0x01f7000000000000, // 0b000000111110111
+ kNativeContextMicrotaskQueueTag = 0x01fb000000000000, // 0b000000111111011
+ kEmbedderDataSlotPayloadTag = 0x01fd000000000000, // 0b000000111111101
};
+constexpr uint64_t kExternalPointerTagMask = 0xffff000000000000;
+
#ifdef V8_31BIT_SMIS_ON_64BIT_ARCH
using PlatformSmiTagging = SmiTagging<kApiInt32Size>;
#else
@@ -177,6 +190,14 @@ V8_EXPORT bool ShouldThrowOnError(v8::internal::Isolate* isolate);
* depend on functions and constants defined here.
*/
class Internals {
+#ifdef V8_MAP_PACKING
+ V8_INLINE static constexpr internal::Address UnpackMapWord(
+ internal::Address mapword) {
+ // TODO(wenyuzhao): Clear header metadata.
+ return mapword ^ kMapWordXorMask;
+ }
+#endif
+
public:
// These values match non-compiler-dependent values defined within
// the implementation of v8.
@@ -209,8 +230,12 @@ class Internals {
kIsolateFastCCallCallerFpOffset + kApiSystemPointerSize;
static const int kIsolateFastApiCallTargetOffset =
kIsolateFastCCallCallerPcOffset + kApiSystemPointerSize;
- static const int kIsolateStackGuardOffset =
+ static const int kIsolateCageBaseOffset =
kIsolateFastApiCallTargetOffset + kApiSystemPointerSize;
+ static const int kIsolateLongTaskStatsCounterOffset =
+ kIsolateCageBaseOffset + kApiSystemPointerSize;
+ static const int kIsolateStackGuardOffset =
+ kIsolateLongTaskStatsCounterOffset + kApiSizetSize;
static const int kIsolateRootsOffset =
kIsolateStackGuardOffset + 7 * kApiSystemPointerSize;
@@ -253,6 +278,17 @@ class Internals {
// incremental GC once the external memory reaches this limit.
static constexpr int kExternalAllocationSoftLimit = 64 * 1024 * 1024;
+#ifdef V8_MAP_PACKING
+ static const uintptr_t kMapWordMetadataMask = 0xffffULL << 48;
+ // The lowest two bits of mapwords are always `0b10`
+ static const uintptr_t kMapWordSignature = 0b10;
+ // XORing a (non-compressed) map with this mask ensures that the two
+ // low-order bits are 0b10. The 0 at the end makes this look like a Smi,
+ // although real Smis have all lower 32 bits unset. We only rely on these
+ // values passing as Smis in very few places.
+ static const int kMapWordXorMask = 0b11;
+#endif
+
V8_EXPORT static void CheckInitializedImpl(v8::Isolate* isolate);
V8_INLINE static void CheckInitialized(v8::Isolate* isolate) {
#ifdef V8_ENABLE_CHECKS
@@ -279,6 +315,9 @@ class Internals {
V8_INLINE static int GetInstanceType(const internal::Address obj) {
typedef internal::Address A;
A map = ReadTaggedPointerField(obj, kHeapObjectMapOffset);
+#ifdef V8_MAP_PACKING
+ map = UnpackMapWord(map);
+#endif
return ReadRawField<uint16_t>(map, kMapInstanceTypeOffset);
}
@@ -329,6 +368,12 @@ class Internals {
return *reinterpret_cast<void* const*>(addr);
}
+ V8_INLINE static void IncrementLongTasksStatsCounter(v8::Isolate* isolate) {
+ internal::Address addr = reinterpret_cast<internal::Address>(isolate) +
+ kIsolateLongTaskStatsCounterOffset;
+ ++(*reinterpret_cast<size_t*>(addr));
+ }
+
V8_INLINE static internal::Address* GetRoot(v8::Isolate* isolate, int index) {
internal::Address addr = reinterpret_cast<internal::Address>(isolate) +
kIsolateRootsOffset +
diff --git a/chromium/v8/include/v8-metrics.h b/chromium/v8/include/v8-metrics.h
index 0217f40d63a..2404cc0a4c3 100644
--- a/chromium/v8/include/v8-metrics.h
+++ b/chromium/v8/include/v8-metrics.h
@@ -5,7 +5,8 @@
#ifndef V8_METRICS_H_
#define V8_METRICS_H_
-#include "v8.h" // NOLINT(build/include_directory)
+#include "v8-internal.h" // NOLINT(build/include_directory)
+#include "v8.h" // NOLINT(build/include_directory)
namespace v8 {
namespace metrics {
@@ -183,6 +184,32 @@ class V8_EXPORT Recorder {
static ContextId GetContextId(Local<Context> context);
};
+/**
+ * Experimental API intended for the LongTasks UKM (crbug.com/1173527).
+ * The Reset() method should be called at the start of a potential
+ * long task. The Get() method returns durations of V8 work that
+ * happened during the task.
+ *
+ * This API is experimental and may be removed/changed in the future.
+ */
+struct V8_EXPORT LongTaskStats {
+ /**
+ * Resets durations of V8 work for the new task.
+ */
+ V8_INLINE static void Reset(Isolate* isolate) {
+ v8::internal::Internals::IncrementLongTasksStatsCounter(isolate);
+ }
+
+ /**
+ * Returns durations of V8 work that happened since the last Reset().
+ */
+ static LongTaskStats Get(Isolate* isolate);
+
+ int64_t gc_full_atomic_wall_clock_duration_us = 0;
+ int64_t gc_full_incremental_wall_clock_duration_us = 0;
+ int64_t gc_young_wall_clock_duration_us = 0;
+};
+
} // namespace metrics
} // namespace v8
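A sketch of the intended embedder flow around a potentially long task, assuming a valid v8::Isolate*:

    #include "v8-metrics.h"

    void RunPotentialLongTask(v8::Isolate* isolate) {
      v8::metrics::LongTaskStats::Reset(isolate);
      // ... execute the task, possibly entering V8 ...
      v8::metrics::LongTaskStats stats =
          v8::metrics::LongTaskStats::Get(isolate);
      // stats.gc_full_atomic_wall_clock_duration_us and friends now cover
      // only the V8 work done since Reset().
    }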
diff --git a/chromium/v8/include/v8-profiler.h b/chromium/v8/include/v8-profiler.h
index 85d3f8a4821..9a40cfcf307 100644
--- a/chromium/v8/include/v8-profiler.h
+++ b/chromium/v8/include/v8-profiler.h
@@ -289,8 +289,8 @@ class V8_EXPORT CpuProfilingOptions {
* interval, set via SetSamplingInterval(). If
* zero, the sampling interval will be equal to
* the profiler's sampling interval.
- * \param filter_context Deprecated option to filter by context, currently a
- * no-op.
+ * \param filter_context If specified, profiles will only contain frames
+ * using this context. Other frames will be elided.
*/
CpuProfilingOptions(
CpuProfilingMode mode = kLeafNodeLineNumbers,
@@ -304,9 +304,13 @@ class V8_EXPORT CpuProfilingOptions {
private:
friend class internal::CpuProfile;
+ bool has_filter_context() const { return !filter_context_.IsEmpty(); }
+ void* raw_filter_context() const;
+
CpuProfilingMode mode_;
unsigned max_samples_;
int sampling_interval_us_;
+ CopyablePersistentTraits<Context>::CopyablePersistent filter_context_;
};
/**
@@ -492,7 +496,7 @@ class V8_EXPORT HeapGraphNode {
/**
* An interface for exporting data from V8, using "push" model.
*/
-class V8_EXPORT OutputStream { // NOLINT
+class V8_EXPORT OutputStream {
public:
enum WriteResult {
kContinue = 0,
@@ -519,7 +523,6 @@ class V8_EXPORT OutputStream { // NOLINT
}
};
-
/**
* HeapSnapshots record the state of the JS heap at some moment.
*/
@@ -586,7 +589,7 @@ class V8_EXPORT HeapSnapshot {
* An interface for reporting progress and controlling long-running
* activities.
*/
-class V8_EXPORT ActivityControl { // NOLINT
+class V8_EXPORT ActivityControl {
public:
enum ControlOption {
kContinue = 0,
@@ -600,7 +603,6 @@ class V8_EXPORT ActivityControl { // NOLINT
virtual ControlOption ReportProgressValue(int done, int total) = 0;
};
-
/**
* AllocationProfile is a sampled profile of allocations done by the program.
* This is structured as a call-graph.
@@ -900,7 +902,8 @@ class V8_EXPORT HeapProfiler {
const HeapSnapshot* TakeHeapSnapshot(
ActivityControl* control = nullptr,
ObjectNameResolver* global_object_name_resolver = nullptr,
- bool treat_global_objects_as_roots = true);
+ bool treat_global_objects_as_roots = true,
+ bool capture_numeric_value = false);
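A hedged call site for the extended signature, assuming a live isolate; only the last argument is new:

    const v8::HeapSnapshot* snapshot =
        isolate->GetHeapProfiler()->TakeHeapSnapshot(
            /*control=*/nullptr, /*global_object_name_resolver=*/nullptr,
            /*treat_global_objects_as_roots=*/true,
            /*capture_numeric_value=*/true);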
/**
* Starts tracking of heap objects population statistics. After calling
diff --git a/chromium/v8/include/v8-util.h b/chromium/v8/include/v8-util.h
index 89ec4f6a789..8e4d66153d1 100644
--- a/chromium/v8/include/v8-util.h
+++ b/chromium/v8/include/v8-util.h
@@ -43,7 +43,7 @@ class StdMapTraits {
static bool Empty(Impl* impl) { return impl->empty(); }
static size_t Size(Impl* impl) { return impl->size(); }
- static void Swap(Impl& a, Impl& b) { std::swap(a, b); } // NOLINT
+ static void Swap(Impl& a, Impl& b) { std::swap(a, b); }
static Iterator Begin(Impl* impl) { return impl->begin(); }
static Iterator End(Impl* impl) { return impl->end(); }
static K Key(Iterator it) { return it->first; }
diff --git a/chromium/v8/include/v8-version.h b/chromium/v8/include/v8-version.h
index 4bdb66b2bf6..4ad744f2a84 100644
--- a/chromium/v8/include/v8-version.h
+++ b/chromium/v8/include/v8-version.h
@@ -9,9 +9,9 @@
// NOTE these macros are used by some of the tool scripts and the build
// system so their names cannot be changed without changing the scripts.
#define V8_MAJOR_VERSION 9
-#define V8_MINOR_VERSION 1
-#define V8_BUILD_NUMBER 269
-#define V8_PATCH_LEVEL 38
+#define V8_MINOR_VERSION 2
+#define V8_BUILD_NUMBER 230
+#define V8_PATCH_LEVEL 30
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
diff --git a/chromium/v8/include/v8.h b/chromium/v8/include/v8.h
index 6b672ca750c..c4720ca6954 100644
--- a/chromium/v8/include/v8.h
+++ b/chromium/v8/include/v8.h
@@ -596,7 +596,7 @@ template <class T> class PersistentBase {
*/
V8_INLINE uint16_t WrapperClassId() const;
- PersistentBase(const PersistentBase& other) = delete; // NOLINT
+ PersistentBase(const PersistentBase& other) = delete;
void operator=(const PersistentBase&) = delete;
private:
@@ -708,7 +708,7 @@ template <class T, class M> class Persistent : public PersistentBase<T> {
return *this;
}
template <class S, class M2>
- V8_INLINE Persistent& operator=(const Persistent<S, M2>& that) { // NOLINT
+ V8_INLINE Persistent& operator=(const Persistent<S, M2>& that) {
Copy(that);
return *this;
}
@@ -723,7 +723,7 @@ template <class T, class M> class Persistent : public PersistentBase<T> {
// TODO(dcarney): this is pretty useless, fix or remove
template <class S>
- V8_INLINE static Persistent<T>& Cast(const Persistent<S>& that) { // NOLINT
+ V8_INLINE static Persistent<T>& Cast(const Persistent<S>& that) {
#ifdef V8_ENABLE_CHECKS
// If we're going to perform the type check then we have to check
// that the handle isn't empty before doing the checked cast.
@@ -734,7 +734,7 @@ template <class T, class M> class Persistent : public PersistentBase<T> {
// TODO(dcarney): this is pretty useless, fix or remove
template <class S>
- V8_INLINE Persistent<S>& As() const { // NOLINT
+ V8_INLINE Persistent<S>& As() const {
return Persistent<S>::Cast(*this);
}
@@ -803,7 +803,7 @@ class Global : public PersistentBase<T> {
/**
* Pass allows returning uniques from functions, etc.
*/
- Global Pass() { return static_cast<Global&&>(*this); } // NOLINT
+ Global Pass() { return static_cast<Global&&>(*this); }
/*
* For compatibility with Chromium's base::Bind (base::Passed).
@@ -905,8 +905,8 @@ class TracedReferenceBase {
* The exact semantics are:
* - Tracing garbage collections use |v8::EmbedderHeapTracer| or cppgc.
* - Non-tracing garbage collections refer to
- * |v8::EmbedderHeapTracer::IsRootForNonTracingGC()| whether the handle should
- * be treated as root or not.
+ * |v8::EmbedderRootsHandler::IsRoot()| whether the handle should
+ * be treated as root or not.
*
* Note that the base class cannot be instantiated itself. Choose from
* - TracedGlobal
@@ -1674,10 +1674,12 @@ class V8_EXPORT Module : public Data {
/**
* Evaluates the module and its dependencies.
*
- * If status is kInstantiated, run the module's code. On success, set status
- * to kEvaluated and return the completion value; on failure, set status to
- * kErrored and propagate the thrown exception (which is then also available
- * via |GetException|).
+ * If status is kInstantiated, run the module's code and return a Promise
+ * object. On success, set status to kEvaluated and resolve the Promise with
+ * the completion value; on failure, set status to kErrored and reject the
+ * Promise with the error.
+ *
+ * If IsGraphAsync() is false, the returned Promise is settled.
*/
V8_WARN_UNUSED_RESULT MaybeLocal<Value> Evaluate(Local<Context> context);
@@ -2424,7 +2426,7 @@ struct SampleInfo {
StateTag vm_state; // Current VM state.
void* external_callback_entry; // External callback address if VM is
// executing an external callback.
- void* top_context; // Incumbent native context address.
+ void* context; // Incumbent native context address.
};
struct MemoryRange {
@@ -3276,7 +3278,7 @@ class V8_EXPORT String : public Name {
*/
bool IsExternalOneByte() const;
- class V8_EXPORT ExternalStringResourceBase { // NOLINT
+ class V8_EXPORT ExternalStringResourceBase {
public:
virtual ~ExternalStringResourceBase() = default;
@@ -3626,10 +3628,9 @@ class V8_EXPORT Symbol : public Name {
/**
* Returns the description string of the symbol, or undefined if none.
*/
+ V8_DEPRECATE_SOON("Use Symbol::Description(isolate)")
Local<Value> Description() const;
-
- V8_DEPRECATED("Use Symbol::Description()")
- Local<Value> Name() const { return Description(); }
+ Local<Value> Description(Isolate* isolate) const;
/**
* Create a symbol. If description is not empty, it will be used as the
@@ -3980,8 +3981,7 @@ class V8_EXPORT Object : public Value {
//
// Returns true on success.
V8_WARN_UNUSED_RESULT Maybe<bool> DefineProperty(
- Local<Context> context, Local<Name> key,
- PropertyDescriptor& descriptor); // NOLINT(runtime/references)
+ Local<Context> context, Local<Name> key, PropertyDescriptor& descriptor);
V8_WARN_UNUSED_RESULT MaybeLocal<Value> Get(Local<Context> context,
Local<Value> key);
@@ -4771,17 +4771,6 @@ class V8_EXPORT Function : public Object {
Local<Value> GetDebugName() const;
/**
- * User-defined name assigned to the "displayName" property of this function.
- * Used to facilitate debugging and profiling of JavaScript code.
- */
- V8_DEPRECATED(
- "Use v8::Object::Get() instead to look up \"displayName\". "
- "V8 and DevTools no longer use \"displayName\" in stack "
- "traces, but the standard \"name\" property. "
- "See http://crbug.com/1177685.")
- Local<Value> GetDisplayName() const;
-
- /**
* Returns zero based line number of function body and
* kLineOffsetNotFound if no information available.
*/
@@ -5358,7 +5347,7 @@ class V8_EXPORT ArrayBuffer : public Object {
* Note that it is unsafe to call back into V8 from any of the allocator
* functions.
*/
- class V8_EXPORT Allocator { // NOLINT
+ class V8_EXPORT Allocator {
public:
virtual ~Allocator() = default;
@@ -6510,6 +6499,15 @@ class V8_EXPORT FunctionTemplate : public Template {
SideEffectType side_effect_type = SideEffectType::kHasSideEffect,
const CFunction* c_function = nullptr);
+ /** Creates a function template for multiple overloaded fast API calls.*/
+ static Local<FunctionTemplate> NewWithCFunctionOverloads(
+ Isolate* isolate, FunctionCallback callback = nullptr,
+ Local<Value> data = Local<Value>(),
+ Local<Signature> signature = Local<Signature>(), int length = 0,
+ ConstructorBehavior behavior = ConstructorBehavior::kAllow,
+ SideEffectType side_effect_type = SideEffectType::kHasSideEffect,
+ const MemorySpan<const CFunction>& c_function_overloads = {});
+
/**
* Creates a function template backed/cached by a private property.
*/
@@ -6541,7 +6539,7 @@ class V8_EXPORT FunctionTemplate : public Template {
void SetCallHandler(
FunctionCallback callback, Local<Value> data = Local<Value>(),
SideEffectType side_effect_type = SideEffectType::kHasSideEffect,
- const CFunction* c_function = nullptr);
+ const MemorySpan<const CFunction>& c_function_overloads = {});
/** Set the predefined length property for the FunctionTemplate. */
void SetLength(int length);
@@ -6602,6 +6600,15 @@ class V8_EXPORT FunctionTemplate : public Template {
*/
bool HasInstance(Local<Value> object);
+ /**
+ * Returns true if the given value is an API object that was constructed by an
+ * instance of this function template (without checking for inheriting
+ * function templates).
+ *
+ * This is an experimental feature and may still change significantly.
+ */
+ bool IsLeafTemplateForApiObject(v8::Local<v8::Value> value) const;
+
V8_INLINE static FunctionTemplate* Cast(Data* data);
private:
@@ -7035,7 +7042,7 @@ class V8_EXPORT AccessorSignature : public Data {
/**
* Ignore
*/
-class V8_EXPORT Extension { // NOLINT
+class V8_EXPORT Extension {
public:
// Note that the strings passed into this constructor must live as long
// as the Extension itself.
@@ -7137,6 +7144,11 @@ class V8_EXPORT ResourceConstraints {
/**
* The amount of virtual memory reserved for generated code. This is relevant
* for 64-bit architectures that rely on code range for calls in code.
+ *
+ * When V8_COMPRESS_POINTERS_IN_SHARED_CAGE is defined, there is a shared
+ * process-wide code range that is lazily initialized. This value is used to
+ * configure that shared code range when the first Isolate is
+ * created. Subsequent Isolates ignore this value.
*/
size_t code_range_size_in_bytes() const { return code_range_size_; }
void set_code_range_size_in_bytes(size_t limit) { code_range_size_ = limit; }
@@ -7587,6 +7599,10 @@ using WasmSimdEnabledCallback = bool (*)(Local<Context> context);
// --- Callback for checking if WebAssembly exceptions are enabled ---
using WasmExceptionsEnabledCallback = bool (*)(Local<Context> context);
+// --- Callback for checking if the SharedArrayBuffer constructor is enabled ---
+using SharedArrayBufferConstructorEnabledCallback =
+ bool (*)(Local<Context> context);
+
// --- Garbage Collection Callbacks ---
/**
@@ -7908,17 +7924,16 @@ using UnhandledExceptionCallback =
/**
* Interface for iterating through all external resources in the heap.
*/
-class V8_EXPORT ExternalResourceVisitor { // NOLINT
+class V8_EXPORT ExternalResourceVisitor {
public:
virtual ~ExternalResourceVisitor() = default;
virtual void VisitExternalString(Local<String> string) {}
};
-
/**
* Interface for iterating through all the persistent handles in the heap.
*/
-class V8_EXPORT PersistentHandleVisitor { // NOLINT
+class V8_EXPORT PersistentHandleVisitor {
public:
virtual ~PersistentHandleVisitor() = default;
virtual void VisitPersistentHandle(Persistent<Value>* value,
@@ -7936,6 +7951,45 @@ class V8_EXPORT PersistentHandleVisitor { // NOLINT
enum class MemoryPressureLevel { kNone, kModerate, kCritical };
/**
+ * Handler for embedder roots on non-unified heap garbage collections.
+ */
+class V8_EXPORT EmbedderRootsHandler {
+ public:
+ virtual ~EmbedderRootsHandler() = default;
+
+ /**
+ * Returns true if the TracedGlobal handle should be considered as root for
+ * the currently running non-tracing garbage collection and false otherwise.
+ * The default implementation will keep all TracedGlobal references as roots.
+ *
+ * If this returns false, then V8 may decide that the object referred to by
+ * such a handle is reclaimed. In that case:
+ * - No action is required if handles are used with destructors, i.e., by just
+ * using |TracedGlobal|.
+ * - When run without destructors, i.e., by using |TracedReference|, V8 calls
+ * |ResetRoot|.
+ *
+ * Note that the |handle| is different from the handle that the embedder holds
+ * for retaining the object. The embedder may use |WrapperClassId()| to
+ * distinguish cases where it wants handles to be treated as roots from not
+ * being treated as roots.
+ */
+ virtual bool IsRoot(const v8::TracedReference<v8::Value>& handle) = 0;
+ virtual bool IsRoot(const v8::TracedGlobal<v8::Value>& handle) = 0;
+
+ /**
+ * Used in combination with |IsRoot|. Called by V8 when an
+ * object that is backed by a handle is reclaimed by a non-tracing garbage
+ * collection. It is up to the embedder to reset the original handle.
+ *
+ * Note that the |handle| is different from the handle that the embedder holds
+ * for retaining the object. It is up to the embedder to find the original
+ * handle via the object or class id.
+ */
+ virtual void ResetRoot(const v8::TracedReference<v8::Value>& handle) = 0;
+};
+
+/**
* Interface for tracing through the embedder heap. During a V8 garbage
* collection, V8 collects hidden fields of all potential wrappers, and at the
* end of its marking phase iterates the collection and asks the embedder to
@@ -7997,6 +8051,9 @@ class V8_EXPORT EmbedderHeapTracer {
/**
* Called by the embedder to notify V8 of an empty execution stack.
*/
+ V8_DEPRECATE_SOON(
+ "This call only optimized internal caches which V8 is able to figure out "
+ "on its own now.")
void NotifyEmptyEmbedderStack();
/**
@@ -8060,34 +8117,14 @@ class V8_EXPORT EmbedderHeapTracer {
void FinalizeTracing();
/**
- * Returns true if the TracedGlobal handle should be considered as root for
- * the currently running non-tracing garbage collection and false otherwise.
- * The default implementation will keep all TracedGlobal references as roots.
- *
- * If this returns false, then V8 may decide that the object referred to by
- * such a handle is reclaimed. In that case:
- * - No action is required if handles are used with destructors, i.e., by just
- * using |TracedGlobal|.
- * - When run without destructors, i.e., by using
- * |TracedReference|, V8 calls |ResetHandleInNonTracingGC|.
- *
- * Note that the |handle| is different from the handle that the embedder holds
- * for retaining the object. The embedder may use |WrapperClassId()| to
- * distinguish cases where it wants handles to be treated as roots from not
- * being treated as roots.
+ * See documentation on EmbedderRootsHandler.
*/
virtual bool IsRootForNonTracingGC(
const v8::TracedReference<v8::Value>& handle);
virtual bool IsRootForNonTracingGC(const v8::TracedGlobal<v8::Value>& handle);
/**
- * Used in combination with |IsRootForNonTracingGC|. Called by V8 when an
- * object that is backed by a handle is reclaimed by a non-tracing garbage
- * collection. It is up to the embedder to reset the original handle.
- *
- * Note that the |handle| is different from the handle that the embedder holds
- * for retaining the object. It is up to the embedder to find the original
- * handle via the object or class id.
+ * See documentation on EmbedderRootsHandler.
*/
virtual void ResetHandleInNonTracingGC(
const v8::TracedReference<v8::Value>& handle);
@@ -8546,6 +8583,7 @@ class V8_EXPORT Isolate {
kWasmBulkMemory = 109, // Unused.
kWasmMultiValue = 110,
kWasmExceptionHandling = 111,
+ kInvalidatedMegaDOMProtector = 112,
// If you add new values here, you'll also need to update Chromium's:
// web_feature.mojom, use_counter_callback.cc, and enums.xml. V8 changes to
@@ -8882,6 +8920,17 @@ class V8_EXPORT Isolate {
Local<Context> GetIncumbentContext();
/**
+ * Schedules a v8::Exception::Error with the given message.
+ * See ThrowException for more details. Templatized to provide compile-time
+   * errors for overly long strings (see v8::String::NewFromUtf8Literal).
+ */
+ template <int N>
+ Local<Value> ThrowError(const char (&message)[N]) {
+ return ThrowError(String::NewFromUtf8Literal(this, message));
+ }
+ Local<Value> ThrowError(Local<String> message);
+
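A usage sketch; the literal overload validates the message length at compile time, while the Local<String> overload takes an already-built message:

    v8::Local<v8::Value> RejectInput(v8::Isolate* isolate) {
      return isolate->ThrowError("Unexpected input");  // literal overload
    }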
+ /**
* Schedules an exception to be thrown when returning to JavaScript. When an
* exception has been scheduled it is illegal to invoke any JavaScript
* operation; the caller must return immediately and only after the exception
@@ -8926,6 +8975,18 @@ class V8_EXPORT Isolate {
EmbedderHeapTracer* GetEmbedderHeapTracer();
/**
+   * Sets an embedder roots handler that V8 should consider when performing
+ * non-unified heap garbage collections.
+ *
+ * Using only EmbedderHeapTracer automatically sets up a default handler.
+ * The intended use case is for setting a custom handler after invoking
+ * `AttachCppHeap()`.
+ *
+ * V8 does not take ownership of the handler.
+ */
+ void SetEmbedderRootsHandler(EmbedderRootsHandler* handler);
+
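A sketch of a handler an embedder might install after AttachCppHeap(); kDroppableClassId and the wrapper-class-id convention are assumptions for illustration:

    class MyRootsHandler final : public v8::EmbedderRootsHandler {
     public:
      bool IsRoot(const v8::TracedReference<v8::Value>& handle) override {
        // Drop only references the embedder explicitly marked as droppable.
        return handle.WrapperClassId() != kDroppableClassId;
      }
      bool IsRoot(const v8::TracedGlobal<v8::Value>& handle) override {
        return true;  // TracedGlobal has a destructor; keep it rooted here.
      }
      void ResetRoot(const v8::TracedReference<v8::Value>& handle) override {
        // Locate the embedder-side reference via the wrapped object and
        // reset it there.
      }

     private:
      static constexpr uint16_t kDroppableClassId = 1;  // illustrative
    };

    // MyRootsHandler handler;
    // isolate->SetEmbedderRootsHandler(&handler);  // V8 takes no ownership.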
+ /**
* Attaches a managed C++ heap as an extension to the JavaScript heap. The
* embedder maintains ownership of the CppHeap. At most one C++ heap can be
* attached to V8.
@@ -9494,6 +9555,9 @@ class V8_EXPORT Isolate {
void SetWasmExceptionsEnabledCallback(WasmExceptionsEnabledCallback callback);
+ void SetSharedArrayBufferConstructorEnabledCallback(
+ SharedArrayBufferConstructorEnabledCallback callback);
+
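A sketch of the per-context gate, to be used together with the --enable-sharedarraybuffer-per-context flag; the always-false policy is a placeholder for the embedder's own cross-origin-isolation check:

    bool SharedArrayBufferEnabled(v8::Local<v8::Context> context) {
      // Illustrative policy: consult the embedder's notion of cross-origin
      // isolation for this context; a constant is shown here.
      return false;
    }

    // During isolate setup:
    // isolate->SetSharedArrayBufferConstructorEnabledCallback(
    //     SharedArrayBufferEnabled);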
/**
* This function can be called by the embedder to signal V8 that the dynamic
* enabling of features has finished. V8 can now set up dynamically added
@@ -9868,6 +9932,9 @@ class V8_EXPORT V8 {
* Notifies V8 that the process is cross-origin-isolated, which enables
* defining the SharedArrayBuffer function on the global object of Contexts.
*/
+ V8_DEPRECATED(
+ "Use the command line argument --enable-sharedarraybuffer-per-context "
+ "together with SetSharedArrayBufferConstructorEnabledCallback")
static void SetIsCrossOriginIsolated();
private:
@@ -10581,6 +10648,18 @@ class V8_EXPORT Context : public Data {
void SetContinuationPreservedEmbedderData(Local<Value> context);
/**
+ * Set or clear hooks to be invoked for promise lifecycle operations.
+ * To clear a hook, set it to an empty v8::Function. Each function will
+ * receive the observed promise as the first argument. If a chaining
+   * operation is used on a promise, the init hook will additionally receive
+ * the parent promise as the second argument.
+ */
+ void SetPromiseHooks(Local<Function> init_hook,
+ Local<Function> before_hook,
+ Local<Function> after_hook,
+ Local<Function> resolve_hook);
+
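A sketch of installing only the init and resolve hooks while clearing the other two, assuming init_fn and resolve_fn are v8::Local<v8::Function> values created for this context:

    context->SetPromiseHooks(init_fn,
                             v8::Local<v8::Function>(),  // clear before hook
                             v8::Local<v8::Function>(),  // clear after hook
                             resolve_fn);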
+ /**
* Stack-allocated class which sets the execution context for all
* operations executed within a local scope.
*/
diff --git a/chromium/v8/include/v8config.h b/chromium/v8/include/v8config.h
index acd34d7a1f2..c1bb691f878 100644
--- a/chromium/v8/include/v8config.h
+++ b/chromium/v8/include/v8config.h
@@ -310,10 +310,6 @@ path. Add it with -I<path> to the command line
// GCC doc: https://gcc.gnu.org/onlinedocs/gcc/Labels-as-Values.html
# define V8_HAS_COMPUTED_GOTO 1
-// Whether constexpr has full C++14 semantics, in particular that non-constexpr
-// code is allowed as long as it's not executed for any constexpr instantiation.
-# define V8_HAS_CXX14_CONSTEXPR 1
-
#elif defined(__GNUC__)
# define V8_CC_GNU 1
@@ -336,7 +332,10 @@ path. Add it with -I<path> to the command line
# define V8_HAS_ATTRIBUTE_UNUSED 1
# define V8_HAS_ATTRIBUTE_VISIBILITY 1
# define V8_HAS_ATTRIBUTE_WARN_UNUSED_RESULT (!V8_CC_INTEL)
-# define V8_HAS_CPP_ATTRIBUTE_NODISCARD (V8_HAS_CPP_ATTRIBUTE(nodiscard))
+
+// [[nodiscard]] does not work together with
+// __attribute__((visibility(""))) on GCC 7.4, which is why there is no define
+// for V8_HAS_CPP_ATTRIBUTE_NODISCARD. See https://crbug.com/v8/11707.
# define V8_HAS_BUILTIN_ASSUME_ALIGNED 1
# define V8_HAS_BUILTIN_CLZ 1
@@ -348,11 +347,6 @@ path. Add it with -I<path> to the command line
// GCC doc: https://gcc.gnu.org/onlinedocs/gcc/Labels-as-Values.html
#define V8_HAS_COMPUTED_GOTO 1
-// Whether constexpr has full C++14 semantics, in particular that non-constexpr
-// code is allowed as long as it's not executed for any constexpr instantiation.
-// GCC only supports this since version 6.
-# define V8_HAS_CXX14_CONSTEXPR (V8_GNUC_PREREQ(6, 0, 0))
-
#endif
#if defined(_MSC_VER)
diff --git a/chromium/v8/infra/mb/mb_config.pyl b/chromium/v8/infra/mb/mb_config.pyl
index c87192896ce..9c0c933cda7 100644
--- a/chromium/v8/infra/mb/mb_config.pyl
+++ b/chromium/v8/infra/mb/mb_config.pyl
@@ -65,6 +65,7 @@
'V8 Linux64 - debug builder': 'debug_x64',
'V8 Linux64 - dict tracking - debug - builder': 'debug_x64_dict_tracking_trybot',
'V8 Linux64 - custom snapshot - debug builder': 'debug_x64_custom',
+ 'V8 Linux64 - heap sandbox - debug - builder': 'debug_x64_heap_sandbox',
'V8 Linux64 - internal snapshot': 'release_x64_internal',
'V8 Linux64 - debug - header includes': 'debug_x64_header_includes',
'V8 Linux64 - shared': 'release_x64_shared_verify_heap',
@@ -101,6 +102,7 @@
# FYI.
'V8 iOS - sim': 'release_x64_ios_simulator',
'V8 Linux64 - debug - perfetto - builder': 'debug_x64_perfetto',
+ 'V8 Linux64 - debug - single generation - builder': 'debug_x64_single_generation',
'V8 Linux64 - pointer compression': 'release_x64_pointer_compression',
'V8 Linux64 - pointer compression without dchecks':
'release_x64_pointer_compression_without_dchecks',
@@ -136,9 +138,13 @@
'V8 Clusterfuzz Linux64 - debug builder': 'debug_x64',
'V8 Clusterfuzz Linux64 ASAN no inline - release builder':
'release_x64_asan_symbolized_verify_heap',
+ 'V8 Clusterfuzz Linux ASAN no inline - release builder':
+ 'release_x86_asan_symbolized_verify_heap',
'V8 Clusterfuzz Linux64 ASAN - debug builder': 'debug_x64_asan',
+ 'V8 Clusterfuzz Linux ASAN - debug builder': 'debug_x86_asan',
'V8 Clusterfuzz Linux64 ASAN arm64 - debug builder':
'debug_simulate_arm64_asan',
+ 'V8 Clusterfuzz Linux - debug builder': 'debug_x86',
'V8 Clusterfuzz Linux ASAN arm - debug builder':
'debug_simulate_arm_asan',
'V8 Clusterfuzz Linux64 CFI - release builder':
@@ -202,19 +208,23 @@
'v8_linux_gcc_compile_rel': 'release_x86_gcc_minimal_symbols',
'v8_linux_gcc_rel_ng': 'release_x86_gcc_minimal_symbols',
'v8_linux_shared_compile_rel': 'release_x86_shared_verify_heap',
+ 'v8_linux_vtunejit': 'debug_x86_vtunejit',
'v8_linux64_arm64_pointer_compression_rel_ng':
'release_simulate_arm64_pointer_compression',
'v8_linux64_dbg_ng': 'debug_x64_trybot',
'v8_linux64_dict_tracking_dbg_ng': 'debug_x64_dict_tracking_trybot',
'v8_linux64_gc_stress_custom_snapshot_dbg_ng': 'debug_x64_trybot_custom',
'v8_linux64_gcc_compile_dbg': 'debug_x64_gcc',
+ 'v8_linux64_gcov_coverage': 'release_x64_gcc_coverage',
'v8_linux64_header_includes_dbg': 'debug_x64_header_includes',
+ 'v8_linux64_heap_sandbox_dbg_ng': 'debug_x64_heap_sandbox',
'v8_linux64_fyi_rel_ng': 'release_x64_test_features_trybot',
'v8_linux64_nodcheck_rel_ng': 'release_x64',
'v8_linux64_perfetto_dbg_ng': 'debug_x64_perfetto',
'v8_linux64_pointer_compression_rel_ng': 'release_x64_pointer_compression',
'v8_linux64_rel_ng': 'release_x64_test_features_trybot',
'v8_linux64_shared_compile_rel': 'release_x64_shared_verify_heap',
+ 'v8_linux64_single_generation_dbg_ng': 'debug_x64_single_generation',
'v8_linux64_no_wasm_compile_rel': 'release_x64_webassembly_disabled',
'v8_linux64_verify_csa_rel_ng': 'release_x64_verify_csa',
'v8_linux64_asan_rel_ng': 'release_x64_asan_minimal_symbols',
@@ -464,8 +474,9 @@
'release_x64_cfi_clusterfuzz': [
'release_bot', 'x64', 'cfi_clusterfuzz'],
'release_x64_fuzzilli': [
- 'release_bot', 'x64', 'dcheck_always_on', 'v8_enable_slow_dchecks', 'v8_verify_heap',
- 'v8_verify_csa', 'v8_enable_verify_predictable', 'fuzzilli'],
+ 'release_bot', 'x64', 'dcheck_always_on', 'v8_enable_slow_dchecks',
+ 'v8_verify_heap', 'v8_verify_csa', 'v8_enable_verify_predictable',
+ 'fuzzilli'],
'release_x64_msvc': [
'release_bot_no_goma', 'x64', 'minimal_symbols', 'msvc'],
'release_x64_correctness_fuzzer' : [
@@ -475,8 +486,8 @@
'release_x64_fuchsia_trybot': [
'release_trybot', 'x64', 'fuchsia'],
'release_x64_gcc_coverage': [
- 'release_bot_no_goma', 'x64', 'coverage', 'gcc', 'no_custom_libcxx',
- 'no_sysroot'],
+ 'release_bot_no_goma', 'x64', 'coverage', 'gcc', 'lld',
+ 'no_custom_libcxx', 'no_sysroot'],
'release_x64_ios_simulator': [
'release_bot', 'x64', 'ios_simulator'],
'release_x64_internal': [
@@ -529,13 +540,17 @@
'debug_x64_fuchsia': [
'debug_bot', 'x64', 'fuchsia'],
'debug_x64_gcc': [
- 'debug_bot_no_goma', 'x64', 'gcc', 'v8_check_header_includes'],
+ 'debug_bot_no_goma', 'x64', 'gcc', 'lld', 'v8_check_header_includes'],
'debug_x64_header_includes': [
'debug_bot', 'x64', 'v8_check_header_includes'],
+ 'debug_x64_heap_sandbox': [
+ 'debug_bot', 'x64', 'v8_enable_heap_sandbox'],
'debug_x64_minimal_symbols': [
'debug_bot', 'x64', 'minimal_symbols'],
'debug_x64_perfetto': [
'debug_bot', 'x64', 'perfetto'],
+ 'debug_x64_single_generation': [
+ 'debug_bot', 'x64', 'v8_enable_single_generation'],
'debug_x64_trybot': [
'debug_trybot', 'x64'],
'debug_x64_dict_tracking_trybot': [
@@ -548,6 +563,8 @@
# Debug configs for x86.
'debug_x86': [
'debug_bot', 'x86'],
+ 'debug_x86_asan': [
+ 'debug_bot', 'x86', 'asan', 'lsan'],
'debug_x86_minimal_symbols': [
'debug_bot', 'x86', 'minimal_symbols'],
'debug_x86_no_i18n': [
@@ -560,10 +577,13 @@
'debug', 'x86', 'goma', 'v8_enable_slow_dchecks', 'v8_full_debug'],
# Release configs for x86.
+ 'release_x86_asan_symbolized_verify_heap': [
+ 'release_bot', 'x86', 'asan', 'lsan', 'symbolized',
+ 'v8_verify_heap'],
'release_x86_gcc': [
- 'release_bot_no_goma', 'x86', 'gcc', 'v8_check_header_includes'],
+ 'release_bot_no_goma', 'x86', 'gcc', 'lld', 'v8_check_header_includes'],
'release_x86_gcc_minimal_symbols': [
- 'release_bot_no_goma', 'x86', 'gcc', 'minimal_symbols',
+ 'release_bot_no_goma', 'x86', 'gcc', 'lld', 'minimal_symbols',
'v8_check_header_includes'],
'release_x86_gcmole': [
'release_bot', 'x86', 'gcmole'],
@@ -645,7 +665,7 @@
'debug_bot': {
'mixins': [
'debug', 'shared', 'goma', 'v8_enable_slow_dchecks',
- 'v8_optimized_debug'],
+ 'v8_optimized_debug', 'v8_enable_google_benchmark'],
},
'debug_bot_no_goma': {
@@ -701,6 +721,10 @@
'gn_args': 'target_cpu="x64" target_os="ios"',
},
+ 'lld': {
+ 'gn_args': 'use_lld=true',
+ },
+
'lsan': {
'mixins': ['v8_enable_test_features'],
'gn_args': 'is_lsan=true',
@@ -745,7 +769,7 @@
},
'release_bot': {
- 'mixins': ['release', 'static', 'goma'],
+ 'mixins': ['release', 'static', 'goma', 'v8_enable_google_benchmark'],
},
'release_bot_no_goma': {
@@ -834,6 +858,10 @@
'gn_args': 'v8_control_flow_integrity=true',
},
+ 'v8_enable_heap_sandbox': {
+ 'gn_args': 'v8_enable_heap_sandbox=true',
+ },
+
'v8_enable_lite_mode': {
'gn_args': 'v8_enable_lite_mode=true',
},
@@ -842,6 +870,10 @@
'gn_args': 'v8_enable_slow_dchecks=true',
},
+ 'v8_enable_google_benchmark': {
+ 'gn_args': 'v8_enable_google_benchmark=true',
+ },
+
'webassembly_disabled': {
'gn_args': 'v8_enable_webassembly=false',
},
@@ -853,7 +885,10 @@
'v8_disable_pointer_compression': {
'gn_args': 'v8_enable_pointer_compression=false',
},
-
+ 'v8_enable_single_generation': {
+ 'gn_args': 'v8_enable_single_generation=true '
+ 'v8_disable_write_barriers=true',
+ },
'v8_enable_test_features': {
'gn_args': 'v8_enable_test_features=true',
},
diff --git a/chromium/v8/infra/testing/builders.pyl b/chromium/v8/infra/testing/builders.pyl
index fc0d1c55b11..8fe8872ed9b 100644
--- a/chromium/v8/infra/testing/builders.pyl
+++ b/chromium/v8/infra/testing/builders.pyl
@@ -49,7 +49,7 @@
# Fuchsia
'v8_fuchsia_rel_ng_triggered': {
'swarming_dimensions' : {
- 'os': 'Ubuntu-16.04',
+ 'os': 'Ubuntu-18.04',
},
'tests': [
{'name': 'fuchsia-unittests'},
@@ -57,7 +57,7 @@
},
'V8 Fuchsia': {
'swarming_dimensions' : {
- 'os': 'Ubuntu-16.04',
+ 'os': 'Ubuntu-18.04',
},
'tests': [
{'name': 'fuchsia-unittests'},
@@ -68,7 +68,7 @@
'v8_linux_dbg_ng_triggered': {
'swarming_dimensions' : {
'cpu': 'x86-64-avx2',
- 'os': 'Ubuntu-16.04',
+ 'os': 'Ubuntu-18.04',
},
'tests': [
{'name': 'benchmarks'},
@@ -102,7 +102,7 @@
},
'v8_linux_gc_stress_dbg_ng_triggered': {
'swarming_dimensions' : {
- 'os': 'Ubuntu-16.04',
+ 'os': 'Ubuntu-18.04',
},
'tests': [
{'name': 'mjsunit', 'variant': 'slow_path', 'test_args': ['--gc-stress'], 'shards': 2},
@@ -111,7 +111,7 @@
},
'v8_linux_gcc_rel_ng_triggered': {
'swarming_dimensions' : {
- 'os': 'Ubuntu-16.04',
+ 'os': 'Ubuntu-18.04',
},
'tests': [
{'name': 'v8testing'},
@@ -120,7 +120,7 @@
'v8_linux_nodcheck_rel_ng_triggered': {
'swarming_dimensions' : {
'cpu': 'x86-64-avx2',
- 'os': 'Ubuntu-16.04',
+ 'os': 'Ubuntu-18.04',
},
'tests': [
{'name': 'benchmarks'},
@@ -135,7 +135,7 @@
},
'v8_linux_noi18n_rel_ng_triggered': {
'swarming_dimensions' : {
- 'os': 'Ubuntu-16.04',
+ 'os': 'Ubuntu-18.04',
},
'tests': [
{'name': 'mozilla', 'variant': 'default'},
@@ -146,7 +146,7 @@
'v8_linux_rel_ng_triggered': {
'swarming_dimensions' : {
'cpu': 'x86-64-avx2',
- 'os': 'Ubuntu-16.04',
+ 'os': 'Ubuntu-18.04',
},
'tests': [
{'name': 'benchmarks'},
@@ -166,7 +166,7 @@
'v8_linux_optional_rel_ng_triggered': {
'swarming_dimensions' : {
'cpu': 'x86-64-avx2',
- 'os': 'Ubuntu-16.04',
+ 'os': 'Ubuntu-18.04',
},
'tests': [
# Code serializer.
@@ -268,7 +268,7 @@
},
'v8_linux_verify_csa_rel_ng_triggered': {
'swarming_dimensions' : {
- 'os': 'Ubuntu-16.04',
+ 'os': 'Ubuntu-18.04',
},
'tests': [
{'name': 'v8testing', 'shards': 2},
@@ -278,7 +278,7 @@
# Linux32 with arm simulators
'v8_linux_arm_dbg_ng_triggered': {
'swarming_dimensions' : {
- 'os': 'Ubuntu-16.04',
+ 'os': 'Ubuntu-18.04',
},
'tests': [
{'name': 'mjsunit_sp_frame_access'},
@@ -291,7 +291,7 @@
},
'v8_linux_arm_lite_rel_ng_triggered': {
'swarming_dimensions' : {
- 'os': 'Ubuntu-16.04',
+ 'os': 'Ubuntu-18.04',
},
'tests': [
{'name': 'v8testing', 'variant': 'default', 'shards': 4},
@@ -299,7 +299,7 @@
},
'v8_linux_arm_rel_ng_triggered': {
'swarming_dimensions' : {
- 'os': 'Ubuntu-16.04',
+ 'os': 'Ubuntu-18.04',
},
'tests': [
{'name': 'mjsunit_sp_frame_access', 'shards': 2},
@@ -314,7 +314,7 @@
# Linux64
'v8_linux64_asan_rel_ng_triggered': {
'swarming_dimensions' : {
- 'os': 'Ubuntu-16.04',
+ 'os': 'Ubuntu-18.04',
},
'tests': [
{'name': 'test262', 'shards': 7},
@@ -325,7 +325,7 @@
},
'v8_linux64_cfi_rel_ng_triggered': {
'swarming_dimensions' : {
- 'os': 'Ubuntu-16.04',
+ 'os': 'Ubuntu-18.04',
},
'tests': [
{'name': 'benchmarks'},
@@ -338,7 +338,7 @@
'v8_linux64_dbg_ng_triggered': {
'swarming_dimensions' : {
'cpu': 'x86-64-avx2',
- 'os': 'Ubuntu-16.04',
+ 'os': 'Ubuntu-18.04',
},
'tests': [
{'name': 'benchmarks'},
@@ -360,7 +360,7 @@
'v8_linux64_dict_tracking_dbg_ng_triggered': {
'swarming_dimensions' : {
'cpu': 'x86-64-avx2',
- 'os': 'Ubuntu-16.04',
+ 'os': 'Ubuntu-18.04',
},
'tests': [
{'name': 'v8testing', 'shards': 3},
@@ -368,26 +368,14 @@
},
'v8_linux64_fuzzilli_ng_triggered': {
'swarming_dimensions' : {
- 'os': 'Ubuntu-16.04',
+ 'os': 'Ubuntu-18.04',
},
# TODO(almuthanna): Add a new test config for the fuzzilli suite.
'tests': [],
},
- 'v8_linux64_gc_stress_custom_snapshot_dbg_ng_triggered': {
- 'swarming_dimensions' : {
- 'os': 'Ubuntu-16.04',
- },
- 'tests': [
- {
- 'name': 'mjsunit',
- 'test_args': ['--gc-stress', '--no-harness'],
- 'shards': 3,
- },
- ],
- },
'v8_linux64_fyi_rel_ng_triggered': {
'swarming_dimensions' : {
- 'os': 'Ubuntu-16.04',
+ 'os': 'Ubuntu-18.04',
},
'tests': [
# Infra staging.
@@ -401,11 +389,41 @@
{'name': 'mjsunit', 'variant': 'experimental_regexp'},
# Concurrent inlining.
{'name': 'mjsunit', 'variant': 'concurrent_inlining'},
+ # Wasm write protect code space.
+ {'name': 'mjsunit', 'variant': 'wasm_write_protect_code'},
+ ],
+ },
+ 'v8_linux64_gc_stress_custom_snapshot_dbg_ng_triggered': {
+ 'swarming_dimensions' : {
+ 'os': 'Ubuntu-18.04',
+ },
+ 'tests': [
+ {
+ 'name': 'mjsunit',
+ 'test_args': ['--gc-stress', '--no-harness'],
+ 'shards': 3,
+ },
+ ],
+ },
+ 'v8_linux64_gcov_coverage': {
+ 'swarming_dimensions' : {
+ 'os': 'Ubuntu-18.04',
+ },
+ 'tests': [
+ {'name': 'v8testing'},
+ ],
+ },
+ 'v8_linux64_heap_sandbox_dbg_ng_triggered': {
+ 'swarming_dimensions' : {
+ 'os': 'Ubuntu-18.04',
+ },
+ 'tests': [
+ {'name': 'v8testing', 'shards': 2},
],
},
'v8_linux64_msan_rel_ng_triggered': {
'swarming_dimensions' : {
- 'os': 'Ubuntu-16.04',
+ 'os': 'Ubuntu-18.04',
},
'tests': [
{'name': 'test262', 'variant': 'default', 'shards': 2},
@@ -415,7 +433,7 @@
'v8_linux64_nodcheck_rel_ng_triggered': {
'swarming_dimensions' : {
'cpu': 'x86-64-avx2',
- 'os': 'Ubuntu-16.04',
+ 'os': 'Ubuntu-18.04',
},
'tests': [
{'name': 'benchmarks'},
@@ -438,7 +456,7 @@
},
'v8_linux64_perfetto_dbg_ng_triggered': {
'swarming_dimensions' : {
- 'os': 'Ubuntu-16.04',
+ 'os': 'Ubuntu-18.04',
},
'tests': [
{'name': 'v8testing', 'shards': 3},
@@ -446,7 +464,15 @@
},
'v8_linux64_pointer_compression_rel_ng_triggered': {
'swarming_dimensions' : {
- 'os': 'Ubuntu-16.04',
+ 'os': 'Ubuntu-18.04',
+ },
+ 'tests': [
+ {'name': 'v8testing', 'shards': 3},
+ ],
+ },
+ 'v8_linux64_single_generation_dbg_ng_triggered': {
+ 'swarming_dimensions' : {
+ 'os': 'Ubuntu-18.04',
},
'tests': [
{'name': 'v8testing', 'shards': 3},
@@ -455,7 +481,7 @@
'v8_linux64_rel_ng_triggered': {
'swarming_dimensions' : {
'cpu': 'x86-64-avx2',
- 'os': 'Ubuntu-16.04',
+ 'os': 'Ubuntu-18.04',
},
'tests': [
# TODO(machenbach): Add benchmarks.
@@ -475,7 +501,7 @@
},
'v8_linux64_tsan_rel_ng_triggered': {
'swarming_dimensions' : {
- 'os': 'Ubuntu-16.04',
+ 'os': 'Ubuntu-18.04',
},
'tests': [
{'name': 'benchmarks'},
@@ -489,7 +515,7 @@
},
'v8_linux64_tsan_no_cm_rel_ng_triggered': {
'swarming_dimensions' : {
- 'os': 'Ubuntu-16.04',
+ 'os': 'Ubuntu-18.04',
},
'tests': [
{
@@ -507,7 +533,7 @@
},
'v8_linux64_tsan_isolates_rel_ng_triggered': {
'swarming_dimensions' : {
- 'os': 'Ubuntu-16.04',
+ 'os': 'Ubuntu-18.04',
},
'tests': [
{'name': 'v8testing', 'test_args': ['--isolates'], 'shards': 7},
@@ -515,7 +541,7 @@
},
'v8_linux64_ubsan_rel_ng_triggered': {
'swarming_dimensions' : {
- 'os': 'Ubuntu-16.04',
+ 'os': 'Ubuntu-18.04',
},
'tests': [
{'name': 'v8testing', 'shards': 2},
@@ -523,7 +549,7 @@
},
'v8_linux64_verify_csa_rel_ng_triggered': {
'swarming_dimensions' : {
- 'os': 'Ubuntu-16.04',
+ 'os': 'Ubuntu-18.04',
},
'tests': [
{'name': 'v8testing', 'shards': 2},
@@ -533,7 +559,7 @@
# Linux64 with arm64 simulators
'v8_linux_arm64_dbg_ng_triggered': {
'swarming_dimensions' : {
- 'os': 'Ubuntu-16.04',
+ 'os': 'Ubuntu-18.04',
},
'tests': [
{'name': 'mjsunit_sp_frame_access'},
@@ -546,7 +572,7 @@
},
'v8_linux_arm64_gc_stress_dbg_ng_triggered': {
'swarming_dimensions' : {
- 'os': 'Ubuntu-16.04',
+ 'os': 'Ubuntu-18.04',
},
'tests': [
{'name': 'd8testing', 'test_args': ['--gc-stress'], 'shards': 12},
@@ -554,7 +580,7 @@
},
'v8_linux_arm64_rel_ng_triggered': {
'swarming_dimensions' : {
- 'os': 'Ubuntu-16.04',
+ 'os': 'Ubuntu-18.04',
},
'tests': [
{'name': 'mjsunit_sp_frame_access', 'shards': 4},
@@ -567,7 +593,7 @@
},
'v8_linux_arm64_cfi_rel_ng_triggered': {
'swarming_dimensions' : {
- 'os': 'Ubuntu-16.04',
+ 'os': 'Ubuntu-18.04',
},
'tests': [
{'name': 'test262', 'variant': 'default', 'shards': 3},
@@ -576,7 +602,7 @@
},
'v8_linux64_arm64_pointer_compression_rel_ng_triggered': {
'swarming_dimensions' : {
- 'os': 'Ubuntu-16.04',
+ 'os': 'Ubuntu-18.04',
},
'tests': [
{'name': 'v8testing', 'variant': 'default', 'shards': 4},
@@ -586,7 +612,7 @@
# Linux64 with RISC-V simulators
'v8_linux64_riscv64_rel_ng_triggered': {
'swarming_dimensions': {
- 'os': 'Ubuntu-16.04',
+ 'os': 'Ubuntu-18.04',
},
'tests': [
{'name': 'v8testing', 'shards': 3},
@@ -598,7 +624,7 @@
'swarming_dimensions' : {
'cores': '8',
'cpu': 'armv7l-32-ODROID-XU4',
- 'os': 'Ubuntu-16.04',
+ 'os': 'Ubuntu',
},
'swarming_task_attrs': {
# Use same prio as CI due to limited resources.
@@ -760,6 +786,7 @@
},
'tests': [
{'name': 'v8testing', 'shards': 8},
+ {'name': 'v8testing', 'variant': 'future', 'shards': 2},
],
},
'v8_mac_arm64_sim_dbg_ng_triggered': {
@@ -770,6 +797,7 @@
},
'tests': [
{'name': 'v8testing', 'shards': 8},
+ {'name': 'v8testing', 'variant': 'future', 'shards': 2},
],
},
'v8_mac_arm64_sim_nodcheck_rel_ng_triggered': {
@@ -780,6 +808,7 @@
},
'tests': [
{'name': 'v8testing', 'shards': 8},
+ {'name': 'v8testing', 'variant': 'future', 'shards': 2},
],
},
##############################################################################
@@ -788,7 +817,7 @@
# Main.
'V8 Fuzzer': {
'swarming_dimensions' : {
- 'os': 'Ubuntu-16.04',
+ 'os': 'Ubuntu-18.04',
},
'swarming_task_attrs': {
'expiration': 14400,
@@ -802,7 +831,7 @@
'V8 Linux': {
'swarming_dimensions': {
'cpu': 'x86-64-avx2',
- 'os': 'Ubuntu-16.04',
+ 'os': 'Ubuntu-18.04',
},
'tests': [
{'name': 'benchmarks'},
@@ -873,7 +902,7 @@
},
'V8 Linux - arm64 - sim - CFI': {
'swarming_dimensions': {
- 'os': 'Ubuntu-16.04',
+ 'os': 'Ubuntu-18.04',
},
'tests': [
{'name': 'test262', 'variant': 'default', 'shards': 3},
@@ -882,7 +911,7 @@
},
'V8 Linux - arm64 - sim - MSAN': {
'swarming_dimensions': {
- 'os': 'Ubuntu-16.04',
+ 'os': 'Ubuntu-18.04',
},
'tests': [
{'name': 'test262', 'variant': 'default', 'shards': 3},
@@ -892,7 +921,7 @@
'V8 Linux - debug': {
'swarming_dimensions': {
'cpu': 'x86-64-avx2',
- 'os': 'Ubuntu-16.04',
+ 'os': 'Ubuntu-18.04',
},
'tests': [
{'name': 'benchmarks'},
@@ -991,7 +1020,7 @@
},
'V8 Linux - full debug': {
'swarming_dimensions': {
- 'os': 'Ubuntu-16.04',
+ 'os': 'Ubuntu-18.04',
},
'tests': [
{'name': 'v8testing', 'variant': 'default', 'shards': 4},
@@ -999,7 +1028,7 @@
},
'V8 Linux - gc stress': {
'swarming_dimensions': {
- 'os': 'Ubuntu-16.04',
+ 'os': 'Ubuntu-18.04',
},
'tests': [
{
@@ -1017,7 +1046,7 @@
},
'V8 Linux - noi18n - debug': {
'swarming_dimensions': {
- 'os': 'Ubuntu-16.04',
+ 'os': 'Ubuntu-18.04',
},
'tests': [
{'name': 'mozilla', 'variant': 'default'},
@@ -1027,7 +1056,7 @@
},
'V8 Linux - predictable': {
'swarming_dimensions': {
- 'os': 'Ubuntu-16.04',
+ 'os': 'Ubuntu-18.04',
},
'tests': [
{'name': 'benchmarks'},
@@ -1037,7 +1066,7 @@
},
'V8 Linux - shared': {
'swarming_dimensions': {
- 'os': 'Ubuntu-16.04',
+ 'os': 'Ubuntu-18.04',
},
'tests': [
{'name': 'mozilla'},
@@ -1047,7 +1076,7 @@
},
'V8 Linux - verify csa': {
'swarming_dimensions': {
- 'os': 'Ubuntu-16.04',
+ 'os': 'Ubuntu-18.04',
},
'tests': [
{'name': 'v8testing'},
@@ -1055,7 +1084,7 @@
},
'V8 Linux gcc': {
'swarming_dimensions' : {
- 'os': 'Ubuntu-16.04',
+ 'os': 'Ubuntu-18.04',
},
'tests': [
{'name': 'v8testing'},
@@ -1064,7 +1093,7 @@
'V8 Linux64': {
'swarming_dimensions': {
'cpu': 'x86-64-avx2',
- 'os': 'Ubuntu-16.04',
+ 'os': 'Ubuntu-18.04',
},
'tests': [
{'name': 'benchmarks'},
@@ -1108,7 +1137,7 @@
},
'V8 Linux64 - cfi': {
'swarming_dimensions' : {
- 'os': 'Ubuntu-16.04',
+ 'os': 'Ubuntu-18.04',
},
'tests': [
{'name': 'benchmarks'},
@@ -1120,7 +1149,7 @@
},
'V8 Linux64 - custom snapshot - debug': {
'swarming_dimensions' : {
- 'os': 'Ubuntu-16.04',
+ 'os': 'Ubuntu-18.04',
},
'tests': [
{'name': 'mjsunit', 'test_args': ['--no-harness']},
@@ -1129,7 +1158,7 @@
'V8 Linux64 - debug': {
'swarming_dimensions': {
'cpu': 'x86-64-avx2',
- 'os': 'Ubuntu-16.04',
+ 'os': 'Ubuntu-18.04',
},
'tests': [
{'name': 'benchmarks'},
@@ -1171,7 +1200,7 @@
'V8 Linux64 - dict tracking - debug': {
'swarming_dimensions': {
'cpu': 'x86-64-avx2',
- 'os': 'Ubuntu-16.04',
+ 'os': 'Ubuntu-18.04',
},
'tests': [
{'name': 'v8testing', 'shards': 3},
@@ -1179,7 +1208,7 @@
},
'V8 Linux64 - debug - fyi': {
'swarming_dimensions' : {
- 'os': 'Ubuntu-16.04',
+ 'os': 'Ubuntu-18.04',
},
'tests': [
# Infra staging.
@@ -1193,11 +1222,13 @@
{'name': 'mjsunit', 'variant': 'experimental_regexp'},
# Concurrent inlining.
{'name': 'mjsunit', 'variant': 'concurrent_inlining'},
+ # Wasm write protect code space.
+ {'name': 'mjsunit', 'variant': 'wasm_write_protect_code'},
],
},
'V8 Linux64 - debug - perfetto': {
'swarming_dimensions' : {
- 'os': 'Ubuntu-16.04',
+ 'os': 'Ubuntu-18.04',
},
'swarming_task_attrs': {
'expiration': 14400,
@@ -1208,9 +1239,22 @@
{'name': 'v8testing', 'shards': 2},
],
},
+ 'V8 Linux64 - debug - single generation': {
+ 'swarming_dimensions' : {
+ 'os': 'Ubuntu-18.04',
+ },
+ 'swarming_task_attrs': {
+ 'expiration': 14400,
+ 'hard_timeout': 3600,
+ 'priority': 35,
+ },
+ 'tests': [
+ {'name': 'v8testing', 'shards': 3},
+ ],
+ },
'V8 Linux64 - fyi': {
'swarming_dimensions' : {
- 'os': 'Ubuntu-16.04',
+ 'os': 'Ubuntu-18.04',
},
'tests': [
# Infra staging.
@@ -1224,19 +1268,29 @@
{'name': 'mjsunit', 'variant': 'experimental_regexp'},
# Concurrent inlining.
{'name': 'mjsunit', 'variant': 'concurrent_inlining'},
+ # Wasm write protect code space.
+ {'name': 'mjsunit', 'variant': 'wasm_write_protect_code'},
],
},
'V8 Linux64 - gcov coverage': {
'swarming_dimensions' : {
- 'os': 'Ubuntu-16.04',
+ 'os': 'Ubuntu-18.04',
},
'tests': [
{'name': 'v8testing'},
],
},
+ 'V8 Linux64 - heap sandbox - debug': {
+ 'swarming_dimensions' : {
+ 'os': 'Ubuntu-18.04',
+ },
+ 'tests': [
+ {'name': 'v8testing', 'shards': 2},
+ ],
+ },
'V8 Linux64 - internal snapshot': {
'swarming_dimensions' : {
- 'os': 'Ubuntu-16.04',
+ 'os': 'Ubuntu-18.04',
},
'tests': [
{'name': 'v8testing'},
@@ -1244,7 +1298,7 @@
},
'V8 Linux64 - pointer compression': {
'swarming_dimensions' : {
- 'os': 'Ubuntu-16.04',
+ 'os': 'Ubuntu-18.04',
},
'tests': [
{'name': 'v8testing', 'shards': 2},
@@ -1252,7 +1306,7 @@
},
'V8 Linux64 - shared': {
'swarming_dimensions' : {
- 'os': 'Ubuntu-16.04',
+ 'os': 'Ubuntu-18.04',
},
'tests': [
{'name': 'mozilla'},
@@ -1262,7 +1316,7 @@
},
'V8 Linux64 - verify csa': {
'swarming_dimensions' : {
- 'os': 'Ubuntu-16.04',
+ 'os': 'Ubuntu-18.04',
},
'tests': [
{'name': 'v8testing'},
@@ -1270,7 +1324,7 @@
},
'V8 Linux64 ASAN': {
'swarming_dimensions' : {
- 'os': 'Ubuntu-16.04',
+ 'os': 'Ubuntu-18.04',
},
'tests': [
{'name': 'test262', 'shards': 5},
@@ -1281,7 +1335,7 @@
},
'V8 Linux64 GC Stress - custom snapshot': {
'swarming_dimensions' : {
- 'os': 'Ubuntu-16.04',
+ 'os': 'Ubuntu-18.04',
},
'tests': [
{
@@ -1293,7 +1347,7 @@
},
'V8 Linux64 TSAN': {
'swarming_dimensions' : {
- 'os': 'Ubuntu-16.04',
+ 'os': 'Ubuntu-18.04',
},
'tests': [
{'name': 'benchmarks'},
@@ -1307,7 +1361,7 @@
},
'V8 Linux64 TSAN - stress-incremental-marking': {
'swarming_dimensions' : {
- 'os': 'Ubuntu-16.04',
+ 'os': 'Ubuntu-18.04',
},
'swarming_task_attrs': {
'expiration': 14400,
@@ -1338,7 +1392,7 @@
},
'V8 Linux64 TSAN - isolates': {
'swarming_dimensions' : {
- 'os': 'Ubuntu-16.04',
+ 'os': 'Ubuntu-18.04',
},
'tests': [
{'name': 'v8testing', 'test_args': ['--isolates'], 'shards': 7},
@@ -1346,7 +1400,7 @@
},
'V8 Linux64 TSAN - no-concurrent-marking': {
'swarming_dimensions' : {
- 'os': 'Ubuntu-16.04',
+ 'os': 'Ubuntu-18.04',
},
'tests': [
{
@@ -1364,7 +1418,7 @@
},
'V8 Linux64 UBSan': {
'swarming_dimensions' : {
- 'os': 'Ubuntu-16.04',
+ 'os': 'Ubuntu-18.04',
},
'tests': [
{'name': 'mozilla'},
@@ -1427,8 +1481,8 @@
'priority': 35,
},
'tests': [
- {'name': 'd8testing'},
- {'name': 'd8testing', 'variant': 'extra'},
+ {'name': 'v8testing'},
+ {'name': 'v8testing', 'variant': 'extra'},
],
},
'V8 Mac - arm64 - debug': {
@@ -1460,6 +1514,7 @@
},
'tests': [
{'name': 'v8testing', 'shards': 8},
+ {'name': 'v8testing', 'variant': 'future', 'shards': 2},
],
},
'V8 Mac - arm64 - sim - release': {
@@ -1475,6 +1530,7 @@
},
'tests': [
{'name': 'v8testing', 'shards': 8},
+ {'name': 'v8testing', 'variant': 'future', 'shards': 2},
],
},
'V8 Win32': {
@@ -1558,7 +1614,7 @@
'swarming_dimensions': {
'cores': '8',
'cpu': 'armv7l-32-ODROID-XU4',
- 'os': 'Ubuntu-16.04',
+ 'os': 'Ubuntu',
},
'swarming_task_attrs': {
'expiration': 21600,
@@ -1587,7 +1643,7 @@
'swarming_dimensions': {
'cores': '8',
'cpu': 'armv7l-32-ODROID-XU4',
- 'os': 'Ubuntu-16.04',
+ 'os': 'Ubuntu',
},
'swarming_task_attrs': {
'expiration': 21600,
@@ -1615,7 +1671,7 @@
'swarming_dimensions': {
'cores': '8',
'cpu': 'armv7l-32-ODROID-XU4',
- 'os': 'Ubuntu-16.04',
+ 'os': 'Ubuntu',
},
'swarming_task_attrs': {
'expiration': 21600,
@@ -1633,7 +1689,7 @@
},
'V8 Linux - arm - sim': {
'swarming_dimensions': {
- 'os': 'Ubuntu-16.04',
+ 'os': 'Ubuntu-18.04',
},
'tests': [
{'name': 'mjsunit_sp_frame_access'},
@@ -1682,7 +1738,7 @@
},
'V8 Linux - arm - sim - debug': {
'swarming_dimensions': {
- 'os': 'Ubuntu-16.04',
+ 'os': 'Ubuntu-18.04',
},
'tests': [
{'name': 'mjsunit_sp_frame_access', 'shards': 6},
@@ -1735,7 +1791,7 @@
},
'V8 Linux - arm - sim - lite': {
'swarming_dimensions': {
- 'os': 'Ubuntu-16.04',
+ 'os': 'Ubuntu-18.04',
},
'tests': [
{'name': 'v8testing', 'variant': 'default', 'shards': 2},
@@ -1743,7 +1799,7 @@
},
'V8 Linux - arm - sim - lite - debug': {
'swarming_dimensions': {
- 'os': 'Ubuntu-16.04',
+ 'os': 'Ubuntu-18.04',
},
'tests': [
{'name': 'v8testing', 'variant': 'default', 'shards': 4},
@@ -1751,7 +1807,7 @@
},
'V8 Linux - arm64 - sim': {
'swarming_dimensions': {
- 'os': 'Ubuntu-16.04',
+ 'os': 'Ubuntu-18.04',
},
'tests': [
{'name': 'mjsunit_sp_frame_access'},
@@ -1764,7 +1820,7 @@
},
'V8 Linux - arm64 - sim - debug': {
'swarming_dimensions': {
- 'os': 'Ubuntu-16.04',
+ 'os': 'Ubuntu-18.04',
},
# TODO(machenbach): Remove longer timeout when this builder scales better.
'swarming_task_attrs': {
@@ -1781,7 +1837,7 @@
},
'V8 Linux - arm64 - sim - gc stress': {
'swarming_dimensions': {
- 'os': 'Ubuntu-16.04',
+ 'os': 'Ubuntu-18.04',
},
'swarming_task_attrs': {
'expiration': 14400,
@@ -1798,7 +1854,7 @@
},
'V8 Linux - mips64el - sim': {
'swarming_dimensions': {
- 'os': 'Ubuntu-16.04',
+ 'os': 'Ubuntu-18.04',
},
'swarming_task_attrs': {
'expiration': 14400,
@@ -1812,7 +1868,7 @@
},
'V8 Linux - mipsel - sim': {
'swarming_dimensions': {
- 'os': 'Ubuntu-16.04',
+ 'os': 'Ubuntu-18.04',
},
'swarming_task_attrs': {
'expiration': 14400,
@@ -1826,7 +1882,7 @@
},
'V8 Linux - ppc64 - sim': {
'swarming_dimensions': {
- 'os': 'Ubuntu-16.04',
+ 'os': 'Ubuntu-18.04',
},
'swarming_task_attrs': {
'expiration': 14400,
@@ -1839,7 +1895,7 @@
},
'V8 Linux - riscv64 - sim': {
'swarming_dimensions': {
- 'os': 'Ubuntu-16.04',
+ 'os': 'Ubuntu-18.04',
},
'swarming_task_attrs': {
'expiration': 14400,
@@ -1852,7 +1908,7 @@
},
'V8 Linux - s390x - sim': {
'swarming_dimensions': {
- 'os': 'Ubuntu-16.04',
+ 'os': 'Ubuntu-18.04',
},
'swarming_task_attrs': {
'expiration': 14400,
@@ -1865,7 +1921,7 @@
},
'V8 Linux64 - arm64 - sim - pointer compression': {
'swarming_dimensions': {
- 'os': 'Ubuntu-16.04',
+ 'os': 'Ubuntu-18.04',
},
'swarming_task_attrs': {
'expiration': 14400,
@@ -1880,7 +1936,7 @@
# Clusterfuzz.
'V8 NumFuzz': {
'swarming_dimensions': {
- 'os': 'Ubuntu-16.04',
+ 'os': 'Ubuntu-18.04',
},
'swarming_task_attrs': {
'expiration': 13800,
@@ -1897,7 +1953,7 @@
},
'V8 NumFuzz - TSAN': {
'swarming_dimensions': {
- 'os': 'Ubuntu-16.04',
+ 'os': 'Ubuntu-18.04',
},
'swarming_task_attrs': {
'expiration': 13800,
@@ -1945,7 +2001,7 @@
},
'V8 NumFuzz - debug': {
'swarming_dimensions': {
- 'os': 'Ubuntu-16.04',
+ 'os': 'Ubuntu-18.04',
},
'swarming_task_attrs': {
'expiration': 13800,
@@ -1998,282 +2054,4 @@
},
],
},
- ##############################################################################
- # Branches.
- 'V8 Linux - beta branch': {
- 'swarming_dimensions': {
- 'os': 'Ubuntu-16.04',
- },
- 'tests': [
- {'name': 'mozilla'},
- {'name': 'test262', 'variant': 'default'},
- {'name': 'v8testing'},
- ],
- },
- 'V8 Linux - beta branch - debug': {
- 'swarming_dimensions': {
- 'os': 'Ubuntu-16.04',
- },
- 'tests': [
- {'name': 'mozilla'},
- {'name': 'test262', 'variant': 'default'},
- {'name': 'v8testing', 'shards': 3},
- ],
- },
- 'V8 Linux - stable branch': {
- 'swarming_dimensions': {
- 'os': 'Ubuntu-16.04',
- },
- 'tests': [
- {'name': 'mozilla'},
- {'name': 'test262', 'variant': 'default'},
- {'name': 'v8testing'},
- ],
- },
- 'V8 Linux - stable branch - debug': {
- 'swarming_dimensions': {
- 'os': 'Ubuntu-16.04',
- },
- 'tests': [
- {'name': 'mozilla'},
- {'name': 'test262', 'variant': 'default'},
- {'name': 'v8testing', 'shards': 3},
- ],
- },
- 'V8 Linux - previous branch': {
- 'swarming_dimensions': {
- 'os': 'Ubuntu-16.04',
- },
- 'tests': [
- {'name': 'mozilla'},
- {'name': 'test262', 'variant': 'default'},
- {'name': 'v8testing'},
- ],
- },
- 'V8 Linux - previous branch - debug': {
- 'swarming_dimensions': {
- 'os': 'Ubuntu-16.04',
- },
- 'tests': [
- {'name': 'mozilla'},
- {'name': 'test262', 'variant': 'default'},
- {'name': 'v8testing', 'shards': 3},
- ],
- },
- 'V8 Linux64 - beta branch': {
- 'swarming_dimensions': {
- 'os': 'Ubuntu-16.04',
- },
- 'tests': [
- {'name': 'mozilla'},
- {'name': 'test262', 'variant': 'default'},
- {'name': 'v8testing'},
- ],
- },
- 'V8 Linux64 - beta branch - debug': {
- 'swarming_dimensions': {
- 'os': 'Ubuntu-16.04',
- },
- 'tests': [
- {'name': 'mozilla'},
- {'name': 'test262', 'variant': 'default'},
- {'name': 'v8testing', 'shards': 3},
- ],
- },
- 'V8 Linux64 - stable branch': {
- 'swarming_dimensions': {
- 'os': 'Ubuntu-16.04',
- },
- 'tests': [
- {'name': 'mozilla'},
- {'name': 'test262', 'variant': 'default'},
- {'name': 'v8testing'},
- ],
- },
- 'V8 Linux64 - stable branch - debug': {
- 'swarming_dimensions': {
- 'os': 'Ubuntu-16.04',
- },
- 'tests': [
- {'name': 'mozilla'},
- {'name': 'test262', 'variant': 'default'},
- {'name': 'v8testing', 'shards': 3},
- ],
- },
- 'V8 Linux64 - previous branch': {
- 'swarming_dimensions': {
- 'os': 'Ubuntu-16.04',
- },
- 'tests': [
- {'name': 'mozilla'},
- {'name': 'test262', 'variant': 'default'},
- {'name': 'v8testing'},
- ],
- },
- 'V8 Linux64 - previous branch - debug': {
- 'swarming_dimensions': {
- 'os': 'Ubuntu-16.04',
- },
- 'tests': [
- {'name': 'mozilla'},
- {'name': 'test262', 'variant': 'default'},
- {'name': 'v8testing', 'shards': 3},
- ],
- },
- 'V8 arm - sim - beta branch': {
- 'swarming_dimensions': {
- 'os': 'Ubuntu-16.04',
- },
- 'tests': [
- {'name': 'mozilla'},
- {'name': 'test262', 'variant': 'default'},
- {'name': 'v8testing', 'shards': 4},
- ],
- },
- 'V8 arm - sim - beta branch - debug': {
- 'swarming_dimensions': {
- 'os': 'Ubuntu-16.04',
- },
- 'tests': [
- {'name': 'mozilla', 'shards': 2},
- {'name': 'test262', 'variant': 'default', 'shards': 2},
- {'name': 'v8testing', 'shards': 10},
- ],
- },
- 'V8 arm - sim - stable branch': {
- 'swarming_dimensions': {
- 'os': 'Ubuntu-16.04',
- },
- 'tests': [
- {'name': 'mozilla'},
- {'name': 'test262', 'variant': 'default'},
- {'name': 'v8testing', 'shards': 4},
- ],
- },
- 'V8 arm - sim - stable branch - debug': {
- 'swarming_dimensions': {
- 'os': 'Ubuntu-16.04',
- },
- 'tests': [
- {'name': 'mozilla', 'shards': 2},
- {'name': 'test262', 'variant': 'default', 'shards': 2},
- {'name': 'v8testing', 'shards': 10},
- ],
- },
- 'V8 arm - sim - previous branch': {
- 'swarming_dimensions': {
- 'os': 'Ubuntu-16.04',
- },
- 'tests': [
- {'name': 'mozilla'},
- {'name': 'test262', 'variant': 'default'},
- {'name': 'v8testing', 'shards': 4},
- ],
- },
- 'V8 arm - sim - previous branch - debug': {
- 'swarming_dimensions': {
- 'os': 'Ubuntu-16.04',
- },
- 'tests': [
- {'name': 'mozilla', 'shards': 2},
- {'name': 'test262', 'variant': 'default', 'shards': 2},
- {'name': 'v8testing', 'shards': 10},
- ],
- },
- 'V8 mips64el - sim - beta branch': {
- 'swarming_dimensions': {
- 'os': 'Ubuntu-16.04',
- },
- 'tests': [
- {'name': 'unittests'},
- ],
- },
- 'V8 mips64el - sim - stable branch': {
- 'swarming_dimensions': {
- 'os': 'Ubuntu-16.04',
- },
- 'tests': [
- {'name': 'unittests'},
- ],
- },
- 'V8 mips64el - sim - previous branch': {
- 'swarming_dimensions': {
- 'os': 'Ubuntu-16.04',
- },
- 'tests': [
- {'name': 'unittests'},
- ],
- },
- 'V8 mipsel - sim - beta branch': {
- 'swarming_dimensions': {
- 'os': 'Ubuntu-16.04',
- },
- 'tests': [
- {'name': 'v8testing', 'shards': 4},
- ],
- },
- 'V8 mipsel - sim - stable branch': {
- 'swarming_dimensions': {
- 'os': 'Ubuntu-16.04',
- },
- 'tests': [
- {'name': 'v8testing', 'shards': 4},
- ],
- },
- 'V8 mipsel - sim - previous branch': {
- 'swarming_dimensions': {
- 'os': 'Ubuntu-16.04',
- },
- 'tests': [
- {'name': 'v8testing', 'shards': 4},
- ],
- },
- 'V8 ppc64 - sim - beta branch': {
- 'swarming_dimensions': {
- 'os': 'Ubuntu-16.04',
- },
- 'tests': [
- {'name': 'unittests'},
- ],
- },
- 'V8 ppc64 - sim - stable branch': {
- 'swarming_dimensions': {
- 'os': 'Ubuntu-16.04',
- },
- 'tests': [
- {'name': 'unittests'},
- ],
- },
- 'V8 ppc64 - sim - previous branch': {
- 'swarming_dimensions': {
- 'os': 'Ubuntu-16.04',
- },
- 'tests': [
- {'name': 'unittests'},
- ],
- },
- 'V8 s390x - sim - beta branch': {
- 'swarming_dimensions': {
- 'os': 'Ubuntu-16.04',
- },
- 'tests': [
- {'name': 'unittests'},
- ],
- },
- 'V8 s390x - sim - stable branch': {
- 'swarming_dimensions': {
- 'os': 'Ubuntu-16.04',
- },
- 'tests': [
- {'name': 'unittests'},
- ],
- },
- 'V8 s390x - sim - previous branch': {
- 'swarming_dimensions': {
- 'os': 'Ubuntu-16.04',
- },
- 'tests': [
- {'name': 'unittests'},
- ],
- },
}
diff --git a/chromium/v8/samples/OWNERS b/chromium/v8/samples/OWNERS
index 9c4f2439aa5..6df8720dc57 100644
--- a/chromium/v8/samples/OWNERS
+++ b/chromium/v8/samples/OWNERS
@@ -1,2 +1,2 @@
mathias@chromium.org
-yangguo@chromium.org
+cbruni@chromium.org
diff --git a/chromium/v8/samples/shell.cc b/chromium/v8/samples/shell.cc
index e844ca51bf3..7de600a88fd 100644
--- a/chromium/v8/samples/shell.cc
+++ b/chromium/v8/samples/shell.cc
@@ -147,20 +147,17 @@ void Print(const v8::FunctionCallbackInfo<v8::Value>& args) {
// the argument into a JavaScript string.
void Read(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 1) {
- args.GetIsolate()->ThrowException(
- v8::String::NewFromUtf8Literal(args.GetIsolate(), "Bad parameters"));
+ args.GetIsolate()->ThrowError("Bad parameters");
return;
}
v8::String::Utf8Value file(args.GetIsolate(), args[0]);
if (*file == NULL) {
- args.GetIsolate()->ThrowException(v8::String::NewFromUtf8Literal(
- args.GetIsolate(), "Error loading file"));
+ args.GetIsolate()->ThrowError("Error loading file");
return;
}
v8::Local<v8::String> source;
if (!ReadFile(args.GetIsolate(), *file).ToLocal(&source)) {
- args.GetIsolate()->ThrowException(v8::String::NewFromUtf8Literal(
- args.GetIsolate(), "Error loading file"));
+ args.GetIsolate()->ThrowError("Error loading file");
return;
}
@@ -175,19 +172,16 @@ void Load(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::HandleScope handle_scope(args.GetIsolate());
v8::String::Utf8Value file(args.GetIsolate(), args[i]);
if (*file == NULL) {
- args.GetIsolate()->ThrowException(v8::String::NewFromUtf8Literal(
- args.GetIsolate(), "Error loading file"));
+ args.GetIsolate()->ThrowError("Error loading file");
return;
}
v8::Local<v8::String> source;
if (!ReadFile(args.GetIsolate(), *file).ToLocal(&source)) {
- args.GetIsolate()->ThrowException(v8::String::NewFromUtf8Literal(
- args.GetIsolate(), "Error loading file"));
+ args.GetIsolate()->ThrowError("Error loading file");
return;
}
if (!ExecuteString(args.GetIsolate(), source, args[i], false, false)) {
- args.GetIsolate()->ThrowException(v8::String::NewFromUtf8Literal(
- args.GetIsolate(), "Error executing file"));
+ args.GetIsolate()->ThrowError("Error executing file");
return;
}
}
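
The shell.cc hunks above swap the verbose ThrowException(String::NewFromUtf8Literal(...)) pattern for the newer Isolate::ThrowError convenience call, which takes a plain message and (as the v8.h declarations suggest) wraps it in a proper Error object rather than throwing a bare string. Below is a minimal sketch of a callback using the new call, with the old pattern kept in a comment for comparison; the function name and wiring are illustrative only and not part of this patch.

#include <v8.h>  // include path depends on the embedding setup

// Illustrative callback only; "RequireOneArg" is an invented name.
void RequireOneArg(const v8::FunctionCallbackInfo<v8::Value>& args) {
  if (args.Length() != 1) {
    // New style: one call, throws an Error with the given message.
    args.GetIsolate()->ThrowError("Bad parameters");
    return;
  }
  // Old style for comparison (threw a plain string value):
  // args.GetIsolate()->ThrowException(
  //     v8::String::NewFromUtf8Literal(args.GetIsolate(), "Bad parameters"));
}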
diff --git a/chromium/v8/src/DEPS b/chromium/v8/src/DEPS
index c3394e4b7a8..3c5dca663f6 100644
--- a/chromium/v8/src/DEPS
+++ b/chromium/v8/src/DEPS
@@ -5,6 +5,7 @@ include_rules = [
"+src/asmjs/asm-js.h",
"-src/baseline",
"+src/baseline/baseline.h",
+ "+src/baseline/baseline-osr-inl.h",
"+src/baseline/bytecode-offset-iterator.h",
"-src/bigint",
"+src/bigint/bigint.h",
@@ -14,6 +15,7 @@ include_rules = [
"+src/compiler/wasm-compiler.h",
"-src/heap",
"+src/heap/basic-memory-chunk.h",
+ "+src/heap/code-range.h",
"+src/heap/combined-heap.h",
"+src/heap/embedder-tracing.h",
"+src/heap/factory.h",
diff --git a/chromium/v8/src/api/api-arguments-inl.h b/chromium/v8/src/api/api-arguments-inl.h
index 57b533c7c9f..4edd0dad29d 100644
--- a/chromium/v8/src/api/api-arguments-inl.h
+++ b/chromium/v8/src/api/api-arguments-inl.h
@@ -96,8 +96,7 @@ inline JSReceiver FunctionCallbackArguments::holder() {
Handle<InterceptorInfo> interceptor, Handle<Name> name) { \
DCHECK_NAME_COMPATIBLE(interceptor, name); \
Isolate* isolate = this->isolate(); \
- RuntimeCallTimerScope timer( \
- isolate, RuntimeCallCounterId::kNamed##FUNCTION##Callback); \
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kNamed##FUNCTION##Callback); \
Handle<Object> receiver_check_unsupported; \
GenericNamedProperty##FUNCTION##Callback f = \
ToCData<GenericNamedProperty##FUNCTION##Callback>( \
@@ -120,8 +119,7 @@ FOR_EACH_CALLBACK(CREATE_NAMED_CALLBACK)
Handle<InterceptorInfo> interceptor, uint32_t index) { \
DCHECK(!interceptor->is_named()); \
Isolate* isolate = this->isolate(); \
- RuntimeCallTimerScope timer( \
- isolate, RuntimeCallCounterId::kIndexed##FUNCTION##Callback); \
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kIndexed##FUNCTION##Callback); \
Handle<Object> receiver_check_unsupported; \
IndexedProperty##FUNCTION##Callback f = \
ToCData<IndexedProperty##FUNCTION##Callback>(interceptor->TYPE()); \
@@ -142,7 +140,7 @@ FOR_EACH_CALLBACK(CREATE_INDEXED_CALLBACK)
Handle<Object> FunctionCallbackArguments::Call(CallHandlerInfo handler) {
Isolate* isolate = this->isolate();
LOG(isolate, ApiObjectAccess("call", holder()));
- RuntimeCallTimerScope timer(isolate, RuntimeCallCounterId::kFunctionCallback);
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kFunctionCallback);
v8::FunctionCallback f =
v8::ToCData<v8::FunctionCallback>(handler.callback());
Handle<Object> receiver_check_unsupported;
@@ -163,8 +161,7 @@ Handle<JSObject> PropertyCallbackArguments::CallNamedEnumerator(
Handle<InterceptorInfo> interceptor) {
DCHECK(interceptor->is_named());
LOG(isolate(), ApiObjectAccess("interceptor-named-enumerator", holder()));
- RuntimeCallTimerScope timer(isolate(),
- RuntimeCallCounterId::kNamedEnumeratorCallback);
+ RCS_SCOPE(isolate(), RuntimeCallCounterId::kNamedEnumeratorCallback);
return CallPropertyEnumerator(interceptor);
}
@@ -172,8 +169,7 @@ Handle<JSObject> PropertyCallbackArguments::CallIndexedEnumerator(
Handle<InterceptorInfo> interceptor) {
DCHECK(!interceptor->is_named());
LOG(isolate(), ApiObjectAccess("interceptor-indexed-enumerator", holder()));
- RuntimeCallTimerScope timer(isolate(),
- RuntimeCallCounterId::kIndexedEnumeratorCallback);
+ RCS_SCOPE(isolate(), RuntimeCallCounterId::kIndexedEnumeratorCallback);
return CallPropertyEnumerator(interceptor);
}
@@ -181,8 +177,7 @@ Handle<Object> PropertyCallbackArguments::CallNamedGetter(
Handle<InterceptorInfo> interceptor, Handle<Name> name) {
DCHECK_NAME_COMPATIBLE(interceptor, name);
Isolate* isolate = this->isolate();
- RuntimeCallTimerScope timer(isolate,
- RuntimeCallCounterId::kNamedGetterCallback);
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kNamedGetterCallback);
LOG(isolate,
ApiNamedPropertyAccess("interceptor-named-getter", holder(), *name));
GenericNamedPropertyGetterCallback f =
@@ -194,8 +189,7 @@ Handle<Object> PropertyCallbackArguments::CallNamedDescriptor(
Handle<InterceptorInfo> interceptor, Handle<Name> name) {
DCHECK_NAME_COMPATIBLE(interceptor, name);
Isolate* isolate = this->isolate();
- RuntimeCallTimerScope timer(isolate,
- RuntimeCallCounterId::kNamedDescriptorCallback);
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kNamedDescriptorCallback);
LOG(isolate,
ApiNamedPropertyAccess("interceptor-named-descriptor", holder(), *name));
GenericNamedPropertyDescriptorCallback f =
@@ -222,8 +216,7 @@ Handle<Object> PropertyCallbackArguments::CallNamedSetter(
GenericNamedPropertySetterCallback f =
ToCData<GenericNamedPropertySetterCallback>(interceptor->setter());
Isolate* isolate = this->isolate();
- RuntimeCallTimerScope timer(isolate,
- RuntimeCallCounterId::kNamedSetterCallback);
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kNamedSetterCallback);
PREPARE_CALLBACK_INFO_FAIL_SIDE_EFFECT_CHECK(isolate, f, Handle<Object>,
v8::Value);
LOG(isolate,
@@ -237,8 +230,7 @@ Handle<Object> PropertyCallbackArguments::CallNamedDefiner(
const v8::PropertyDescriptor& desc) {
DCHECK_NAME_COMPATIBLE(interceptor, name);
Isolate* isolate = this->isolate();
- RuntimeCallTimerScope timer(isolate,
- RuntimeCallCounterId::kNamedDefinerCallback);
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kNamedDefinerCallback);
GenericNamedPropertyDefinerCallback f =
ToCData<GenericNamedPropertyDefinerCallback>(interceptor->definer());
PREPARE_CALLBACK_INFO_FAIL_SIDE_EFFECT_CHECK(isolate, f, Handle<Object>,
@@ -253,8 +245,7 @@ Handle<Object> PropertyCallbackArguments::CallIndexedSetter(
Handle<InterceptorInfo> interceptor, uint32_t index, Handle<Object> value) {
DCHECK(!interceptor->is_named());
Isolate* isolate = this->isolate();
- RuntimeCallTimerScope timer(isolate,
- RuntimeCallCounterId::kIndexedSetterCallback);
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kIndexedSetterCallback);
IndexedPropertySetterCallback f =
ToCData<IndexedPropertySetterCallback>(interceptor->setter());
PREPARE_CALLBACK_INFO_FAIL_SIDE_EFFECT_CHECK(isolate, f, Handle<Object>,
@@ -270,8 +261,7 @@ Handle<Object> PropertyCallbackArguments::CallIndexedDefiner(
const v8::PropertyDescriptor& desc) {
DCHECK(!interceptor->is_named());
Isolate* isolate = this->isolate();
- RuntimeCallTimerScope timer(isolate,
- RuntimeCallCounterId::kIndexedDefinerCallback);
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kIndexedDefinerCallback);
IndexedPropertyDefinerCallback f =
ToCData<IndexedPropertyDefinerCallback>(interceptor->definer());
PREPARE_CALLBACK_INFO_FAIL_SIDE_EFFECT_CHECK(isolate, f, Handle<Object>,
@@ -286,8 +276,7 @@ Handle<Object> PropertyCallbackArguments::CallIndexedGetter(
Handle<InterceptorInfo> interceptor, uint32_t index) {
DCHECK(!interceptor->is_named());
Isolate* isolate = this->isolate();
- RuntimeCallTimerScope timer(isolate,
- RuntimeCallCounterId::kNamedGetterCallback);
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kNamedGetterCallback);
LOG(isolate,
ApiIndexedPropertyAccess("interceptor-indexed-getter", holder(), index));
IndexedPropertyGetterCallback f =
@@ -299,8 +288,7 @@ Handle<Object> PropertyCallbackArguments::CallIndexedDescriptor(
Handle<InterceptorInfo> interceptor, uint32_t index) {
DCHECK(!interceptor->is_named());
Isolate* isolate = this->isolate();
- RuntimeCallTimerScope timer(isolate,
- RuntimeCallCounterId::kIndexedDescriptorCallback);
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kIndexedDescriptorCallback);
LOG(isolate, ApiIndexedPropertyAccess("interceptor-indexed-descriptor",
holder(), index));
IndexedPropertyDescriptorCallback f =
@@ -338,8 +326,7 @@ Handle<JSObject> PropertyCallbackArguments::CallPropertyEnumerator(
Handle<Object> PropertyCallbackArguments::CallAccessorGetter(
Handle<AccessorInfo> info, Handle<Name> name) {
Isolate* isolate = this->isolate();
- RuntimeCallTimerScope timer(isolate,
- RuntimeCallCounterId::kAccessorGetterCallback);
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kAccessorGetterCallback);
LOG(isolate, ApiNamedPropertyAccess("accessor-getter", holder(), *name));
AccessorNameGetterCallback f =
ToCData<AccessorNameGetterCallback>(info->getter());
@@ -351,8 +338,7 @@ Handle<Object> PropertyCallbackArguments::CallAccessorSetter(
Handle<AccessorInfo> accessor_info, Handle<Name> name,
Handle<Object> value) {
Isolate* isolate = this->isolate();
- RuntimeCallTimerScope timer(isolate,
- RuntimeCallCounterId::kAccessorSetterCallback);
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kAccessorSetterCallback);
AccessorNameSetterCallback f =
ToCData<AccessorNameSetterCallback>(accessor_info->setter());
PREPARE_CALLBACK_INFO(isolate, f, Handle<Object>, void, accessor_info,
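
The api-arguments-inl.h hunks above (and several files later in this patch) replace hand-written RuntimeCallTimerScope declarations with the shorter RCS_SCOPE macro. The real macro lives in V8's runtime-call-stats headers and is not shown in this diff; the standalone toy below only illustrates the scoped-timer pattern being abbreviated, with invented names throughout.

#include <cstdio>

// Toy stand-in for RuntimeCallTimerScope: starts "timing" on construction and
// stops when the enclosing scope ends. V8's real scope feeds RuntimeCallStats
// counters and can compile away entirely when stats are disabled.
struct ToyTimerScope {
  explicit ToyTimerScope(const char* counter) : counter_(counter) {
    std::printf("enter %s\n", counter_);
  }
  ~ToyTimerScope() { std::printf("leave %s\n", counter_); }
  const char* counter_;
};

// Toy equivalent of the RCS_SCOPE shorthand used throughout this patch.
#define TOY_RCS_SCOPE(counter) ToyTimerScope toy_rcs_timer_scope(counter)

int main() {
  TOY_RCS_SCOPE("kFunctionCallback");  // covers the rest of this scope
  std::printf("callback body\n");
  return 0;
}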
diff --git a/chromium/v8/src/api/api-inl.h b/chromium/v8/src/api/api-inl.h
index 849364e655c..84b9b288bb0 100644
--- a/chromium/v8/src/api/api-inl.h
+++ b/chromium/v8/src/api/api-inl.h
@@ -139,6 +139,7 @@ class V8_NODISCARD CallDepthScope {
CallDepthScope(i::Isolate* isolate, Local<Context> context)
: isolate_(isolate),
context_(context),
+ did_enter_context_(false),
escaped_(false),
safe_for_termination_(isolate->next_v8_call_is_safe_for_termination()),
interrupts_scope_(isolate_, i::StackGuard::TERMINATE_EXECUTION,
@@ -152,12 +153,11 @@ class V8_NODISCARD CallDepthScope {
if (!context.IsEmpty()) {
i::Handle<i::Context> env = Utils::OpenHandle(*context);
i::HandleScopeImplementer* impl = isolate->handle_scope_implementer();
- if (!isolate->context().is_null() &&
- isolate->context().native_context() == env->native_context()) {
- context_ = Local<Context>();
- } else {
+ if (isolate->context().is_null() ||
+ isolate->context().native_context() != env->native_context()) {
impl->SaveContext(isolate->context());
isolate->set_context(*env);
+ did_enter_context_ = true;
}
}
if (do_callback) isolate_->FireBeforeCallEnteredCallback();
@@ -165,16 +165,17 @@ class V8_NODISCARD CallDepthScope {
~CallDepthScope() {
i::MicrotaskQueue* microtask_queue = isolate_->default_microtask_queue();
if (!context_.IsEmpty()) {
- i::HandleScopeImplementer* impl = isolate_->handle_scope_implementer();
- isolate_->set_context(impl->RestoreContext());
+ if (did_enter_context_) {
+ i::HandleScopeImplementer* impl = isolate_->handle_scope_implementer();
+ isolate_->set_context(impl->RestoreContext());
+ }
i::Handle<i::Context> env = Utils::OpenHandle(*context_);
microtask_queue = env->native_context().microtask_queue();
}
if (!escaped_) isolate_->thread_local_top()->DecrementCallDepth(this);
if (do_callback) isolate_->FireCallCompletedCallback(microtask_queue);
-// TODO(jochen): This should be #ifdef DEBUG
-#ifdef V8_CHECK_MICROTASKS_SCOPES_CONSISTENCY
+#ifdef DEBUG
if (do_callback) {
if (microtask_queue && microtask_queue->microtasks_policy() ==
v8::MicrotasksPolicy::kScoped) {
@@ -213,9 +214,9 @@ class V8_NODISCARD CallDepthScope {
i::Isolate* const isolate_;
Local<Context> context_;
- bool escaped_;
- bool do_callback_;
- bool safe_for_termination_;
+ bool did_enter_context_ : 1;
+ bool escaped_ : 1;
+ bool safe_for_termination_ : 1;
i::InterruptsScope interrupts_scope_;
i::Address previous_stack_height_;
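
The CallDepthScope change above stops restoring a saved context that was never actually replaced: the scope now records whether it switched contexts (did_enter_context_) and only calls RestoreContext on destruction in that case. The self-contained sketch below shows that save/switch/conditionally-restore pattern with simplified stand-in types; V8's scope also manages call depth, callbacks, and microtask checks that are omitted here.

#include <cassert>

struct ToyContextSwitcher {
  ToyContextSwitcher(int* current, int target)
      : current_(current), saved_(*current), entered_(false) {
    if (*current_ != target) {  // switch only when the target context differs
      *current_ = target;
      entered_ = true;
    }
  }
  ~ToyContextSwitcher() {
    if (entered_) *current_ = saved_;  // restore only what was actually saved
  }
  int* current_;
  int saved_;
  bool entered_;
};

int main() {
  int ctx = 1;
  { ToyContextSwitcher same(&ctx, 1); }  // no switch, nothing to restore
  assert(ctx == 1);
  { ToyContextSwitcher other(&ctx, 2); assert(ctx == 2); }
  assert(ctx == 1);  // restored after leaving the scope
  return 0;
}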
diff --git a/chromium/v8/src/api/api-macros.h b/chromium/v8/src/api/api-macros.h
index b126e1cd5a0..9b339321e7a 100644
--- a/chromium/v8/src/api/api-macros.h
+++ b/chromium/v8/src/api/api-macros.h
@@ -30,9 +30,9 @@
* TODO(jochen): Remove calls form API methods to DO_NOT_USE macros.
*/
-#define LOG_API(isolate, class_name, function_name) \
- i::RuntimeCallTimerScope _runtime_timer( \
- isolate, i::RuntimeCallCounterId::kAPI_##class_name##_##function_name); \
+#define LOG_API(isolate, class_name, function_name) \
+ RCS_SCOPE(isolate, \
+ i::RuntimeCallCounterId::kAPI_##class_name##_##function_name); \
LOG(isolate, ApiEntryCall("v8::" #class_name "::" #function_name))
#define ENTER_V8_DO_NOT_USE(isolate) i::VMState<v8::OTHER> __state__((isolate))
@@ -126,7 +126,3 @@
EXCEPTION_BAILOUT_CHECK_SCOPED_DO_NOT_USE(isolate, Nothing<T>())
#define RETURN_ESCAPED(value) return handle_scope.Escape(value);
-
-// TODO(jochen): This should be #ifdef DEBUG
-#ifdef V8_CHECK_MICROTASKS_SCOPES_CONSISTENCY
-#endif
diff --git a/chromium/v8/src/api/api-natives.cc b/chromium/v8/src/api/api-natives.cc
index 56bf5bd47c4..46d54f6f587 100644
--- a/chromium/v8/src/api/api-natives.cc
+++ b/chromium/v8/src/api/api-natives.cc
@@ -74,9 +74,9 @@ MaybeHandle<Object> DefineAccessorProperty(Isolate* isolate,
Handle<Object> setter,
PropertyAttributes attributes) {
DCHECK(!getter->IsFunctionTemplateInfo() ||
- !FunctionTemplateInfo::cast(*getter).do_not_cache());
+ FunctionTemplateInfo::cast(*getter).should_cache());
DCHECK(!setter->IsFunctionTemplateInfo() ||
- !FunctionTemplateInfo::cast(*setter).do_not_cache());
+ FunctionTemplateInfo::cast(*setter).should_cache());
if (getter->IsFunctionTemplateInfo() &&
FunctionTemplateInfo::cast(*getter).BreakAtEntry()) {
ASSIGN_RETURN_ON_EXCEPTION(
@@ -184,8 +184,7 @@ Object GetIntrinsic(Isolate* isolate, v8::Intrinsic intrinsic) {
template <typename TemplateInfoT>
MaybeHandle<JSObject> ConfigureInstance(Isolate* isolate, Handle<JSObject> obj,
Handle<TemplateInfoT> data) {
- RuntimeCallTimerScope timer(isolate,
- RuntimeCallCounterId::kConfigureInstance);
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kConfigureInstance);
HandleScope scope(isolate);
// Disable access checks while instantiating the object.
AccessCheckDisableScope access_check_scope(isolate, obj);
@@ -288,16 +287,20 @@ enum class CachingMode { kLimited, kUnlimited };
MaybeHandle<JSObject> ProbeInstantiationsCache(
Isolate* isolate, Handle<NativeContext> native_context, int serial_number,
CachingMode caching_mode) {
- DCHECK_LE(1, serial_number);
- if (serial_number <= TemplateInfo::kFastTemplateInstantiationsCacheSize) {
+ DCHECK_NE(serial_number, TemplateInfo::kDoNotCache);
+ if (serial_number == TemplateInfo::kUncached) {
+ return {};
+ }
+
+ if (serial_number < TemplateInfo::kFastTemplateInstantiationsCacheSize) {
FixedArray fast_cache =
native_context->fast_template_instantiations_cache();
- Handle<Object> object{fast_cache.get(serial_number - 1), isolate};
+ Handle<Object> object{fast_cache.get(serial_number), isolate};
if (object->IsTheHole(isolate)) return {};
return Handle<JSObject>::cast(object);
}
if (caching_mode == CachingMode::kUnlimited ||
- (serial_number <= TemplateInfo::kSlowTemplateInstantiationsCacheSize)) {
+ (serial_number < TemplateInfo::kSlowTemplateInstantiationsCacheSize)) {
SimpleNumberDictionary slow_cache =
native_context->slow_template_instantiations_cache();
InternalIndex entry = slow_cache.FindEntry(isolate, serial_number);
@@ -310,19 +313,27 @@ MaybeHandle<JSObject> ProbeInstantiationsCache(
void CacheTemplateInstantiation(Isolate* isolate,
Handle<NativeContext> native_context,
- int serial_number, CachingMode caching_mode,
+ Handle<TemplateInfo> data,
+ CachingMode caching_mode,
Handle<JSObject> object) {
- DCHECK_LE(1, serial_number);
- if (serial_number <= TemplateInfo::kFastTemplateInstantiationsCacheSize) {
+ DCHECK_NE(TemplateInfo::kDoNotCache, data->serial_number());
+
+ int serial_number = data->serial_number();
+ if (serial_number == TemplateInfo::kUncached) {
+ serial_number = isolate->heap()->GetNextTemplateSerialNumber();
+ }
+
+ if (serial_number < TemplateInfo::kFastTemplateInstantiationsCacheSize) {
Handle<FixedArray> fast_cache =
handle(native_context->fast_template_instantiations_cache(), isolate);
Handle<FixedArray> new_cache =
- FixedArray::SetAndGrow(isolate, fast_cache, serial_number - 1, object);
+ FixedArray::SetAndGrow(isolate, fast_cache, serial_number, object);
if (*new_cache != *fast_cache) {
native_context->set_fast_template_instantiations_cache(*new_cache);
}
+ data->set_serial_number(serial_number);
} else if (caching_mode == CachingMode::kUnlimited ||
- (serial_number <=
+ (serial_number <
TemplateInfo::kSlowTemplateInstantiationsCacheSize)) {
Handle<SimpleNumberDictionary> cache =
handle(native_context->slow_template_instantiations_cache(), isolate);
@@ -331,20 +342,28 @@ void CacheTemplateInstantiation(Isolate* isolate,
if (*new_cache != *cache) {
native_context->set_slow_template_instantiations_cache(*new_cache);
}
+ data->set_serial_number(serial_number);
+ } else {
+ // we've overflowed the cache limit, no more caching
+ data->set_serial_number(TemplateInfo::kDoNotCache);
}
}
void UncacheTemplateInstantiation(Isolate* isolate,
Handle<NativeContext> native_context,
- int serial_number, CachingMode caching_mode) {
- DCHECK_LE(1, serial_number);
- if (serial_number <= TemplateInfo::kFastTemplateInstantiationsCacheSize) {
+ Handle<TemplateInfo> data,
+ CachingMode caching_mode) {
+ int serial_number = data->serial_number();
+ if (serial_number < 0) return;
+
+ if (serial_number < TemplateInfo::kFastTemplateInstantiationsCacheSize) {
FixedArray fast_cache =
native_context->fast_template_instantiations_cache();
- DCHECK(!fast_cache.get(serial_number - 1).IsUndefined(isolate));
- fast_cache.set_undefined(serial_number - 1);
+ DCHECK(!fast_cache.get(serial_number).IsUndefined(isolate));
+ fast_cache.set_undefined(serial_number);
+ data->set_serial_number(TemplateInfo::kUncached);
} else if (caching_mode == CachingMode::kUnlimited ||
- (serial_number <=
+ (serial_number <
TemplateInfo::kSlowTemplateInstantiationsCacheSize)) {
Handle<SimpleNumberDictionary> cache =
handle(native_context->slow_template_instantiations_cache(), isolate);
@@ -352,6 +371,7 @@ void UncacheTemplateInstantiation(Isolate* isolate,
DCHECK(entry.is_found());
cache = SimpleNumberDictionary::DeleteEntry(isolate, cache, entry);
native_context->set_slow_template_instantiations_cache(*cache);
+ data->set_serial_number(TemplateInfo::kUncached);
}
}
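
The ProbeInstantiationsCache / CacheTemplateInstantiation / UncacheTemplateInstantiation hunks above move template serial numbers from eager assignment at template creation to lazy assignment on first cache insertion, with two sentinel states (kDoNotCache and kUncached) and 0-based cache indices. The standalone sketch below mirrors that state machine; the concrete sentinel values are assumptions for illustration, not V8's definitions.

#include <cassert>

namespace sketch {
constexpr int kDoNotCache = -2;  // template must never be cached (assumed value)
constexpr int kUncached = -1;    // cacheable, but no slot assigned yet (assumed value)

bool CanProbeCache(int serial_number) {
  // Only templates that already own a slot (>= 0) can hit the cache.
  return serial_number >= 0;
}

int EnsureCacheSlot(int serial_number, int* next_serial_number) {
  // Slots are handed out lazily on first insertion instead of at template
  // creation time (the pre-patch behaviour).
  assert(serial_number != kDoNotCache);
  if (serial_number == kUncached) return (*next_serial_number)++;
  return serial_number;
}
}  // namespace sketch

int main() {
  int next = 0;
  int sn = sketch::kUncached;
  assert(!sketch::CanProbeCache(sn));
  sn = sketch::EnsureCacheSlot(sn, &next);
  assert(sketch::CanProbeCache(sn) && sn == 0);
  return 0;
}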
@@ -371,23 +391,22 @@ MaybeHandle<JSObject> InstantiateObject(Isolate* isolate,
Handle<ObjectTemplateInfo> info,
Handle<JSReceiver> new_target,
bool is_prototype) {
- RuntimeCallTimerScope timer(isolate,
- RuntimeCallCounterId::kInstantiateObject);
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kInstantiateObject);
Handle<JSFunction> constructor;
- int serial_number = info->serial_number();
+ bool should_cache = info->should_cache();
if (!new_target.is_null()) {
if (IsSimpleInstantiation(isolate, *info, *new_target)) {
constructor = Handle<JSFunction>::cast(new_target);
} else {
// Disable caching for subclass instantiation.
- serial_number = 0;
+ should_cache = false;
}
}
// Fast path.
Handle<JSObject> result;
- if (serial_number) {
+ if (should_cache && info->is_cached()) {
if (ProbeInstantiationsCache(isolate, isolate->native_context(),
- serial_number, CachingMode::kLimited)
+ info->serial_number(), CachingMode::kLimited)
.ToHandle(&result)) {
return isolate->factory()->CopyJSObject(result);
}
@@ -430,9 +449,9 @@ MaybeHandle<JSObject> InstantiateObject(Isolate* isolate,
// TODO(dcarney): is this necessary?
JSObject::MigrateSlowToFast(result, 0, "ApiNatives::InstantiateObject");
// Don't cache prototypes.
- if (serial_number) {
- CacheTemplateInstantiation(isolate, isolate->native_context(),
- serial_number, CachingMode::kLimited, result);
+ if (should_cache) {
+ CacheTemplateInstantiation(isolate, isolate->native_context(), info,
+ CachingMode::kLimited, result);
result = isolate->factory()->CopyJSObject(result);
}
}
@@ -465,12 +484,11 @@ MaybeHandle<Object> GetInstancePrototype(Isolate* isolate,
MaybeHandle<JSFunction> InstantiateFunction(
Isolate* isolate, Handle<NativeContext> native_context,
Handle<FunctionTemplateInfo> data, MaybeHandle<Name> maybe_name) {
- RuntimeCallTimerScope timer(isolate,
- RuntimeCallCounterId::kInstantiateFunction);
- int serial_number = data->serial_number();
- if (serial_number) {
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kInstantiateFunction);
+ bool should_cache = data->should_cache();
+ if (should_cache && data->is_cached()) {
Handle<JSObject> result;
- if (ProbeInstantiationsCache(isolate, native_context, serial_number,
+ if (ProbeInstantiationsCache(isolate, native_context, data->serial_number(),
CachingMode::kUnlimited)
.ToHandle(&result)) {
return Handle<JSFunction>::cast(result);
@@ -504,7 +522,7 @@ MaybeHandle<JSFunction> InstantiateFunction(
GetInstancePrototype(isolate, parent),
JSFunction);
CHECK(parent_prototype->IsHeapObject());
- JSObject::ForceSetPrototype(Handle<JSObject>::cast(prototype),
+ JSObject::ForceSetPrototype(isolate, Handle<JSObject>::cast(prototype),
Handle<HeapObject>::cast(parent_prototype));
}
}
@@ -517,18 +535,16 @@ MaybeHandle<JSFunction> InstantiateFunction(
Handle<JSFunction> function = ApiNatives::CreateApiFunction(
isolate, native_context, data, prototype, function_type, maybe_name);
- if (serial_number) {
+ if (should_cache) {
// Cache the function.
- CacheTemplateInstantiation(isolate, native_context, serial_number,
+ CacheTemplateInstantiation(isolate, native_context, data,
CachingMode::kUnlimited, function);
}
MaybeHandle<JSObject> result = ConfigureInstance(isolate, function, data);
if (result.is_null()) {
// Uncache on error.
- if (serial_number) {
- UncacheTemplateInstantiation(isolate, native_context, serial_number,
- CachingMode::kUnlimited);
- }
+ UncacheTemplateInstantiation(isolate, native_context, data,
+ CachingMode::kUnlimited);
return MaybeHandle<JSFunction>();
}
data->set_published(true);
@@ -596,7 +612,8 @@ MaybeHandle<JSObject> ApiNatives::InstantiateRemoteObject(
object_map->set_may_have_interesting_symbols(true);
Handle<JSObject> object = isolate->factory()->NewJSObjectFromMap(object_map);
- JSObject::ForceSetPrototype(object, isolate->factory()->null_value());
+ JSObject::ForceSetPrototype(isolate, object,
+ isolate->factory()->null_value());
return object;
}
@@ -653,8 +670,7 @@ Handle<JSFunction> ApiNatives::CreateApiFunction(
Isolate* isolate, Handle<NativeContext> native_context,
Handle<FunctionTemplateInfo> obj, Handle<Object> prototype,
InstanceType type, MaybeHandle<Name> maybe_name) {
- RuntimeCallTimerScope timer(isolate,
- RuntimeCallCounterId::kCreateApiFunction);
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kCreateApiFunction);
Handle<SharedFunctionInfo> shared =
FunctionTemplateInfo::GetOrCreateSharedFunctionInfo(isolate, obj,
maybe_name);
diff --git a/chromium/v8/src/api/api.cc b/chromium/v8/src/api/api.cc
index a5c658a7992..9d1e53d3bb6 100644
--- a/chromium/v8/src/api/api.cc
+++ b/chromium/v8/src/api/api.cc
@@ -568,7 +568,7 @@ StartupData SnapshotCreator::CreateBlob(
i::GarbageCollectionReason::kSnapshotCreator);
{
i::HandleScope scope(isolate);
- isolate->heap()->CompactWeakArrayLists(internal::AllocationType::kOld);
+ isolate->heap()->CompactWeakArrayLists();
}
i::Snapshot::ClearReconstructableDataForSerialization(
@@ -1093,9 +1093,13 @@ void Context::SetAlignedPointerInEmbedderData(int index, void* value) {
// --- T e m p l a t e ---
-static void InitializeTemplate(i::TemplateInfo that, int type) {
+static void InitializeTemplate(i::TemplateInfo that, int type,
+ bool do_not_cache) {
that.set_number_of_properties(0);
that.set_tag(type);
+ int serial_number =
+ do_not_cache ? i::TemplateInfo::kDoNotCache : i::TemplateInfo::kUncached;
+ that.set_serial_number(serial_number);
}
void Template::Set(v8::Local<Name> name, v8::Local<Data> value,
@@ -1105,15 +1109,18 @@ void Template::Set(v8::Local<Name> name, v8::Local<Data> value,
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
i::HandleScope scope(isolate);
auto value_obj = Utils::OpenHandle(*value);
+
Utils::ApiCheck(!value_obj->IsJSReceiver() || value_obj->IsTemplateInfo(),
"v8::Template::Set",
"Invalid value, must be a primitive or a Template");
+
+ // The template cache only performs shallow clones, if we set an
+ // ObjectTemplate as a property value then we can not cache the receiver
+ // template.
if (value_obj->IsObjectTemplateInfo()) {
- templ->set_serial_number(0);
- if (templ->IsFunctionTemplateInfo()) {
- i::Handle<i::FunctionTemplateInfo>::cast(templ)->set_do_not_cache(true);
- }
+ templ->set_serial_number(i::TemplateInfo::kDoNotCache);
}
+
i::ApiNatives::AddDataProperty(isolate, templ, Utils::OpenHandle(*name),
value_obj,
static_cast<i::PropertyAttributes>(attribute));
@@ -1145,8 +1152,9 @@ void Template::SetAccessorProperty(v8::Local<v8::Name> name,
}
// --- F u n c t i o n T e m p l a t e ---
-static void InitializeFunctionTemplate(i::FunctionTemplateInfo info) {
- InitializeTemplate(info, Consts::FUNCTION_TEMPLATE);
+static void InitializeFunctionTemplate(i::FunctionTemplateInfo info,
+ bool do_not_cache) {
+ InitializeTemplate(info, Consts::FUNCTION_TEMPLATE, do_not_cache);
info.set_flag(0);
}
@@ -1177,7 +1185,7 @@ void FunctionTemplate::SetPrototypeProviderTemplate(
Utils::OpenHandle(*prototype_provider);
Utils::ApiCheck(self->GetPrototypeTemplate().IsUndefined(i_isolate),
"v8::FunctionTemplate::SetPrototypeProviderTemplate",
- "Protoype must be undefiend");
+ "Protoype must be undefined");
Utils::ApiCheck(self->GetParentTemplate().IsUndefined(i_isolate),
"v8::FunctionTemplate::SetPrototypeProviderTemplate",
"Prototype provider must be empty");
@@ -1210,7 +1218,7 @@ static Local<FunctionTemplate> FunctionTemplateNew(
bool do_not_cache,
v8::Local<Private> cached_property_name = v8::Local<Private>(),
SideEffectType side_effect_type = SideEffectType::kHasSideEffect,
- const CFunction* c_function = nullptr) {
+ const MemorySpan<const CFunction>& c_function_overloads = {}) {
i::Handle<i::Struct> struct_obj = isolate->factory()->NewStruct(
i::FUNCTION_TEMPLATE_INFO_TYPE, i::AllocationType::kOld);
i::Handle<i::FunctionTemplateInfo> obj =
@@ -1219,14 +1227,8 @@ static Local<FunctionTemplate> FunctionTemplateNew(
// Disallow GC until all fields of obj have acceptable types.
i::DisallowGarbageCollection no_gc;
i::FunctionTemplateInfo raw = *obj;
- InitializeFunctionTemplate(raw);
+ InitializeFunctionTemplate(raw, do_not_cache);
raw.set_length(length);
- raw.set_do_not_cache(do_not_cache);
- int next_serial_number = i::FunctionTemplateInfo::kInvalidSerialNumber;
- if (!do_not_cache) {
- next_serial_number = isolate->heap()->GetNextTemplateSerialNumber();
- }
- raw.set_serial_number(next_serial_number);
raw.set_undetectable(false);
raw.set_needs_access_check(false);
raw.set_accept_any_receiver(true);
@@ -1241,7 +1243,7 @@ static Local<FunctionTemplate> FunctionTemplateNew(
}
if (callback != nullptr) {
Utils::ToLocal(obj)->SetCallHandler(callback, data, side_effect_type,
- c_function);
+ c_function_overloads);
}
return Utils::ToLocal(obj);
}
@@ -1255,10 +1257,24 @@ Local<FunctionTemplate> FunctionTemplate::New(
// function templates when the isolate is created for serialization.
LOG_API(i_isolate, FunctionTemplate, New);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
- auto templ = FunctionTemplateNew(i_isolate, callback, data, signature, length,
- behavior, false, Local<Private>(),
- side_effect_type, c_function);
- return templ;
+ return FunctionTemplateNew(
+ i_isolate, callback, data, signature, length, behavior, false,
+ Local<Private>(), side_effect_type,
+ c_function ? MemorySpan<const CFunction>{c_function, 1}
+ : MemorySpan<const CFunction>{});
+}
+
+Local<FunctionTemplate> FunctionTemplate::NewWithCFunctionOverloads(
+ Isolate* isolate, FunctionCallback callback, v8::Local<Value> data,
+ v8::Local<Signature> signature, int length, ConstructorBehavior behavior,
+ SideEffectType side_effect_type,
+ const MemorySpan<const CFunction>& c_function_overloads) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ LOG_API(i_isolate, FunctionTemplate, New);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
+ return FunctionTemplateNew(i_isolate, callback, data, signature, length,
+ behavior, false, Local<Private>(),
+ side_effect_type, c_function_overloads);
}
Local<FunctionTemplate> FunctionTemplate::NewWithCache(
@@ -1289,10 +1305,10 @@ Local<AccessorSignature> AccessorSignature::New(
(obj)->setter(*foreign); \
} while (false)
-void FunctionTemplate::SetCallHandler(FunctionCallback callback,
- v8::Local<Value> data,
- SideEffectType side_effect_type,
- const CFunction* c_function) {
+void FunctionTemplate::SetCallHandler(
+ FunctionCallback callback, v8::Local<Value> data,
+ SideEffectType side_effect_type,
+ const MemorySpan<const CFunction>& c_function_overloads) {
auto info = Utils::OpenHandle(this);
EnsureNotPublished(info, "v8::FunctionTemplate::SetCallHandler");
i::Isolate* isolate = info->GetIsolate();
@@ -1306,15 +1322,28 @@ void FunctionTemplate::SetCallHandler(FunctionCallback callback,
data = v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate));
}
obj->set_data(*Utils::OpenHandle(*data));
- // Blink passes CFunction's constructed with the default constructor
- // for non-fast calls, so we should check the address too.
- if (c_function != nullptr && c_function->GetAddress()) {
- i::FunctionTemplateInfo::SetCFunction(
- isolate, info,
- i::handle(*FromCData(isolate, c_function->GetAddress()), isolate));
- i::FunctionTemplateInfo::SetCSignature(
- isolate, info,
- i::handle(*FromCData(isolate, c_function->GetTypeInfo()), isolate));
+ if (c_function_overloads.size() > 0) {
+ // Stores the data for a sequence of CFunction overloads into a single
+ // FixedArray, as [address_0, signature_0, ... address_n-1, signature_n-1].
+ i::Handle<i::FixedArray> function_overloads =
+ isolate->factory()->NewFixedArray(static_cast<int>(
+ c_function_overloads.size() *
+ i::FunctionTemplateInfo::kFunctionOverloadEntrySize));
+ int function_count = static_cast<int>(c_function_overloads.size());
+ for (int i = 0; i < function_count; i++) {
+ const CFunction& c_function = c_function_overloads.data()[i];
+ i::Handle<i::Object> address =
+ FromCData(isolate, c_function.GetAddress());
+ function_overloads->set(
+ i::FunctionTemplateInfo::kFunctionOverloadEntrySize * i, *address);
+ i::Handle<i::Object> signature =
+ FromCData(isolate, c_function.GetTypeInfo());
+ function_overloads->set(
+ i::FunctionTemplateInfo::kFunctionOverloadEntrySize * i + 1,
+ *signature);
+ }
+ i::FunctionTemplateInfo::SetCFunctionOverloads(isolate, info,
+ function_overloads);
}
info->set_call_code(*obj, kReleaseStore);
}
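
A sketch for orientation (not part of this patch): an embedder hands a prepared set of CFunction overloads to a template roughly as below. The CFunction array is assumed to have been built per include/v8-fast-api-calls.h (for example via v8::CFunction::Make), and slow_callback is the ordinary slow-path FunctionCallback; {overloads, count} forms the MemorySpan that SetCallHandler flattens into the [address_0, signature_0, ...] FixedArray described above.

    #include "v8.h"
    #include "v8-fast-api-calls.h"

    // Sketch: wire pre-built CFunction overloads plus a slow-path callback
    // to a FunctionTemplate via the new span-based entry point.
    v8::Local<v8::FunctionTemplate> MakeOverloadedTemplate(
        v8::Isolate* isolate, v8::FunctionCallback slow_callback,
        const v8::CFunction* overloads, size_t count) {
      return v8::FunctionTemplate::NewWithCFunctionOverloads(
          isolate, slow_callback, v8::Local<v8::Value>(),
          v8::Local<v8::Signature>(), /*length=*/0,
          v8::ConstructorBehavior::kThrow, v8::SideEffectType::kHasSideEffect,
          {overloads, count});
    }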
@@ -1442,13 +1471,8 @@ static Local<ObjectTemplate> ObjectTemplateNew(
// Disallow GC until all fields of obj have acceptable types.
i::DisallowGarbageCollection no_gc;
i::ObjectTemplateInfo raw = *obj;
- InitializeTemplate(raw, Consts::OBJECT_TEMPLATE);
+ InitializeTemplate(raw, Consts::OBJECT_TEMPLATE, do_not_cache);
raw.set_data(0);
- int next_serial_number = 0;
- if (!do_not_cache) {
- next_serial_number = isolate->heap()->GetNextTemplateSerialNumber();
- }
- raw.set_serial_number(next_serial_number);
if (!constructor.IsEmpty()) {
raw.set_constructor(*Utils::OpenHandle(*constructor));
}
@@ -1920,26 +1944,44 @@ MaybeLocal<Value> Script::Run(Local<Context> context) {
ENTER_V8(isolate, context, Script, Run, MaybeLocal<Value>(),
InternalEscapableScope);
i::HistogramTimerScope execute_timer(isolate->counters()->execute(), true);
- i::AggregatingHistogramTimerScope timer(isolate->counters()->compile_lazy());
+ i::AggregatingHistogramTimerScope histogram_timer(
+ isolate->counters()->compile_lazy());
i::TimerEventScope<i::TimerEventExecute> timer_scope(isolate);
auto fun = i::Handle<i::JSFunction>::cast(Utils::OpenHandle(this));
// TODO(crbug.com/1193459): remove once ablation study is completed
- if (i::FLAG_script_run_delay) {
- v8::base::OS::Sleep(
- v8::base::TimeDelta::FromMilliseconds(i::FLAG_script_run_delay));
+ base::ElapsedTimer timer;
+ base::TimeDelta delta;
+ if (i::FLAG_script_delay > 0) {
+ delta = v8::base::TimeDelta::FromMillisecondsD(i::FLAG_script_delay);
}
- if (i::FLAG_script_run_delay_once && !isolate->did_run_script_delay()) {
- v8::base::OS::Sleep(
- v8::base::TimeDelta::FromMilliseconds(i::FLAG_script_run_delay_once));
+ if (i::FLAG_script_delay_once > 0 && !isolate->did_run_script_delay()) {
+ delta = v8::base::TimeDelta::FromMillisecondsD(i::FLAG_script_delay_once);
isolate->set_did_run_script_delay(true);
}
+ if (i::FLAG_script_delay_fraction > 0.0) {
+ timer.Start();
+ } else if (delta.InMicroseconds() > 0) {
+ timer.Start();
+ while (timer.Elapsed() < delta) {
+ // Busy wait.
+ }
+ }
i::Handle<i::Object> receiver = isolate->global_proxy();
Local<Value> result;
has_pending_exception = !ToLocal<Value>(
i::Execution::Call(isolate, fun, receiver, 0, nullptr), &result);
+ if (i::FLAG_script_delay_fraction > 0.0) {
+ delta = v8::base::TimeDelta::FromMillisecondsD(
+ timer.Elapsed().InMillisecondsF() * i::FLAG_script_delay_fraction);
+ timer.Restart();
+ while (timer.Elapsed() < delta) {
+ // Busy wait.
+ }
+ }
+
RETURN_ON_FAILED_EXECUTION(Value);
RETURN_ESCAPED(result);
}
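
The delay logic above is a measure-then-spin pattern; a standalone sketch of the i::FLAG_script_delay_fraction behaviour in plain C++ (no V8 internals assumed):

    #include <chrono>

    // Sketch: run `fn`, then busy-wait for `fraction` of the time it took,
    // mirroring the ablation-study delay added to Script::Run above.
    template <typename Fn>
    void RunThenDelayFraction(Fn&& fn, double fraction) {
      auto start = std::chrono::steady_clock::now();
      fn();
      auto elapsed = std::chrono::steady_clock::now() - start;
      auto deadline =
          std::chrono::steady_clock::now() +
          std::chrono::duration_cast<std::chrono::steady_clock::duration>(
              elapsed * fraction);
      while (std::chrono::steady_clock::now() < deadline) {
        // Busy wait.
      }
    }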
@@ -4053,34 +4095,56 @@ Maybe<bool> v8::Object::CreateDataProperty(v8::Local<v8::Context> context,
v8::Local<Name> key,
v8::Local<Value> value) {
auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
- ENTER_V8(isolate, context, Object, CreateDataProperty, Nothing<bool>(),
- i::HandleScope);
i::Handle<i::JSReceiver> self = Utils::OpenHandle(this);
i::Handle<i::Name> key_obj = Utils::OpenHandle(*key);
i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
- Maybe<bool> result = i::JSReceiver::CreateDataProperty(
- isolate, self, key_obj, value_obj, Just(i::kDontThrow));
- has_pending_exception = result.IsNothing();
- RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
- return result;
+ i::LookupIterator::Key lookup_key(isolate, key_obj);
+ i::LookupIterator it(isolate, self, lookup_key, i::LookupIterator::OWN);
+ if (self->IsJSProxy()) {
+ ENTER_V8(isolate, context, Object, CreateDataProperty, Nothing<bool>(),
+ i::HandleScope);
+ Maybe<bool> result =
+ i::JSReceiver::CreateDataProperty(&it, value_obj, Just(i::kDontThrow));
+ has_pending_exception = result.IsNothing();
+ RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
+ return result;
+ } else {
+ ENTER_V8_NO_SCRIPT(isolate, context, Object, CreateDataProperty,
+ Nothing<bool>(), i::HandleScope);
+ Maybe<bool> result =
+ i::JSObject::CreateDataProperty(&it, value_obj, Just(i::kDontThrow));
+ has_pending_exception = result.IsNothing();
+ RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
+ return result;
+ }
}
Maybe<bool> v8::Object::CreateDataProperty(v8::Local<v8::Context> context,
uint32_t index,
v8::Local<Value> value) {
auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
- ENTER_V8(isolate, context, Object, CreateDataProperty, Nothing<bool>(),
- i::HandleScope);
i::Handle<i::JSReceiver> self = Utils::OpenHandle(this);
i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
i::LookupIterator it(isolate, self, index, self, i::LookupIterator::OWN);
- Maybe<bool> result =
- i::JSReceiver::CreateDataProperty(&it, value_obj, Just(i::kDontThrow));
- has_pending_exception = result.IsNothing();
- RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
- return result;
+ if (self->IsJSProxy()) {
+ ENTER_V8(isolate, context, Object, CreateDataProperty, Nothing<bool>(),
+ i::HandleScope);
+ Maybe<bool> result =
+ i::JSReceiver::CreateDataProperty(&it, value_obj, Just(i::kDontThrow));
+ has_pending_exception = result.IsNothing();
+ RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
+ return result;
+ } else {
+ ENTER_V8_NO_SCRIPT(isolate, context, Object, CreateDataProperty,
+ Nothing<bool>(), i::HandleScope);
+ Maybe<bool> result =
+ i::JSObject::CreateDataProperty(&it, value_obj, Just(i::kDontThrow));
+ has_pending_exception = result.IsNothing();
+ RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
+ return result;
+ }
}
struct v8::PropertyDescriptor::PrivateData {
@@ -4330,17 +4394,27 @@ Local<Value> v8::Object::GetPrototype() {
Maybe<bool> v8::Object::SetPrototype(Local<Context> context,
Local<Value> value) {
auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
- ENTER_V8(isolate, context, Object, SetPrototype, Nothing<bool>(),
- i::HandleScope);
auto self = Utils::OpenHandle(this);
auto value_obj = Utils::OpenHandle(*value);
- // We do not allow exceptions thrown while setting the prototype
- // to propagate outside.
- TryCatch try_catch(reinterpret_cast<v8::Isolate*>(isolate));
- auto result =
- i::JSReceiver::SetPrototype(self, value_obj, false, i::kThrowOnError);
- has_pending_exception = result.IsNothing();
- RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
+ if (self->IsJSProxy()) {
+ ENTER_V8(isolate, context, Object, SetPrototype, Nothing<bool>(),
+ i::HandleScope);
+ // We do not allow exceptions thrown while setting the prototype
+ // to propagate outside.
+ TryCatch try_catch(reinterpret_cast<v8::Isolate*>(isolate));
+ auto result = i::JSProxy::SetPrototype(i::Handle<i::JSProxy>::cast(self),
+ value_obj, false, i::kThrowOnError);
+ has_pending_exception = result.IsNothing();
+ RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
+ } else {
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
+ auto result = i::JSObject::SetPrototype(i::Handle<i::JSObject>::cast(self),
+ value_obj, false, i::kThrowOnError);
+ if (result.IsNothing()) {
+ isolate->clear_pending_exception();
+ return Nothing<bool>();
+ }
+ }
return Just(true);
}
@@ -5015,25 +5089,6 @@ Local<Value> Function::GetDebugName() const {
return Utils::ToLocal(i::Handle<i::Object>(*name, self->GetIsolate()));
}
-Local<Value> Function::GetDisplayName() const {
- auto self = Utils::OpenHandle(this);
- i::Isolate* isolate = self->GetIsolate();
- ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
- if (!self->IsJSFunction()) {
- return ToApiHandle<Primitive>(isolate->factory()->undefined_value());
- }
- auto func = i::Handle<i::JSFunction>::cast(self);
- i::Handle<i::String> property_name =
- isolate->factory()->InternalizeString(i::StaticCharVector("displayName"));
- i::Handle<i::Object> value =
- i::JSReceiver::GetDataProperty(func, property_name);
- if (value->IsString()) {
- i::Handle<i::String> name = i::Handle<i::String>::cast(value);
- if (name->length() > 0) return Utils::ToLocal(name);
- }
- return ToApiHandle<Primitive>(isolate->factory()->undefined_value());
-}
-
ScriptOrigin Function::GetScriptOrigin() const {
auto self = Utils::OpenHandle(this);
auto isolate = reinterpret_cast<v8::Isolate*>(self->GetIsolate());
@@ -5590,19 +5645,33 @@ Local<Value> Symbol::Description() const {
// RO_SPACE. Since RO_SPACE objects are immovable we can use the
// Handle(Address*) constructor with the address of the description
// field in the Symbol object without needing an isolate.
- DCHECK(!COMPRESS_POINTERS_BOOL);
+ DCHECK(!COMPRESS_POINTERS_IN_ISOLATE_CAGE_BOOL);
+#ifndef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
i::Handle<i::HeapObject> ro_description(reinterpret_cast<i::Address*>(
sym->GetFieldAddress(i::Symbol::kDescriptionOffset)));
return Utils::ToLocal(ro_description);
+#else
+ isolate = reinterpret_cast<i::Isolate*>(Isolate::GetCurrent());
+#endif
}
- i::Handle<i::Object> description(sym->description(), isolate);
+ return Description(reinterpret_cast<Isolate*>(isolate));
+}
+Local<Value> Symbol::Description(Isolate* isolate) const {
+ i::Handle<i::Symbol> sym = Utils::OpenHandle(this);
+ i::Handle<i::Object> description(sym->description(),
+ reinterpret_cast<i::Isolate*>(isolate));
return Utils::ToLocal(description);
}
Local<Value> Private::Name() const {
- return reinterpret_cast<const Symbol*>(this)->Description();
+ const Symbol* sym = reinterpret_cast<const Symbol*>(this);
+ i::Handle<i::Symbol> i_sym = Utils::OpenHandle(sym);
+ // v8::Private symbols are created by API and are therefore writable, so we
+ // can always recover an Isolate.
+ i::Isolate* isolate = i::GetIsolateFromWritableObject(*i_sym);
+ return sym->Description(reinterpret_cast<Isolate*>(isolate));
}
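
Embedder-side sketch (not from the patch): callers that already hold an isolate can use the new overload and skip the Isolate::GetCurrent() fallback taken above.

    #include "v8.h"

    // Sketch: the isolate-taking Description() overload added above.
    v8::Local<v8::Value> DescribeSymbol(v8::Isolate* isolate,
                                        v8::Local<v8::Symbol> symbol) {
      return symbol->Description(isolate);
    }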
double Number::Value() const {
@@ -5886,12 +5955,7 @@ void V8::GetSharedMemoryStatistics(SharedMemoryStatistics* statistics) {
i::ReadOnlyHeap::PopulateReadOnlySpaceStatistics(statistics);
}
-void V8::SetIsCrossOriginIsolated() {
- i::FLAG_harmony_sharedarraybuffer = true;
-#if V8_ENABLE_WEBASSEMBLY
- i::FLAG_experimental_wasm_threads = true;
-#endif // V8_ENABLE_WEBASSEMBLY
-}
+void V8::SetIsCrossOriginIsolated() {}
template <typename ObjectType>
struct InvokeBootstrapper;
@@ -6220,6 +6284,45 @@ void Context::SetContinuationPreservedEmbedderData(Local<Value> data) {
*i::Handle<i::HeapObject>::cast(Utils::OpenHandle(*data)));
}
+void v8::Context::SetPromiseHooks(Local<Function> init_hook,
+ Local<Function> before_hook,
+ Local<Function> after_hook,
+ Local<Function> resolve_hook) {
+ i::Handle<i::Context> context = Utils::OpenHandle(this);
+ i::Isolate* isolate = context->GetIsolate();
+
+ i::Handle<i::Object> init = isolate->factory()->undefined_value();
+ i::Handle<i::Object> before = isolate->factory()->undefined_value();
+ i::Handle<i::Object> after = isolate->factory()->undefined_value();
+ i::Handle<i::Object> resolve = isolate->factory()->undefined_value();
+
+ bool has_hook = false;
+
+ if (!init_hook.IsEmpty()) {
+ init = Utils::OpenHandle(*init_hook);
+ has_hook = true;
+ }
+ if (!before_hook.IsEmpty()) {
+ before = Utils::OpenHandle(*before_hook);
+ has_hook = true;
+ }
+ if (!after_hook.IsEmpty()) {
+ after = Utils::OpenHandle(*after_hook);
+ has_hook = true;
+ }
+ if (!resolve_hook.IsEmpty()) {
+ resolve = Utils::OpenHandle(*resolve_hook);
+ has_hook = true;
+ }
+
+ isolate->SetHasContextPromiseHooks(has_hook);
+
+ context->native_context().set_promise_hook_init_function(*init);
+ context->native_context().set_promise_hook_before_function(*before);
+ context->native_context().set_promise_hook_after_function(*after);
+ context->native_context().set_promise_hook_resolve_function(*resolve);
+}
+
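
Embedder-side sketch: any subset of the four hooks may be installed; empty handles stay undefined, and has_hook only flips on for the ones actually provided. A minimal example that only tracks promise creation:

    #include "v8.h"

    // Sketch: install just an init hook on a context; the remaining slots
    // are left empty and therefore stay undefined in the native context.
    void TrackPromiseCreation(v8::Local<v8::Context> context,
                              v8::Local<v8::Function> init_hook) {
      context->SetPromiseHooks(init_hook, v8::Local<v8::Function>(),
                               v8::Local<v8::Function>(),
                               v8::Local<v8::Function>());
    }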
MaybeLocal<Context> metrics::Recorder::GetContext(
Isolate* isolate, metrics::Recorder::ContextId id) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
@@ -6234,6 +6337,11 @@ metrics::Recorder::ContextId metrics::Recorder::GetContextId(
handle(i_context->native_context(), isolate));
}
+metrics::LongTaskStats metrics::LongTaskStats::Get(v8::Isolate* isolate) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ return *i_isolate->GetCurrentLongTaskStats();
+}
+
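
Embedder-side sketch: the accessor is a plain value copy, so it can be sampled before and after a unit of work to attribute long-task time (the available fields are declared in include/v8-metrics.h).

    #include "v8-metrics.h"

    // Sketch: snapshot the per-isolate long task stats at a point in time.
    v8::metrics::LongTaskStats SampleLongTaskStats(v8::Isolate* isolate) {
      return v8::metrics::LongTaskStats::Get(isolate);
    }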
namespace {
i::Address* GetSerializedDataFromFixedArray(i::Isolate* isolate,
i::FixedArray list, size_t index) {
@@ -6352,6 +6460,16 @@ bool FunctionTemplate::HasInstance(v8::Local<v8::Value> value) {
return false;
}
+bool FunctionTemplate::IsLeafTemplateForApiObject(
+ v8::Local<v8::Value> value) const {
+ i::DisallowGarbageCollection no_gc;
+
+ i::Object object = *Utils::OpenHandle(*value);
+
+ auto self = Utils::OpenHandle(this);
+ return self->IsLeafTemplateForApiObject(object);
+}
+
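
Embedder-side sketch: as the name suggests, this predicate is intended to match only API objects instantiated directly from this template, without considering inheriting templates the way HasInstance does; at a call site it reads as:

    #include "v8.h"

    // Sketch: check whether `value` is an API object created directly from
    // `templ` (inheriting templates are not considered, unlike HasInstance).
    bool CreatedFromTemplate(v8::Local<v8::FunctionTemplate> templ,
                             v8::Local<v8::Value> value) {
      return templ->IsLeafTemplateForApiObject(value);
    }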
Local<External> v8::External::New(Isolate* isolate, void* value) {
STATIC_ASSERT(sizeof(value) == sizeof(i::Address));
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
@@ -7965,6 +8083,10 @@ v8::Local<v8::Context> Isolate::GetIncumbentContext() {
return Utils::ToLocal(context);
}
+v8::Local<Value> Isolate::ThrowError(v8::Local<v8::String> message) {
+ return ThrowException(v8::Exception::Error(message));
+}
+
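
Embedder-side sketch: ThrowError is a convenience wrapper around ThrowException(Exception::Error(...)), so a typical error path shrinks to:

    #include "v8.h"

    // Sketch: throw a JS Error with a UTF-8 message from embedder code.
    void ThrowUtf8Error(v8::Isolate* isolate, const char* message) {
      isolate->ThrowError(
          v8::String::NewFromUtf8(isolate, message).ToLocalChecked());
    }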
v8::Local<Value> Isolate::ThrowException(v8::Local<v8::Value> value) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
ENTER_V8_DO_NOT_USE(isolate);
@@ -8037,6 +8159,11 @@ EmbedderHeapTracer* Isolate::GetEmbedderHeapTracer() {
return isolate->heap()->GetEmbedderHeapTracer();
}
+void Isolate::SetEmbedderRootsHandler(EmbedderRootsHandler* handler) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ isolate->heap()->SetEmbedderRootsHandler(handler);
+}
+
void Isolate::AttachCppHeap(CppHeap* cpp_heap) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
isolate->heap()->AttachCppHeap(cpp_heap);
@@ -8455,10 +8582,11 @@ bool Isolate::GetHeapSpaceStatistics(HeapSpaceStatistics* space_statistics,
}
} else {
i::Space* space = heap->space(static_cast<int>(index));
- space_statistics->space_size_ = space->CommittedMemory();
- space_statistics->space_used_size_ = space->SizeOfObjects();
- space_statistics->space_available_size_ = space->Available();
- space_statistics->physical_space_size_ = space->CommittedPhysicalMemory();
+ space_statistics->space_size_ = space ? space->CommittedMemory() : 0;
+ space_statistics->space_used_size_ = space ? space->SizeOfObjects() : 0;
+ space_statistics->space_available_size_ = space ? space->Available() : 0;
+ space_statistics->physical_space_size_ =
+ space ? space->CommittedPhysicalMemory() : 0;
}
return true;
}
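
Embedder-side sketch: with the null-space guard above, spaces that are compiled out simply report zeros, so a straightforward loop over all space indices stays valid.

    #include <cstdio>
    #include "v8.h"

    // Sketch: dump committed/used sizes for every heap space of an isolate.
    void DumpHeapSpaces(v8::Isolate* isolate) {
      for (size_t i = 0; i < isolate->NumberOfHeapSpaces(); ++i) {
        v8::HeapSpaceStatistics stats;
        if (!isolate->GetHeapSpaceStatistics(&stats, i)) continue;
        std::printf("%s: %zu committed, %zu used\n", stats.space_name(),
                    stats.space_size(), stats.space_used_size());
      }
    }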
@@ -8806,10 +8934,9 @@ void Isolate::SetStackLimit(uintptr_t stack_limit) {
void Isolate::GetCodeRange(void** start, size_t* length_in_bytes) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
- const base::AddressRegion& code_range =
- isolate->heap()->memory_allocator()->code_range();
- *start = reinterpret_cast<void*>(code_range.begin());
- *length_in_bytes = code_range.size();
+ const base::AddressRegion& code_region = isolate->heap()->code_region();
+ *start = reinterpret_cast<void*>(code_region.begin());
+ *length_in_bytes = code_region.size();
}
void Isolate::GetEmbeddedCodeRange(const void** start,
@@ -8891,12 +9018,19 @@ CALLBACK_SETTER(WasmSimdEnabledCallback, WasmSimdEnabledCallback,
CALLBACK_SETTER(WasmExceptionsEnabledCallback, WasmExceptionsEnabledCallback,
wasm_exceptions_enabled_callback)
+CALLBACK_SETTER(SharedArrayBufferConstructorEnabledCallback,
+ SharedArrayBufferConstructorEnabledCallback,
+ sharedarraybuffer_constructor_enabled_callback)
+
void Isolate::InstallConditionalFeatures(Local<Context> context) {
-#if V8_ENABLE_WEBASSEMBLY
v8::HandleScope handle_scope(this);
v8::Context::Scope context_scope(context);
- i::WasmJs::InstallConditionalFeatures(reinterpret_cast<i::Isolate*>(this),
- Utils::OpenHandle(*context));
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ isolate->InstallConditionalFeatures(Utils::OpenHandle(*context));
+#if V8_ENABLE_WEBASSEMBLY
+ if (i::FLAG_expose_wasm) {
+ i::WasmJs::InstallConditionalFeatures(isolate, Utils::OpenHandle(*context));
+ }
#endif // V8_ENABLE_WEBASSEMBLY
}
@@ -9379,7 +9513,21 @@ CpuProfilingOptions::CpuProfilingOptions(CpuProfilingMode mode,
MaybeLocal<Context> filter_context)
: mode_(mode),
max_samples_(max_samples),
- sampling_interval_us_(sampling_interval_us) {}
+ sampling_interval_us_(sampling_interval_us) {
+ if (!filter_context.IsEmpty()) {
+ Local<Context> local_filter_context = filter_context.ToLocalChecked();
+ filter_context_.Reset(local_filter_context->GetIsolate(),
+ local_filter_context);
+ filter_context_.SetWeak();
+ }
+}
+
+void* CpuProfilingOptions::raw_filter_context() const {
+ return reinterpret_cast<void*>(
+ i::Context::cast(*Utils::OpenPersistent(filter_context_))
+ .native_context()
+ .address());
+}
void CpuProfiler::Dispose() { delete reinterpret_cast<i::CpuProfiler*>(this); }
@@ -9660,10 +9808,11 @@ void HeapProfiler::ClearObjectIds() {
const HeapSnapshot* HeapProfiler::TakeHeapSnapshot(
ActivityControl* control, ObjectNameResolver* resolver,
- bool treat_global_objects_as_roots) {
+ bool treat_global_objects_as_roots, bool capture_numeric_value) {
return reinterpret_cast<const HeapSnapshot*>(
reinterpret_cast<i::HeapProfiler*>(this)->TakeSnapshot(
- control, resolver, treat_global_objects_as_roots));
+ control, resolver, treat_global_objects_as_roots,
+ capture_numeric_value));
}
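
Embedder-side sketch: capture_numeric_value is the new fourth argument (presumably defaulted in the header so existing callers are unaffected); opting in looks like:

    #include "v8-profiler.h"

    // Sketch: take a snapshot that also records the values of heap numbers.
    const v8::HeapSnapshot* TakeNumericSnapshot(v8::Isolate* isolate) {
      return isolate->GetHeapProfiler()->TakeHeapSnapshot(
          /*control=*/nullptr, /*resolver=*/nullptr,
          /*treat_global_objects_as_roots=*/true,
          /*capture_numeric_value=*/true);
    }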
void HeapProfiler::StartTrackingHeapObjects(bool track_allocations) {
@@ -9727,7 +9876,8 @@ void EmbedderHeapTracer::SetStackStart(void* stack_start) {
void EmbedderHeapTracer::NotifyEmptyEmbedderStack() {
CHECK(isolate_);
reinterpret_cast<i::Isolate*>(isolate_)
- ->global_handles()
+ ->heap()
+ ->local_embedder_heap_tracer()
->NotifyEmptyEmbedderStack();
}
@@ -10026,8 +10176,7 @@ void InvokeAccessorGetterCallback(
v8::AccessorNameGetterCallback getter) {
// Leaving JavaScript.
Isolate* isolate = reinterpret_cast<Isolate*>(info.GetIsolate());
- RuntimeCallTimerScope timer(isolate,
- RuntimeCallCounterId::kAccessorGetterCallback);
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kAccessorGetterCallback);
Address getter_address = reinterpret_cast<Address>(getter);
VMState<EXTERNAL> state(isolate);
ExternalCallbackScope call_scope(isolate, getter_address);
@@ -10037,7 +10186,7 @@ void InvokeAccessorGetterCallback(
void InvokeFunctionCallback(const v8::FunctionCallbackInfo<v8::Value>& info,
v8::FunctionCallback callback) {
Isolate* isolate = reinterpret_cast<Isolate*>(info.GetIsolate());
- RuntimeCallTimerScope timer(isolate, RuntimeCallCounterId::kFunctionCallback);
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kFunctionCallback);
Address callback_address = reinterpret_cast<Address>(callback);
VMState<EXTERNAL> state(isolate);
ExternalCallbackScope call_scope(isolate, callback_address);
@@ -10049,8 +10198,8 @@ void InvokeFinalizationRegistryCleanupFromTask(
Handle<JSFinalizationRegistry> finalization_registry,
Handle<Object> callback) {
Isolate* isolate = finalization_registry->native_context().GetIsolate();
- RuntimeCallTimerScope timer(
- isolate, RuntimeCallCounterId::kFinalizationRegistryCleanupFromTask);
+ RCS_SCOPE(isolate,
+ RuntimeCallCounterId::kFinalizationRegistryCleanupFromTask);
// Do not use ENTER_V8 because this is always called from a running
// FinalizationRegistryCleanupTask within V8 and we should not log it as an
// API call. This method is implemented here to avoid duplication of the
diff --git a/chromium/v8/src/ast/ast-value-factory.cc b/chromium/v8/src/ast/ast-value-factory.cc
index 6e454b22f1d..a300359a0a5 100644
--- a/chromium/v8/src/ast/ast-value-factory.cc
+++ b/chromium/v8/src/ast/ast-value-factory.cc
@@ -61,8 +61,8 @@ class OneByteStringStream {
} // namespace
-template <typename LocalIsolate>
-void AstRawString::Internalize(LocalIsolate* isolate) {
+template <typename IsolateT>
+void AstRawString::Internalize(IsolateT* isolate) {
DCHECK(!has_string_);
if (literal_bytes_.length() == 0) {
set_string(isolate->factory()->empty_string());
@@ -185,8 +185,8 @@ int AstRawString::Compare(const AstRawString* lhs, const AstRawString* rhs) {
return lhs->byte_length() - rhs->byte_length();
}
-template <typename LocalIsolate>
-Handle<String> AstConsString::Allocate(LocalIsolate* isolate) const {
+template <typename IsolateT>
+Handle<String> AstConsString::Allocate(IsolateT* isolate) const {
DCHECK(string_.is_null());
if (IsEmpty()) {
@@ -210,8 +210,8 @@ template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
Handle<String> AstConsString::Allocate<LocalIsolate>(
LocalIsolate* isolate) const;
-template <typename LocalIsolate>
-Handle<String> AstConsString::AllocateFlat(LocalIsolate* isolate) const {
+template <typename IsolateT>
+Handle<String> AstConsString::AllocateFlat(IsolateT* isolate) const {
if (IsEmpty()) {
return isolate->factory()->empty_string();
}
@@ -370,8 +370,8 @@ AstConsString* AstValueFactory::NewConsString(const AstRawString* str1,
return NewConsString()->AddString(zone(), str1)->AddString(zone(), str2);
}
-template <typename LocalIsolate>
-void AstValueFactory::Internalize(LocalIsolate* isolate) {
+template <typename IsolateT>
+void AstValueFactory::Internalize(IsolateT* isolate) {
if (!zone_) return;
// Strings need to be internalized before values, because values refer to
diff --git a/chromium/v8/src/ast/ast-value-factory.h b/chromium/v8/src/ast/ast-value-factory.h
index b66e11f99fa..290da7838f7 100644
--- a/chromium/v8/src/ast/ast-value-factory.h
+++ b/chromium/v8/src/ast/ast-value-factory.h
@@ -65,8 +65,8 @@ class AstRawString final : public ZoneObject {
V8_EXPORT_PRIVATE bool IsOneByteEqualTo(const char* data) const;
uint16_t FirstCharacter() const;
- template <typename LocalIsolate>
- void Internalize(LocalIsolate* isolate);
+ template <typename IsolateT>
+ void Internalize(IsolateT* isolate);
// Access the physical representation:
bool is_one_byte() const { return is_one_byte_; }
@@ -161,17 +161,17 @@ class AstConsString final : public ZoneObject {
return segment_.string == nullptr;
}
- template <typename LocalIsolate>
- Handle<String> GetString(LocalIsolate* isolate) {
+ template <typename IsolateT>
+ Handle<String> GetString(IsolateT* isolate) {
if (string_.is_null()) {
string_ = Allocate(isolate);
}
return string_;
}
- template <typename LocalIsolate>
+ template <typename IsolateT>
EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
- Handle<String> AllocateFlat(LocalIsolate* isolate) const;
+ Handle<String> AllocateFlat(IsolateT* isolate) const;
std::forward_list<const AstRawString*> ToRawStrings() const;
@@ -181,9 +181,9 @@ class AstConsString final : public ZoneObject {
AstConsString() : string_(), segment_({nullptr, nullptr}) {}
- template <typename LocalIsolate>
+ template <typename IsolateT>
EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
- Handle<String> Allocate(LocalIsolate* isolate) const;
+ Handle<String> Allocate(IsolateT* isolate) const;
Handle<String> string_;
@@ -354,8 +354,8 @@ class AstValueFactory {
// Internalize all the strings in the factory, and prevent any more from being
// allocated. Multiple calls to Internalize are allowed, for simplicity, where
// subsequent calls are a no-op.
- template <typename LocalIsolate>
- void Internalize(LocalIsolate* isolate);
+ template <typename IsolateT>
+ void Internalize(IsolateT* isolate);
#define F(name, str) \
const AstRawString* name##_string() const { \
diff --git a/chromium/v8/src/ast/ast.cc b/chromium/v8/src/ast/ast.cc
index 5515a4a3fcc..3054f99f1cd 100644
--- a/chromium/v8/src/ast/ast.cc
+++ b/chromium/v8/src/ast/ast.cc
@@ -443,8 +443,8 @@ int ObjectLiteral::InitDepthAndFlags() {
return depth_acc;
}
-template <typename LocalIsolate>
-void ObjectLiteral::BuildBoilerplateDescription(LocalIsolate* isolate) {
+template <typename IsolateT>
+void ObjectLiteral::BuildBoilerplateDescription(IsolateT* isolate) {
if (!boilerplate_description_.is_null()) return;
int index_keys = 0;
@@ -597,8 +597,8 @@ int ArrayLiteral::InitDepthAndFlags() {
return depth_acc;
}
-template <typename LocalIsolate>
-void ArrayLiteral::BuildBoilerplateDescription(LocalIsolate* isolate) {
+template <typename IsolateT>
+void ArrayLiteral::BuildBoilerplateDescription(IsolateT* isolate) {
if (!boilerplate_description_.is_null()) return;
int constants_length =
@@ -642,7 +642,7 @@ void ArrayLiteral::BuildBoilerplateDescription(LocalIsolate* isolate) {
}
   // New handle scope here, needs to be after BuildConstants().

- typename LocalIsolate::HandleScopeType scope(isolate);
+ typename IsolateT::HandleScopeType scope(isolate);
Object boilerplate_value = *GetBoilerplateValue(element, isolate);
// We shouldn't allocate after creating the boilerplate value.
@@ -697,9 +697,9 @@ bool MaterializedLiteral::IsSimple() const {
return false;
}
-template <typename LocalIsolate>
+template <typename IsolateT>
Handle<Object> MaterializedLiteral::GetBoilerplateValue(Expression* expression,
- LocalIsolate* isolate) {
+ IsolateT* isolate) {
if (expression->IsLiteral()) {
return expression->AsLiteral()->BuildValue(isolate);
}
@@ -742,8 +742,8 @@ bool MaterializedLiteral::NeedsInitialAllocationSite() {
return false;
}
-template <typename LocalIsolate>
-void MaterializedLiteral::BuildConstants(LocalIsolate* isolate) {
+template <typename IsolateT>
+void MaterializedLiteral::BuildConstants(IsolateT* isolate) {
if (IsArrayLiteral()) {
AsArrayLiteral()->BuildBoilerplateDescription(isolate);
return;
@@ -760,9 +760,9 @@ template EXPORT_TEMPLATE_DEFINE(
V8_BASE_EXPORT) void MaterializedLiteral::BuildConstants(LocalIsolate*
isolate);
-template <typename LocalIsolate>
+template <typename IsolateT>
Handle<TemplateObjectDescription> GetTemplateObject::GetOrBuildDescription(
- LocalIsolate* isolate) {
+ IsolateT* isolate) {
Handle<FixedArray> raw_strings = isolate->factory()->NewFixedArray(
this->raw_strings()->length(), AllocationType::kOld);
bool raw_and_cooked_match = true;
@@ -978,8 +978,8 @@ bool Literal::AsArrayIndex(uint32_t* value) const {
return ToUint32(value) && *value != kMaxUInt32;
}
-template <typename LocalIsolate>
-Handle<Object> Literal::BuildValue(LocalIsolate* isolate) const {
+template <typename IsolateT>
+Handle<Object> Literal::BuildValue(IsolateT* isolate) const {
switch (type()) {
case kSmi:
return handle(Smi::FromInt(smi_), isolate);
diff --git a/chromium/v8/src/ast/ast.h b/chromium/v8/src/ast/ast.h
index e11e6c458ff..e9c85920014 100644
--- a/chromium/v8/src/ast/ast.h
+++ b/chromium/v8/src/ast/ast.h
@@ -986,8 +986,8 @@ class Literal final : public Expression {
// Returns an appropriate Object representing this Literal, allocating
// a heap object if needed.
- template <typename LocalIsolate>
- Handle<Object> BuildValue(LocalIsolate* isolate) const;
+ template <typename IsolateT>
+ Handle<Object> BuildValue(IsolateT* isolate) const;
// Support for using Literal as a HashMap key. NOTE: Currently, this works
// only for string and number literals!
@@ -1058,17 +1058,16 @@ class MaterializedLiteral : public Expression {
bool NeedsInitialAllocationSite();
// Populate the constant properties/elements fixed array.
- template <typename LocalIsolate>
- void BuildConstants(LocalIsolate* isolate);
+ template <typename IsolateT>
+ void BuildConstants(IsolateT* isolate);
// If the expression is a literal, return the literal value;
// if the expression is a materialized literal and is_simple
// then return an Array or Object Boilerplate Description
// Otherwise, return undefined literal as the placeholder
// in the object literal boilerplate.
- template <typename LocalIsolate>
- Handle<Object> GetBoilerplateValue(Expression* expression,
- LocalIsolate* isolate);
+ template <typename IsolateT>
+ Handle<Object> GetBoilerplateValue(Expression* expression, IsolateT* isolate);
};
// Node for capturing a regexp literal.
@@ -1265,9 +1264,9 @@ class ObjectLiteral final : public AggregateLiteral {
int InitDepthAndFlags();
// Get the boilerplate description, populating it if necessary.
- template <typename LocalIsolate>
+ template <typename IsolateT>
Handle<ObjectBoilerplateDescription> GetOrBuildBoilerplateDescription(
- LocalIsolate* isolate) {
+ IsolateT* isolate) {
if (boilerplate_description_.is_null()) {
BuildBoilerplateDescription(isolate);
}
@@ -1275,8 +1274,8 @@ class ObjectLiteral final : public AggregateLiteral {
}
// Populate the boilerplate description.
- template <typename LocalIsolate>
- void BuildBoilerplateDescription(LocalIsolate* isolate);
+ template <typename IsolateT>
+ void BuildBoilerplateDescription(IsolateT* isolate);
// Mark all computed expressions that are bound to a key that
// is shadowed by a later occurrence of the same key. For the
@@ -1366,9 +1365,9 @@ class ArrayLiteral final : public AggregateLiteral {
int InitDepthAndFlags();
// Get the boilerplate description, populating it if necessary.
- template <typename LocalIsolate>
+ template <typename IsolateT>
Handle<ArrayBoilerplateDescription> GetOrBuildBoilerplateDescription(
- LocalIsolate* isolate) {
+ IsolateT* isolate) {
if (boilerplate_description_.is_null()) {
BuildBoilerplateDescription(isolate);
}
@@ -1376,8 +1375,8 @@ class ArrayLiteral final : public AggregateLiteral {
}
// Populate the boilerplate description.
- template <typename LocalIsolate>
- void BuildBoilerplateDescription(LocalIsolate* isolate);
+ template <typename IsolateT>
+ void BuildBoilerplateDescription(IsolateT* isolate);
// Determines whether the {CreateShallowArrayLiteral} builtin can be used.
bool IsFastCloningSupported() const;
@@ -2121,8 +2120,8 @@ class FunctionLiteral final : public Expression {
// Empty handle means that the function does not have a shared name (i.e.
// the name will be set dynamically after creation of the function closure).
- template <typename LocalIsolate>
- MaybeHandle<String> GetName(LocalIsolate* isolate) const {
+ template <typename IsolateT>
+ MaybeHandle<String> GetName(IsolateT* isolate) const {
return raw_name_ ? raw_name_->AllocateFlat(isolate) : MaybeHandle<String>();
}
bool has_shared_name() const { return raw_name_ != nullptr; }
@@ -2644,9 +2643,8 @@ class GetTemplateObject final : public Expression {
return raw_strings_;
}
- template <typename LocalIsolate>
- Handle<TemplateObjectDescription> GetOrBuildDescription(
- LocalIsolate* isolate);
+ template <typename IsolateT>
+ Handle<TemplateObjectDescription> GetOrBuildDescription(IsolateT* isolate);
private:
friend class AstNodeFactory;
diff --git a/chromium/v8/src/ast/modules.cc b/chromium/v8/src/ast/modules.cc
index 62dc6191414..6d1bff226d9 100644
--- a/chromium/v8/src/ast/modules.cc
+++ b/chromium/v8/src/ast/modules.cc
@@ -116,17 +116,17 @@ void SourceTextModuleDescriptor::AddStarExport(
}
namespace {
-template <typename LocalIsolate>
-Handle<PrimitiveHeapObject> ToStringOrUndefined(LocalIsolate* isolate,
+template <typename IsolateT>
+Handle<PrimitiveHeapObject> ToStringOrUndefined(IsolateT* isolate,
const AstRawString* s) {
if (s == nullptr) return isolate->factory()->undefined_value();
return s->string();
}
} // namespace
-template <typename LocalIsolate>
+template <typename IsolateT>
Handle<ModuleRequest> SourceTextModuleDescriptor::AstModuleRequest::Serialize(
- LocalIsolate* isolate) const {
+ IsolateT* isolate) const {
// The import assertions will be stored in this array in the form:
// [key1, value1, location1, key2, value2, location2, ...]
Handle<FixedArray> import_assertions_array =
@@ -151,9 +151,9 @@ template Handle<ModuleRequest>
SourceTextModuleDescriptor::AstModuleRequest::Serialize(
LocalIsolate* isolate) const;
-template <typename LocalIsolate>
+template <typename IsolateT>
Handle<SourceTextModuleInfoEntry> SourceTextModuleDescriptor::Entry::Serialize(
- LocalIsolate* isolate) const {
+ IsolateT* isolate) const {
CHECK(Smi::IsValid(module_request)); // TODO(neis): Check earlier?
return SourceTextModuleInfoEntry::New(
isolate, ToStringOrUndefined(isolate, export_name),
@@ -166,9 +166,9 @@ SourceTextModuleDescriptor::Entry::Serialize(Isolate* isolate) const;
template Handle<SourceTextModuleInfoEntry>
SourceTextModuleDescriptor::Entry::Serialize(LocalIsolate* isolate) const;
-template <typename LocalIsolate>
+template <typename IsolateT>
Handle<FixedArray> SourceTextModuleDescriptor::SerializeRegularExports(
- LocalIsolate* isolate, Zone* zone) const {
+ IsolateT* isolate, Zone* zone) const {
// We serialize regular exports in a way that lets us later iterate over their
// local names and for each local name immediately access all its export
// names. (Regular exports have neither import name nor module request.)
diff --git a/chromium/v8/src/ast/modules.h b/chromium/v8/src/ast/modules.h
index f776d2b5226..f496a0bb85a 100644
--- a/chromium/v8/src/ast/modules.h
+++ b/chromium/v8/src/ast/modules.h
@@ -115,8 +115,8 @@ class SourceTextModuleDescriptor : public ZoneObject {
module_request(-1),
cell_index(0) {}
- template <typename LocalIsolate>
- Handle<SourceTextModuleInfoEntry> Serialize(LocalIsolate* isolate) const;
+ template <typename IsolateT>
+ Handle<SourceTextModuleInfoEntry> Serialize(IsolateT* isolate) const;
};
enum CellIndexKind { kInvalid, kExport, kImport };
@@ -132,8 +132,8 @@ class SourceTextModuleDescriptor : public ZoneObject {
position_(position),
index_(index) {}
- template <typename LocalIsolate>
- Handle<v8::internal::ModuleRequest> Serialize(LocalIsolate* isolate) const;
+ template <typename IsolateT>
+ Handle<v8::internal::ModuleRequest> Serialize(IsolateT* isolate) const;
const AstRawString* specifier() const { return specifier_; }
const ImportAssertions* import_assertions() const {
@@ -225,8 +225,8 @@ class SourceTextModuleDescriptor : public ZoneObject {
namespace_imports_.push_back(entry);
}
- template <typename LocalIsolate>
- Handle<FixedArray> SerializeRegularExports(LocalIsolate* isolate,
+ template <typename IsolateT>
+ Handle<FixedArray> SerializeRegularExports(IsolateT* isolate,
Zone* zone) const;
private:
diff --git a/chromium/v8/src/ast/scopes.cc b/chromium/v8/src/ast/scopes.cc
index de9b25a5c59..e5b621a283a 100644
--- a/chromium/v8/src/ast/scopes.cc
+++ b/chromium/v8/src/ast/scopes.cc
@@ -623,9 +623,9 @@ void DeclarationScope::HoistSloppyBlockFunctions(AstNodeFactory* factory) {
}
bool DeclarationScope::Analyze(ParseInfo* info) {
- RuntimeCallTimerScope runtimeTimer(
- info->runtime_call_stats(), RuntimeCallCounterId::kCompileScopeAnalysis,
- RuntimeCallStats::kThreadSpecific);
+ RCS_SCOPE(info->runtime_call_stats(),
+ RuntimeCallCounterId::kCompileScopeAnalysis,
+ RuntimeCallStats::kThreadSpecific);
DCHECK_NOT_NULL(info->literal());
DeclarationScope* scope = info->literal()->scope();
@@ -2532,8 +2532,8 @@ void Scope::AllocateVariablesRecursively() {
});
}
-template <typename LocalIsolate>
-void Scope::AllocateScopeInfosRecursively(LocalIsolate* isolate,
+template <typename IsolateT>
+void Scope::AllocateScopeInfosRecursively(IsolateT* isolate,
MaybeHandle<ScopeInfo> outer_scope) {
DCHECK(scope_info_.is_null());
MaybeHandle<ScopeInfo> next_outer_scope = outer_scope;
@@ -2603,9 +2603,8 @@ void DeclarationScope::RecordNeedsPrivateNameContextChainRecalc() {
}
// static
-template <typename LocalIsolate>
-void DeclarationScope::AllocateScopeInfos(ParseInfo* info,
- LocalIsolate* isolate) {
+template <typename IsolateT>
+void DeclarationScope::AllocateScopeInfos(ParseInfo* info, IsolateT* isolate) {
DeclarationScope* scope = info->literal()->scope();
// No one else should have allocated a scope info for this scope yet.
diff --git a/chromium/v8/src/ast/scopes.h b/chromium/v8/src/ast/scopes.h
index 717c797383b..2aa0c237679 100644
--- a/chromium/v8/src/ast/scopes.h
+++ b/chromium/v8/src/ast/scopes.h
@@ -716,8 +716,8 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
V8_INLINE void AllocateNonParameterLocalsAndDeclaredGlobals();
void AllocateVariablesRecursively();
- template <typename LocalIsolate>
- void AllocateScopeInfosRecursively(LocalIsolate* isolate,
+ template <typename IsolateT>
+ void AllocateScopeInfosRecursively(IsolateT* isolate,
MaybeHandle<ScopeInfo> outer_scope);
void AllocateDebuggerScopeInfos(Isolate* isolate,
@@ -1151,9 +1151,9 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
// Allocate ScopeInfos for top scope and any inner scopes that need them.
// Does nothing if ScopeInfo is already allocated.
- template <typename LocalIsolate>
+ template <typename IsolateT>
V8_EXPORT_PRIVATE static void AllocateScopeInfos(ParseInfo* info,
- LocalIsolate* isolate);
+ IsolateT* isolate);
Handle<StringSet> CollectNonLocals(Isolate* isolate,
Handle<StringSet> non_locals);
diff --git a/chromium/v8/src/ast/variables.cc b/chromium/v8/src/ast/variables.cc
index da2d8387f51..4f1f46150ff 100644
--- a/chromium/v8/src/ast/variables.cc
+++ b/chromium/v8/src/ast/variables.cc
@@ -30,14 +30,15 @@ bool Variable::IsGlobalObjectProperty() const {
scope_ != nullptr && scope_->is_script_scope();
}
-bool Variable::IsReplGlobalLet() const {
- return scope()->is_repl_mode_scope() && mode() == VariableMode::kLet;
+bool Variable::IsReplGlobal() const {
+ return scope()->is_repl_mode_scope() &&
+ (mode() == VariableMode::kLet || mode() == VariableMode::kConst);
}
void Variable::RewriteLocationForRepl() {
DCHECK(scope_->is_repl_mode_scope());
- if (mode() == VariableMode::kLet) {
+ if (mode() == VariableMode::kLet || mode() == VariableMode::kConst) {
DCHECK_EQ(location(), VariableLocation::CONTEXT);
bit_field_ =
LocationField::update(bit_field_, VariableLocation::REPL_GLOBAL);
diff --git a/chromium/v8/src/ast/variables.h b/chromium/v8/src/ast/variables.h
index 7c6ee4324e8..ec31eb0689e 100644
--- a/chromium/v8/src/ast/variables.h
+++ b/chromium/v8/src/ast/variables.h
@@ -125,8 +125,9 @@ class Variable final : public ZoneObject {
bool IsLookupSlot() const { return location() == VariableLocation::LOOKUP; }
bool IsGlobalObjectProperty() const;
- // True for 'let' variables declared in the script scope of a REPL script.
- bool IsReplGlobalLet() const;
+ // True for 'let' and 'const' variables declared in the script scope of a REPL
+ // script.
+ bool IsReplGlobal() const;
bool is_dynamic() const { return IsDynamicVariableMode(mode()); }
diff --git a/chromium/v8/src/base/atomicops.h b/chromium/v8/src/base/atomicops.h
index 5d6422be520..cb6940ea70a 100644
--- a/chromium/v8/src/base/atomicops.h
+++ b/chromium/v8/src/base/atomicops.h
@@ -27,6 +27,8 @@
#include <stdint.h>
+#include <atomic>
+
// Small C++ header which defines implementation specific macros used to
// identify the STL implementation.
// - libc++: captures __config for _LIBCPP_VERSION
@@ -35,6 +37,7 @@
#include "src/base/base-export.h"
#include "src/base/build_config.h"
+#include "src/base/macros.h"
#if defined(V8_OS_STARBOARD)
#include "starboard/atomic.h"
@@ -77,6 +80,21 @@ using AtomicWord = SbAtomicPtr;
using AtomicWord = intptr_t;
#endif
+namespace helper {
+template <typename T>
+volatile std::atomic<T>* to_std_atomic(volatile T* ptr) {
+ return reinterpret_cast<volatile std::atomic<T>*>(ptr);
+}
+template <typename T>
+volatile const std::atomic<T>* to_std_atomic_const(volatile const T* ptr) {
+ return reinterpret_cast<volatile const std::atomic<T>*>(ptr);
+}
+} // namespace helper
+
+inline void SeqCst_MemoryFence() {
+ std::atomic_thread_fence(std::memory_order_seq_cst);
+}
+
// Atomically execute:
// result = *ptr;
// if (result == old_value)
@@ -86,75 +104,225 @@ using AtomicWord = intptr_t;
// I.e. replace |*ptr| with |new_value| if |*ptr| used to be |old_value|.
// Always return the value of |*ptr| before the operation.
// Acquire, Relaxed, Release correspond to standard C++ memory orders.
-Atomic8 Relaxed_CompareAndSwap(volatile Atomic8* ptr, Atomic8 old_value,
- Atomic8 new_value);
-Atomic16 Relaxed_CompareAndSwap(volatile Atomic16* ptr, Atomic16 old_value,
- Atomic16 new_value);
-Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, Atomic32 old_value,
- Atomic32 new_value);
-Atomic32 AcquireRelease_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value, Atomic32 new_value);
-Atomic32 Relaxed_CompareAndSwap(volatile Atomic32* ptr, Atomic32 old_value,
- Atomic32 new_value);
-Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, Atomic32 old_value,
- Atomic32 new_value);
-#ifdef V8_HOST_ARCH_64_BIT
-Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr, Atomic64 old_value,
- Atomic64 new_value);
-Atomic64 AcquireRelease_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value, Atomic64 new_value);
-Atomic64 Relaxed_CompareAndSwap(volatile Atomic64* ptr, Atomic64 old_value,
- Atomic64 new_value);
-Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr, Atomic64 old_value,
- Atomic64 new_value);
-#endif // V8_HOST_ARCH_64_BIT
-
-// Atomically store new_value into |*ptr|, returning the previous value held in
-// |*ptr|.
-Atomic32 Relaxed_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value);
-#ifdef V8_HOST_ARCH_64_BIT
-Atomic64 Relaxed_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value);
-#endif // V8_HOST_ARCH_64_BIT
-
-// Atomically increment |*ptr| by |increment|. Returns the new value of
-// |*ptr| with the increment applied.
-Atomic32 Relaxed_AtomicIncrement(volatile Atomic32* ptr, Atomic32 increment);
-
-#ifdef V8_HOST_ARCH_64_BIT
-Atomic64 Relaxed_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment);
-#endif // V8_HOST_ARCH_64_BIT
-
-void SeqCst_MemoryFence();
-
-void Relaxed_Store(volatile Atomic8* ptr, Atomic8 value);
-void Relaxed_Store(volatile Atomic16* ptr, Atomic16 value);
-void Relaxed_Store(volatile Atomic32* ptr, Atomic32 value);
-void Release_Store(volatile Atomic32* ptr, Atomic32 value);
-#ifdef V8_HOST_ARCH_64_BIT
-void Relaxed_Store(volatile Atomic64* ptr, Atomic64 value);
-void Release_Store(volatile Atomic64* ptr, Atomic64 value);
-#endif // V8_HOST_ARCH_64_BIT
-
-Atomic8 Relaxed_Load(volatile const Atomic8* ptr);
-Atomic16 Relaxed_Load(volatile const Atomic16* ptr);
-Atomic32 Relaxed_Load(volatile const Atomic32* ptr);
-Atomic32 Acquire_Load(volatile const Atomic32* ptr);
-#ifdef V8_HOST_ARCH_64_BIT
-Atomic64 Relaxed_Load(volatile const Atomic64* ptr);
-Atomic64 Acquire_Load(volatile const Atomic64* ptr);
-#endif // V8_HOST_ARCH_64_BIT
+inline Atomic8 Relaxed_CompareAndSwap(volatile Atomic8* ptr, Atomic8 old_value,
+ Atomic8 new_value) {
+ std::atomic_compare_exchange_strong_explicit(
+ helper::to_std_atomic(ptr), &old_value, new_value,
+ std::memory_order_relaxed, std::memory_order_relaxed);
+ return old_value;
+}
+
+inline Atomic16 Relaxed_CompareAndSwap(volatile Atomic16* ptr,
+ Atomic16 old_value, Atomic16 new_value) {
+ std::atomic_compare_exchange_strong_explicit(
+ helper::to_std_atomic(ptr), &old_value, new_value,
+ std::memory_order_relaxed, std::memory_order_relaxed);
+ return old_value;
+}
+
+inline Atomic32 Relaxed_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value, Atomic32 new_value) {
+ std::atomic_compare_exchange_strong_explicit(
+ helper::to_std_atomic(ptr), &old_value, new_value,
+ std::memory_order_relaxed, std::memory_order_relaxed);
+ return old_value;
+}
+
+inline Atomic32 Relaxed_AtomicExchange(volatile Atomic32* ptr,
+ Atomic32 new_value) {
+ return std::atomic_exchange_explicit(helper::to_std_atomic(ptr), new_value,
+ std::memory_order_relaxed);
+}
+
+inline Atomic32 Relaxed_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
+ return increment + std::atomic_fetch_add_explicit(helper::to_std_atomic(ptr),
+ increment,
+ std::memory_order_relaxed);
+}
+
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value, Atomic32 new_value) {
+ atomic_compare_exchange_strong_explicit(
+ helper::to_std_atomic(ptr), &old_value, new_value,
+ std::memory_order_acquire, std::memory_order_acquire);
+ return old_value;
+}
+
+inline Atomic8 Release_CompareAndSwap(volatile Atomic8* ptr, Atomic8 old_value,
+ Atomic8 new_value) {
+ bool result = atomic_compare_exchange_strong_explicit(
+ helper::to_std_atomic(ptr), &old_value, new_value,
+ std::memory_order_release, std::memory_order_relaxed);
+ USE(result); // Make gcc compiler happy.
+ return old_value;
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value, Atomic32 new_value) {
+ atomic_compare_exchange_strong_explicit(
+ helper::to_std_atomic(ptr), &old_value, new_value,
+ std::memory_order_release, std::memory_order_relaxed);
+ return old_value;
+}
+
+inline Atomic32 AcquireRelease_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ atomic_compare_exchange_strong_explicit(
+ helper::to_std_atomic(ptr), &old_value, new_value,
+ std::memory_order_acq_rel, std::memory_order_acquire);
+ return old_value;
+}
+
+inline void Relaxed_Store(volatile Atomic8* ptr, Atomic8 value) {
+ std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
+ std::memory_order_relaxed);
+}
+
+inline void Relaxed_Store(volatile Atomic16* ptr, Atomic16 value) {
+ std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
+ std::memory_order_relaxed);
+}
+
+inline void Relaxed_Store(volatile Atomic32* ptr, Atomic32 value) {
+ std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
+ std::memory_order_relaxed);
+}
+
+inline void Release_Store(volatile Atomic8* ptr, Atomic8 value) {
+ std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
+ std::memory_order_release);
+}
+
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+ std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
+ std::memory_order_release);
+}
+
+inline Atomic8 Relaxed_Load(volatile const Atomic8* ptr) {
+ return std::atomic_load_explicit(helper::to_std_atomic_const(ptr),
+ std::memory_order_relaxed);
+}
+
+inline Atomic16 Relaxed_Load(volatile const Atomic16* ptr) {
+ return std::atomic_load_explicit(helper::to_std_atomic_const(ptr),
+ std::memory_order_relaxed);
+}
+
+inline Atomic32 Relaxed_Load(volatile const Atomic32* ptr) {
+ return std::atomic_load_explicit(helper::to_std_atomic_const(ptr),
+ std::memory_order_relaxed);
+}
+
+inline Atomic8 Acquire_Load(volatile const Atomic8* ptr) {
+ return std::atomic_load_explicit(helper::to_std_atomic_const(ptr),
+ std::memory_order_acquire);
+}
+
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+ return std::atomic_load_explicit(helper::to_std_atomic_const(ptr),
+ std::memory_order_acquire);
+}
+
+#if defined(V8_HOST_ARCH_64_BIT)
+
+inline Atomic64 Relaxed_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value, Atomic64 new_value) {
+ std::atomic_compare_exchange_strong_explicit(
+ helper::to_std_atomic(ptr), &old_value, new_value,
+ std::memory_order_relaxed, std::memory_order_relaxed);
+ return old_value;
+}
+
+inline Atomic64 Relaxed_AtomicExchange(volatile Atomic64* ptr,
+ Atomic64 new_value) {
+ return std::atomic_exchange_explicit(helper::to_std_atomic(ptr), new_value,
+ std::memory_order_relaxed);
+}
+
+inline Atomic64 Relaxed_AtomicIncrement(volatile Atomic64* ptr,
+ Atomic64 increment) {
+ return increment + std::atomic_fetch_add_explicit(helper::to_std_atomic(ptr),
+ increment,
+ std::memory_order_relaxed);
+}
+
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value, Atomic64 new_value) {
+ std::atomic_compare_exchange_strong_explicit(
+ helper::to_std_atomic(ptr), &old_value, new_value,
+ std::memory_order_acquire, std::memory_order_acquire);
+ return old_value;
+}
+
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value, Atomic64 new_value) {
+ std::atomic_compare_exchange_strong_explicit(
+ helper::to_std_atomic(ptr), &old_value, new_value,
+ std::memory_order_release, std::memory_order_relaxed);
+ return old_value;
+}
+
+inline Atomic64 AcquireRelease_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ std::atomic_compare_exchange_strong_explicit(
+ helper::to_std_atomic(ptr), &old_value, new_value,
+ std::memory_order_acq_rel, std::memory_order_acquire);
+ return old_value;
+}
+
+inline void Relaxed_Store(volatile Atomic64* ptr, Atomic64 value) {
+ std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
+ std::memory_order_relaxed);
+}
+
+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
+ std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
+ std::memory_order_release);
+}
+
+inline Atomic64 Relaxed_Load(volatile const Atomic64* ptr) {
+ return std::atomic_load_explicit(helper::to_std_atomic_const(ptr),
+ std::memory_order_relaxed);
+}
+
+inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
+ return std::atomic_load_explicit(helper::to_std_atomic_const(ptr),
+ std::memory_order_acquire);
+}
+
+#endif // defined(V8_HOST_ARCH_64_BIT)
+
+inline void Relaxed_Memcpy(volatile Atomic8* dst, volatile const Atomic8* src,
+ size_t bytes) {
+ constexpr size_t kAtomicWordSize = sizeof(AtomicWord);
+ while (bytes > 0 &&
+ !IsAligned(reinterpret_cast<uintptr_t>(dst), kAtomicWordSize)) {
+ Relaxed_Store(dst++, Relaxed_Load(src++));
+ --bytes;
+ }
+ if (IsAligned(reinterpret_cast<uintptr_t>(src), kAtomicWordSize) &&
+ IsAligned(reinterpret_cast<uintptr_t>(dst), kAtomicWordSize)) {
+ while (bytes >= kAtomicWordSize) {
+ Relaxed_Store(
+ reinterpret_cast<volatile AtomicWord*>(dst),
+ Relaxed_Load(reinterpret_cast<const volatile AtomicWord*>(src)));
+ dst += kAtomicWordSize;
+ src += kAtomicWordSize;
+ bytes -= kAtomicWordSize;
+ }
+ }
+ while (bytes > 0) {
+ Relaxed_Store(dst++, Relaxed_Load(src++));
+ --bytes;
+ }
+}
} // namespace base
} // namespace v8
-#if defined(V8_OS_WIN) || defined(V8_OS_STARBOARD)
-#include "src/base/atomicops_internals_std.h"
-#else
-// TODO(ulan): Switch to std version after performance regression with Wheezy
-// sysroot is no longer relevant. Debian Wheezy LTS ends on 31st of May 2018.
-#include "src/base/atomicops_internals_portable.h"
-#endif
-
// On some platforms we need additional declarations to make
// AtomicWord compatible with our other Atomic* types.
#if defined(V8_OS_MACOSX) || defined(V8_OS_OPENBSD) || defined(V8_OS_AIX)
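
A usage sketch (not part of this patch): the inlined helpers compose the same way the old out-of-line declarations did. Since every CompareAndSwap returns the value previously stored, a typical retry loop over the API defined above looks like:

    #include "src/base/atomicops.h"

    // Sketch: relaxed "store max" built on the CAS helpers defined above.
    v8::base::Atomic32 RelaxedStoreMax(volatile v8::base::Atomic32* cell,
                                       v8::base::Atomic32 value) {
      v8::base::Atomic32 old = v8::base::Relaxed_Load(cell);
      while (old < value) {
        v8::base::Atomic32 prev =
            v8::base::Relaxed_CompareAndSwap(cell, old, value);
        if (prev == old) break;  // CAS installed `value`.
        old = prev;              // Lost the race; retry against the new value.
      }
      return old;
    }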
diff --git a/chromium/v8/src/base/atomicops_internals_portable.h b/chromium/v8/src/base/atomicops_internals_portable.h
deleted file mode 100644
index ac162e2a825..00000000000
--- a/chromium/v8/src/base/atomicops_internals_portable.h
+++ /dev/null
@@ -1,216 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// This file is an internal atomic implementation, use atomicops.h instead.
-//
-// This implementation uses C++11 atomics' member functions. The code base is
-// currently written assuming atomicity revolves around accesses instead of
-// C++11's memory locations. The burden is on the programmer to ensure that all
-// memory locations accessed atomically are never accessed non-atomically (tsan
-// should help with this).
-//
-// Of note in this implementation:
-// * All NoBarrier variants are implemented as relaxed.
-// * All Barrier variants are implemented as sequentially-consistent.
-// * Compare exchange's failure ordering is always the same as the success one
-// (except for release, which fails as relaxed): using a weaker ordering is
-// only valid under certain uses of compare exchange.
-// * Acquire store doesn't exist in the C11 memory model, it is instead
-// implemented as a relaxed store followed by a sequentially consistent
-// fence.
-// * Release load doesn't exist in the C11 memory model, it is instead
-// implemented as sequentially consistent fence followed by a relaxed load.
-// * Atomic increment is expected to return the post-incremented value, whereas
-// C11 fetch add returns the previous value. The implementation therefore
-// needs to increment twice (which the compiler should be able to detect and
-// optimize).
-
-#ifndef V8_BASE_ATOMICOPS_INTERNALS_PORTABLE_H_
-#define V8_BASE_ATOMICOPS_INTERNALS_PORTABLE_H_
-
-#include <atomic>
-
-#include "src/base/build_config.h"
-#include "src/base/macros.h"
-
-namespace v8 {
-namespace base {
-
-// This implementation is transitional and maintains the original API for
-// atomicops.h.
-
-inline void SeqCst_MemoryFence() {
-#if defined(__GLIBCXX__)
- // Work around libstdc++ bug 51038 where atomic_thread_fence was declared but
- // not defined, leading to the linker complaining about undefined references.
- __atomic_thread_fence(std::memory_order_seq_cst);
-#else
- std::atomic_thread_fence(std::memory_order_seq_cst);
-#endif
-}
-
-inline Atomic8 Relaxed_CompareAndSwap(volatile Atomic8* ptr, Atomic8 old_value,
- Atomic8 new_value) {
- bool result = __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED);
- USE(result); // Make gcc compiler happy.
- return old_value;
-}
-
-inline Atomic16 Relaxed_CompareAndSwap(volatile Atomic16* ptr,
- Atomic16 old_value, Atomic16 new_value) {
- __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED);
- return old_value;
-}
-
-inline Atomic32 Relaxed_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value, Atomic32 new_value) {
- __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED);
- return old_value;
-}
-
-inline Atomic32 Relaxed_AtomicExchange(volatile Atomic32* ptr,
- Atomic32 new_value) {
- return __atomic_exchange_n(ptr, new_value, __ATOMIC_RELAXED);
-}
-
-inline Atomic32 Relaxed_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- return increment + __atomic_fetch_add(ptr, increment, __ATOMIC_RELAXED);
-}
-
-inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value, Atomic32 new_value) {
- __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
- __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
- return old_value;
-}
-
-inline Atomic8 Release_CompareAndSwap(volatile Atomic8* ptr, Atomic8 old_value,
- Atomic8 new_value) {
- bool result = __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
- __ATOMIC_RELEASE, __ATOMIC_RELAXED);
- USE(result); // Make gcc compiler happy.
- return old_value;
-}
-
-inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value, Atomic32 new_value) {
- __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
- __ATOMIC_RELEASE, __ATOMIC_RELAXED);
- return old_value;
-}
-
-inline Atomic32 AcquireRelease_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
- __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE);
- return old_value;
-}
-
-inline void Relaxed_Store(volatile Atomic8* ptr, Atomic8 value) {
- __atomic_store_n(ptr, value, __ATOMIC_RELAXED);
-}
-
-inline void Relaxed_Store(volatile Atomic16* ptr, Atomic16 value) {
- __atomic_store_n(ptr, value, __ATOMIC_RELAXED);
-}
-
-inline void Relaxed_Store(volatile Atomic32* ptr, Atomic32 value) {
- __atomic_store_n(ptr, value, __ATOMIC_RELAXED);
-}
-
-inline void Release_Store(volatile Atomic8* ptr, Atomic8 value) {
- __atomic_store_n(ptr, value, __ATOMIC_RELEASE);
-}
-
-inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
- __atomic_store_n(ptr, value, __ATOMIC_RELEASE);
-}
-
-inline Atomic8 Relaxed_Load(volatile const Atomic8* ptr) {
- return __atomic_load_n(ptr, __ATOMIC_RELAXED);
-}
-
-inline Atomic16 Relaxed_Load(volatile const Atomic16* ptr) {
- return __atomic_load_n(ptr, __ATOMIC_RELAXED);
-}
-
-inline Atomic32 Relaxed_Load(volatile const Atomic32* ptr) {
- return __atomic_load_n(ptr, __ATOMIC_RELAXED);
-}
-
-inline Atomic8 Acquire_Load(volatile const Atomic8* ptr) {
- return __atomic_load_n(ptr, __ATOMIC_ACQUIRE);
-}
-
-inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
- return __atomic_load_n(ptr, __ATOMIC_ACQUIRE);
-}
-
-#if defined(V8_HOST_ARCH_64_BIT)
-
-inline Atomic64 Relaxed_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value, Atomic64 new_value) {
- __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
- __ATOMIC_RELAXED, __ATOMIC_RELAXED);
- return old_value;
-}
-
-inline Atomic64 Relaxed_AtomicExchange(volatile Atomic64* ptr,
- Atomic64 new_value) {
- return __atomic_exchange_n(ptr, new_value, __ATOMIC_RELAXED);
-}
-
-inline Atomic64 Relaxed_AtomicIncrement(volatile Atomic64* ptr,
- Atomic64 increment) {
- return increment + __atomic_fetch_add(ptr, increment, __ATOMIC_RELAXED);
-}
-
-inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value, Atomic64 new_value) {
- __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
- __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
- return old_value;
-}
-
-inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value, Atomic64 new_value) {
- __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
- __ATOMIC_RELEASE, __ATOMIC_RELAXED);
- return old_value;
-}
-
-inline Atomic64 AcquireRelease_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value,
- Atomic64 new_value) {
- __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
- __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE);
- return old_value;
-}
-
-inline void Relaxed_Store(volatile Atomic64* ptr, Atomic64 value) {
- __atomic_store_n(ptr, value, __ATOMIC_RELAXED);
-}
-
-inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
- __atomic_store_n(ptr, value, __ATOMIC_RELEASE);
-}
-
-inline Atomic64 Relaxed_Load(volatile const Atomic64* ptr) {
- return __atomic_load_n(ptr, __ATOMIC_RELAXED);
-}
-
-inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
- return __atomic_load_n(ptr, __ATOMIC_ACQUIRE);
-}
-
-#endif // defined(V8_HOST_ARCH_64_BIT)
-} // namespace base
-} // namespace v8
-
-#endif // V8_BASE_ATOMICOPS_INTERNALS_PORTABLE_H_
diff --git a/chromium/v8/src/base/atomicops_internals_std.h b/chromium/v8/src/base/atomicops_internals_std.h
deleted file mode 100644
index 1638b8b52a0..00000000000
--- a/chromium/v8/src/base/atomicops_internals_std.h
+++ /dev/null
@@ -1,224 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_BASE_ATOMICOPS_INTERNALS_STD_H_
-#define V8_BASE_ATOMICOPS_INTERNALS_STD_H_
-
-#include <atomic>
-
-#include "src/base/build_config.h"
-#include "src/base/macros.h"
-
-namespace v8 {
-namespace base {
-
-namespace helper {
-template <typename T>
-volatile std::atomic<T>* to_std_atomic(volatile T* ptr) {
- return reinterpret_cast<volatile std::atomic<T>*>(ptr);
-}
-template <typename T>
-volatile const std::atomic<T>* to_std_atomic_const(volatile const T* ptr) {
- return reinterpret_cast<volatile const std::atomic<T>*>(ptr);
-}
-} // namespace helper
-
-inline void SeqCst_MemoryFence() {
- std::atomic_thread_fence(std::memory_order_seq_cst);
-}
-
-inline Atomic8 Relaxed_CompareAndSwap(volatile Atomic8* ptr, Atomic8 old_value,
- Atomic8 new_value) {
- std::atomic_compare_exchange_strong_explicit(
- helper::to_std_atomic(ptr), &old_value, new_value,
- std::memory_order_relaxed, std::memory_order_relaxed);
- return old_value;
-}
-
-inline Atomic16 Relaxed_CompareAndSwap(volatile Atomic16* ptr,
- Atomic16 old_value, Atomic16 new_value) {
- std::atomic_compare_exchange_strong_explicit(
- helper::to_std_atomic(ptr), &old_value, new_value,
- std::memory_order_relaxed, std::memory_order_relaxed);
- return old_value;
-}
-
-inline Atomic32 Relaxed_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value, Atomic32 new_value) {
- std::atomic_compare_exchange_strong_explicit(
- helper::to_std_atomic(ptr), &old_value, new_value,
- std::memory_order_relaxed, std::memory_order_relaxed);
- return old_value;
-}
-
-inline Atomic32 Relaxed_AtomicExchange(volatile Atomic32* ptr,
- Atomic32 new_value) {
- return std::atomic_exchange_explicit(helper::to_std_atomic(ptr), new_value,
- std::memory_order_relaxed);
-}
-
-inline Atomic32 Relaxed_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- return increment + std::atomic_fetch_add_explicit(helper::to_std_atomic(ptr),
- increment,
- std::memory_order_relaxed);
-}
-
-inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value, Atomic32 new_value) {
- atomic_compare_exchange_strong_explicit(
- helper::to_std_atomic(ptr), &old_value, new_value,
- std::memory_order_acquire, std::memory_order_acquire);
- return old_value;
-}
-
-inline Atomic8 Release_CompareAndSwap(volatile Atomic8* ptr, Atomic8 old_value,
- Atomic8 new_value) {
- bool result = atomic_compare_exchange_strong_explicit(
- helper::to_std_atomic(ptr), &old_value, new_value,
- std::memory_order_release, std::memory_order_relaxed);
- USE(result); // Make gcc compiler happy.
- return old_value;
-}
-
-inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value, Atomic32 new_value) {
- atomic_compare_exchange_strong_explicit(
- helper::to_std_atomic(ptr), &old_value, new_value,
- std::memory_order_release, std::memory_order_relaxed);
- return old_value;
-}
-
-inline Atomic32 AcquireRelease_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- atomic_compare_exchange_strong_explicit(
- helper::to_std_atomic(ptr), &old_value, new_value,
- std::memory_order_acq_rel, std::memory_order_acquire);
- return old_value;
-}
-
-inline void Relaxed_Store(volatile Atomic8* ptr, Atomic8 value) {
- std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
- std::memory_order_relaxed);
-}
-
-inline void Relaxed_Store(volatile Atomic16* ptr, Atomic16 value) {
- std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
- std::memory_order_relaxed);
-}
-
-inline void Relaxed_Store(volatile Atomic32* ptr, Atomic32 value) {
- std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
- std::memory_order_relaxed);
-}
-
-inline void Release_Store(volatile Atomic8* ptr, Atomic8 value) {
- std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
- std::memory_order_release);
-}
-
-inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
- std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
- std::memory_order_release);
-}
-
-inline Atomic8 Relaxed_Load(volatile const Atomic8* ptr) {
- return std::atomic_load_explicit(helper::to_std_atomic_const(ptr),
- std::memory_order_relaxed);
-}
-
-inline Atomic16 Relaxed_Load(volatile const Atomic16* ptr) {
- return std::atomic_load_explicit(helper::to_std_atomic_const(ptr),
- std::memory_order_relaxed);
-}
-
-inline Atomic32 Relaxed_Load(volatile const Atomic32* ptr) {
- return std::atomic_load_explicit(helper::to_std_atomic_const(ptr),
- std::memory_order_relaxed);
-}
-
-inline Atomic8 Acquire_Load(volatile const Atomic8* ptr) {
- return std::atomic_load_explicit(helper::to_std_atomic_const(ptr),
- std::memory_order_acquire);
-}
-
-inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
- return std::atomic_load_explicit(helper::to_std_atomic_const(ptr),
- std::memory_order_acquire);
-}
-
-#if defined(V8_HOST_ARCH_64_BIT)
-
-inline Atomic64 Relaxed_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value, Atomic64 new_value) {
- std::atomic_compare_exchange_strong_explicit(
- helper::to_std_atomic(ptr), &old_value, new_value,
- std::memory_order_relaxed, std::memory_order_relaxed);
- return old_value;
-}
-
-inline Atomic64 Relaxed_AtomicExchange(volatile Atomic64* ptr,
- Atomic64 new_value) {
- return std::atomic_exchange_explicit(helper::to_std_atomic(ptr), new_value,
- std::memory_order_relaxed);
-}
-
-inline Atomic64 Relaxed_AtomicIncrement(volatile Atomic64* ptr,
- Atomic64 increment) {
- return increment + std::atomic_fetch_add_explicit(helper::to_std_atomic(ptr),
- increment,
- std::memory_order_relaxed);
-}
-
-inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value, Atomic64 new_value) {
- std::atomic_compare_exchange_strong_explicit(
- helper::to_std_atomic(ptr), &old_value, new_value,
- std::memory_order_acquire, std::memory_order_acquire);
- return old_value;
-}
-
-inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value, Atomic64 new_value) {
- std::atomic_compare_exchange_strong_explicit(
- helper::to_std_atomic(ptr), &old_value, new_value,
- std::memory_order_release, std::memory_order_relaxed);
- return old_value;
-}
-
-inline Atomic64 AcquireRelease_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value,
- Atomic64 new_value) {
- std::atomic_compare_exchange_strong_explicit(
- helper::to_std_atomic(ptr), &old_value, new_value,
- std::memory_order_acq_rel, std::memory_order_acquire);
- return old_value;
-}
-
-inline void Relaxed_Store(volatile Atomic64* ptr, Atomic64 value) {
- std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
- std::memory_order_relaxed);
-}
-
-inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
- std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
- std::memory_order_release);
-}
-
-inline Atomic64 Relaxed_Load(volatile const Atomic64* ptr) {
- return std::atomic_load_explicit(helper::to_std_atomic_const(ptr),
- std::memory_order_relaxed);
-}
-
-inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
- return std::atomic_load_explicit(helper::to_std_atomic_const(ptr),
- std::memory_order_acquire);
-}
-
-#endif // defined(V8_HOST_ARCH_64_BIT)
-} // namespace base
-} // namespace v8
-
-#endif // V8_BASE_ATOMICOPS_INTERNALS_STD_H_
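
Note on the two deletions above: atomicops_internals_portable.h and atomicops_internals_std.h implemented the same wrapper contract, one via GCC __atomic builtins and one via std::atomic, and both are dropped in this update (the surviving implementation presumably moves directly into src/base/atomicops.h, which is not part of this excerpt). The easy-to-miss part of that contract is that every *_CompareAndSwap returns the value observed in memory rather than a success flag, because a failed strong compare-exchange writes the observed value back into old_value. A minimal, self-contained sketch of that behavior on top of std::atomic; the names are illustrative, not the exact ones in the merged header:

#include <atomic>
#include <cstdint>
#include <cstdio>

using Atomic32 = int32_t;

// Mirrors the deleted helpers: reinterpret the raw cell as std::atomic and
// always return the value that was observed in memory (a failed strong CAS
// writes that value back into old_value).
inline Atomic32 Relaxed_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value, Atomic32 new_value) {
  auto* cell = reinterpret_cast<volatile std::atomic<Atomic32>*>(ptr);
  std::atomic_compare_exchange_strong_explicit(
      cell, &old_value, new_value,
      std::memory_order_relaxed, std::memory_order_relaxed);
  return old_value;
}

int main() {
  Atomic32 state = 0;
  // "Claim" the slot exactly once: success iff the returned old value is 0.
  bool claimed = (Relaxed_CompareAndSwap(&state, 0, 1) == 0);
  std::printf("claimed=%d state=%d\n", claimed, state);
  return 0;
}
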
diff --git a/chromium/v8/src/base/bit-field.h b/chromium/v8/src/base/bit-field.h
index ca5fb459210..7b2796e3df2 100644
--- a/chromium/v8/src/base/bit-field.h
+++ b/chromium/v8/src/base/bit-field.h
@@ -52,7 +52,7 @@ class BitField final {
// Returns a type U with the bit field value encoded.
static constexpr U encode(T value) {
- CONSTEXPR_DCHECK(is_valid(value));
+ DCHECK(is_valid(value));
return static_cast<U>(value) << kShift;
}
diff --git a/chromium/v8/src/base/bits.h b/chromium/v8/src/base/bits.h
index b137f73936a..f790dfaab47 100644
--- a/chromium/v8/src/base/bits.h
+++ b/chromium/v8/src/base/bits.h
@@ -144,7 +144,7 @@ inline constexpr
typename std::enable_if<std::is_integral<T>::value && sizeof(T) <= 8,
unsigned>::type
CountTrailingZerosNonZero(T value) {
- CONSTEXPR_DCHECK(value != 0);
+ DCHECK_NE(0, value);
#if V8_HAS_BUILTIN_CTZ
return bits == 64 ? __builtin_ctzll(static_cast<uint64_t>(value))
: __builtin_ctz(static_cast<uint32_t>(value));
@@ -165,7 +165,7 @@ constexpr inline bool IsPowerOfTwo(T value) {
template <typename T,
typename = typename std::enable_if<std::is_integral<T>::value>::type>
inline constexpr int WhichPowerOfTwo(T value) {
- CONSTEXPR_DCHECK(IsPowerOfTwo(value));
+ DCHECK(IsPowerOfTwo(value));
#if V8_HAS_BUILTIN_CTZ
STATIC_ASSERT(sizeof(T) <= 8);
return sizeof(T) == 8 ? __builtin_ctzll(static_cast<uint64_t>(value))
diff --git a/chromium/v8/src/base/bounds.h b/chromium/v8/src/base/bounds.h
index fb8c968d660..0fe141b3097 100644
--- a/chromium/v8/src/base/bounds.h
+++ b/chromium/v8/src/base/bounds.h
@@ -15,7 +15,7 @@ namespace base {
// branch.
template <typename T, typename U>
inline constexpr bool IsInRange(T value, U lower_limit, U higher_limit) {
- CONSTEXPR_DCHECK(lower_limit <= higher_limit);
+ DCHECK_LE(lower_limit, higher_limit);
STATIC_ASSERT(sizeof(U) <= sizeof(T));
using unsigned_T = typename std::make_unsigned<T>::type;
// Use static_cast to support enum classes.
diff --git a/chromium/v8/src/base/cpu.cc b/chromium/v8/src/base/cpu.cc
index 17ef42a299a..9ddf8939bef 100644
--- a/chromium/v8/src/base/cpu.cc
+++ b/chromium/v8/src/base/cpu.cc
@@ -50,7 +50,7 @@
#include "src/base/logging.h"
#include "src/base/platform/wrappers.h"
#if V8_OS_WIN
-#include "src/base/win32-headers.h" // NOLINT
+#include "src/base/win32-headers.h"
#endif
namespace v8 {
diff --git a/chromium/v8/src/base/enum-set.h b/chromium/v8/src/base/enum-set.h
index f623198c2d2..ce49b3996ed 100644
--- a/chromium/v8/src/base/enum-set.h
+++ b/chromium/v8/src/base/enum-set.h
@@ -79,7 +79,7 @@ class EnumSet {
explicit constexpr EnumSet(T bits) : bits_(bits) {}
static constexpr T Mask(E element) {
- CONSTEXPR_DCHECK(sizeof(T) * 8 > static_cast<size_t>(element));
+ DCHECK_GT(sizeof(T) * 8, static_cast<size_t>(element));
return T{1} << static_cast<typename std::underlying_type<E>::type>(element);
}
diff --git a/chromium/v8/src/base/hashmap.h b/chromium/v8/src/base/hashmap.h
index 179da5ecba6..819d589a81e 100644
--- a/chromium/v8/src/base/hashmap.h
+++ b/chromium/v8/src/base/hashmap.h
@@ -530,8 +530,8 @@ class TemplateHashMap
AllocationPolicy>;
public:
- STATIC_ASSERT(sizeof(Key*) == sizeof(void*)); // NOLINT
- STATIC_ASSERT(sizeof(Value*) == sizeof(void*)); // NOLINT
+ STATIC_ASSERT(sizeof(Key*) == sizeof(void*));
+ STATIC_ASSERT(sizeof(Value*) == sizeof(void*));
struct value_type {
Key* first;
Value* second;
diff --git a/chromium/v8/src/base/logging.h b/chromium/v8/src/base/logging.h
index 2c4c536cf32..08db24a947e 100644
--- a/chromium/v8/src/base/logging.h
+++ b/chromium/v8/src/base/logging.h
@@ -134,12 +134,6 @@ V8_BASE_EXPORT void SetDcheckFunction(void (*dcheck_Function)(const char*, int,
#endif
-#if V8_HAS_CXX14_CONSTEXPR
-#define CONSTEXPR_DCHECK(cond) DCHECK(cond)
-#else
-#define CONSTEXPR_DCHECK(cond)
-#endif
-
namespace detail {
template <typename... Ts>
std::string PrintToString(Ts&&... ts) {
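
The CONSTEXPR_DCHECK macro removed here only existed because a DCHECK statement could not appear inside a constexpr function under pre-C++14 constexpr rules; with relaxed constexpr available unconditionally, the call sites touched by this patch (bit-field.h, bits.h, bounds.h, enum-set.h and optional.h) switch to plain DCHECK/DCHECK_* forms. A small standalone illustration of why a runtime assertion is legal in a constexpr function, using assert as a stand-in for DCHECK:

#include <cassert>

#define MY_DCHECK(cond) assert(cond)  // Stand-in for V8's DCHECK.

// Under C++14 relaxed constexpr a function body may contain statements such
// as this assertion: in a constant-evaluated call the condition must hold
// (a failing assert would not be a constant expression), and in a runtime
// call it behaves like an ordinary debug check.
constexpr int WhichPowerOfTwo(unsigned value) {
  MY_DCHECK(value != 0 && (value & (value - 1)) == 0);
  int n = 0;
  while (value > 1u) {
    value >>= 1;
    ++n;
  }
  return n;
}

static_assert(WhichPowerOfTwo(8u) == 3, "still usable in constant expressions");

int main() { return WhichPowerOfTwo(64u) == 6 ? 0 : 1; }
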
diff --git a/chromium/v8/src/base/once.h b/chromium/v8/src/base/once.h
index dd8b6be6213..c4224e84e35 100644
--- a/chromium/v8/src/base/once.h
+++ b/chromium/v8/src/base/once.h
@@ -53,10 +53,12 @@
#define V8_BASE_ONCE_H_
#include <stddef.h>
+
#include <atomic>
#include <functional>
#include "src/base/base-export.h"
+#include "src/base/template-utils.h"
namespace v8 {
namespace base {
@@ -76,9 +78,9 @@ enum : uint8_t {
using PointerArgFunction = void (*)(void* arg);
-template <typename T>
-struct OneArgFunction {
- using type = void (*)(T);
+template <typename... Args>
+struct FunctionWithArgs {
+ using type = void (*)(Args...);
};
V8_BASE_EXPORT void CallOnceImpl(OnceType* once,
@@ -90,11 +92,13 @@ inline void CallOnce(OnceType* once, std::function<void()> init_func) {
}
}
-template <typename Arg>
+template <typename... Args, typename = std::enable_if_t<
+ conjunction<std::is_scalar<Args>...>::value>>
inline void CallOnce(OnceType* once,
- typename OneArgFunction<Arg*>::type init_func, Arg* arg) {
+ typename FunctionWithArgs<Args...>::type init_func,
+ Args... args) {
if (once->load(std::memory_order_acquire) != ONCE_STATE_DONE) {
- CallOnceImpl(once, [=]() { init_func(arg); });
+ CallOnceImpl(once, [=]() { init_func(args...); });
}
}
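
CallOnce here gains a variadic overload restricted to scalar arguments through the conjunction helper added to template-utils.h later in this patch. A hedged usage sketch of the same shape; it substitutes std::once_flag/std::call_once purely to keep the example self-contained, which is not how V8's OnceType is implemented:

#include <cstdio>
#include <mutex>
#include <type_traits>

template <typename... Args>
struct FunctionWithArgs {
  using type = void (*)(Args...);
};

// Accept any number of scalar arguments and forward them into the
// once-initializer; non-scalar arguments are rejected by the enable_if.
template <typename... Args,
          typename = std::enable_if_t<
              std::conjunction<std::is_scalar<Args>...>::value>>
void CallOnce(std::once_flag* once,
              typename FunctionWithArgs<Args...>::type init_func,
              Args... args) {
  std::call_once(*once, [=]() { init_func(args...); });
}

void InitCache(int* slot, int value) { *slot = value; }

int main() {
  static std::once_flag flag;
  static int cache = 0;
  CallOnce(&flag, &InitCache, &cache, 42);  // Two scalar args: pointer + int.
  CallOnce(&flag, &InitCache, &cache, 99);  // Ignored: already initialized.
  std::printf("cache=%d\n", cache);
}
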
diff --git a/chromium/v8/src/base/optional.h b/chromium/v8/src/base/optional.h
index 3c13e654c80..77e9bb896e3 100644
--- a/chromium/v8/src/base/optional.h
+++ b/chromium/v8/src/base/optional.h
@@ -558,32 +558,32 @@ class OPTIONAL_DECLSPEC_EMPTY_BASES Optional
}
constexpr const T* operator->() const {
- CONSTEXPR_DCHECK(storage_.is_populated_);
+ DCHECK(storage_.is_populated_);
return &storage_.value_;
}
constexpr T* operator->() {
- CONSTEXPR_DCHECK(storage_.is_populated_);
+ DCHECK(storage_.is_populated_);
return &storage_.value_;
}
constexpr const T& operator*() const& {
- CONSTEXPR_DCHECK(storage_.is_populated_);
+ DCHECK(storage_.is_populated_);
return storage_.value_;
}
constexpr T& operator*() & {
- CONSTEXPR_DCHECK(storage_.is_populated_);
+ DCHECK(storage_.is_populated_);
return storage_.value_;
}
constexpr const T&& operator*() const&& {
- CONSTEXPR_DCHECK(storage_.is_populated_);
+ DCHECK(storage_.is_populated_);
return std::move(storage_.value_);
}
constexpr T&& operator*() && {
- CONSTEXPR_DCHECK(storage_.is_populated_);
+ DCHECK(storage_.is_populated_);
return std::move(storage_.value_);
}
diff --git a/chromium/v8/src/base/platform/mutex.h b/chromium/v8/src/base/platform/mutex.h
index 5685797f4ee..328c593a30e 100644
--- a/chromium/v8/src/base/platform/mutex.h
+++ b/chromium/v8/src/base/platform/mutex.h
@@ -14,7 +14,7 @@
#include "src/base/logging.h"
#if V8_OS_POSIX
-#include <pthread.h> // NOLINT
+#include <pthread.h>
#endif
#if V8_OS_STARBOARD
@@ -164,6 +164,8 @@ class V8_BASE_EXPORT RecursiveMutex final {
// successfully locked.
bool TryLock() V8_WARN_UNUSED_RESULT;
+ V8_INLINE void AssertHeld() const { DCHECK_LT(0, level_); }
+
private:
// The implementation-defined native handle type.
#if V8_OS_POSIX
diff --git a/chromium/v8/src/base/platform/platform-aix.cc b/chromium/v8/src/base/platform/platform-aix.cc
index 6b6a870370b..e5a5305d483 100644
--- a/chromium/v8/src/base/platform/platform-aix.cc
+++ b/chromium/v8/src/base/platform/platform-aix.cc
@@ -82,7 +82,7 @@ double AIXTimezoneCache::LocalTimeOffset(double time_ms, bool is_utc) {
TimezoneCache* OS::CreateTimezoneCache() { return new AIXTimezoneCache(); }
static unsigned StringToLong(char* buffer) {
- return static_cast<unsigned>(strtol(buffer, nullptr, 16)); // NOLINT
+ return static_cast<unsigned>(strtol(buffer, nullptr, 16));
}
std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
diff --git a/chromium/v8/src/base/platform/platform-freebsd.cc b/chromium/v8/src/base/platform/platform-freebsd.cc
index edc793c662e..ac36b0527e7 100644
--- a/chromium/v8/src/base/platform/platform-freebsd.cc
+++ b/chromium/v8/src/base/platform/platform-freebsd.cc
@@ -44,7 +44,7 @@ TimezoneCache* OS::CreateTimezoneCache() {
}
static unsigned StringToLong(char* buffer) {
- return static_cast<unsigned>(strtol(buffer, nullptr, 16)); // NOLINT
+ return static_cast<unsigned>(strtol(buffer, nullptr, 16));
}
std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
diff --git a/chromium/v8/src/base/platform/platform-posix.cc b/chromium/v8/src/base/platform/platform-posix.cc
index ee787f7d9ab..9f61a0aeb57 100644
--- a/chromium/v8/src/base/platform/platform-posix.cc
+++ b/chromium/v8/src/base/platform/platform-posix.cc
@@ -23,12 +23,12 @@
#include <sys/types.h>
#if defined(__APPLE__) || defined(__DragonFly__) || defined(__FreeBSD__) || \
defined(__NetBSD__) || defined(__OpenBSD__)
-#include <sys/sysctl.h> // NOLINT, for sysctl
+#include <sys/sysctl.h> // for sysctl
#endif
#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
#define LOG_TAG "v8"
-#include <android/log.h> // NOLINT
+#include <android/log.h>
#endif
#include <cmath>
@@ -52,7 +52,7 @@
#endif
#if V8_OS_LINUX
-#include <sys/prctl.h> // NOLINT, for prctl
+#include <sys/prctl.h> // for prctl
#endif
#if defined(V8_OS_FUCHSIA)
@@ -82,7 +82,7 @@ extern int madvise(caddr_t, size_t, int);
#endif
#if defined(V8_LIBC_GLIBC)
-extern "C" void* __libc_stack_end; // NOLINT
+extern "C" void* __libc_stack_end;
#endif
namespace v8 {
@@ -936,8 +936,7 @@ static void InitializeTlsBaseOffset() {
buffer[kBufferSize - 1] = '\0';
char* period_pos = strchr(buffer, '.');
*period_pos = '\0';
- int kernel_version_major =
- static_cast<int>(strtol(buffer, nullptr, 10)); // NOLINT
+ int kernel_version_major = static_cast<int>(strtol(buffer, nullptr, 10));
// The constants below are taken from pthreads.s from the XNU kernel
// sources archive at www.opensource.apple.com.
if (kernel_version_major < 11) {
diff --git a/chromium/v8/src/base/platform/platform-win32.cc b/chromium/v8/src/base/platform/platform-win32.cc
index 50da60c72f5..9fbb2570760 100644
--- a/chromium/v8/src/base/platform/platform-win32.cc
+++ b/chromium/v8/src/base/platform/platform-win32.cc
@@ -30,7 +30,7 @@
#include <VersionHelpers.h>
#if defined(_MSC_VER)
-#include <crtdbg.h> // NOLINT
+#include <crtdbg.h>
#endif // defined(_MSC_VER)
// Extra functions for MinGW. Most of these are the _s functions which are in
diff --git a/chromium/v8/src/base/platform/semaphore.h b/chromium/v8/src/base/platform/semaphore.h
index 83a7a3392f7..ec107bd290e 100644
--- a/chromium/v8/src/base/platform/semaphore.h
+++ b/chromium/v8/src/base/platform/semaphore.h
@@ -12,9 +12,9 @@
#endif
#if V8_OS_MACOSX
-#include <dispatch/dispatch.h> // NOLINT
+#include <dispatch/dispatch.h>
#elif V8_OS_POSIX
-#include <semaphore.h> // NOLINT
+#include <semaphore.h>
#endif
#if V8_OS_STARBOARD
diff --git a/chromium/v8/src/base/sanitizer/asan.h b/chromium/v8/src/base/sanitizer/asan.h
new file mode 100644
index 00000000000..291006d58c2
--- /dev/null
+++ b/chromium/v8/src/base/sanitizer/asan.h
@@ -0,0 +1,37 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// AddressSanitizer support.
+
+#ifndef V8_BASE_SANITIZER_ASAN_H_
+#define V8_BASE_SANITIZER_ASAN_H_
+
+#include <type_traits>
+
+#include "src/base/macros.h"
+
+#ifdef V8_USE_ADDRESS_SANITIZER
+
+#include <sanitizer/asan_interface.h>
+
+#if !defined(ASAN_POISON_MEMORY_REGION) || !defined(ASAN_UNPOISON_MEMORY_REGION)
+#error \
+ "ASAN_POISON_MEMORY_REGION and ASAN_UNPOISON_MEMORY_REGION must be defined"
+#endif
+
+#else // !V8_USE_ADDRESS_SANITIZER
+
+#define ASAN_POISON_MEMORY_REGION(start, size) \
+ static_assert(std::is_pointer<decltype(start)>::value, \
+ "static type violation"); \
+ static_assert(std::is_convertible<decltype(size), size_t>::value, \
+ "static type violation"); \
+ USE(start, size)
+
+#define ASAN_UNPOISON_MEMORY_REGION(start, size) \
+ ASAN_POISON_MEMORY_REGION(start, size)
+
+#endif // !V8_USE_ADDRESS_SANITIZER
+
+#endif // V8_BASE_SANITIZER_ASAN_H_
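
The non-ASan branch of this new header keeps the poison macros as statically type-checked no-ops, so call sites cannot silently pass arguments of the wrong type even in builds without the sanitizer. A standalone sketch of that pattern with illustrative names; V8's USE macro is replaced by explicit void casts so the snippet compiles without V8 headers:

#include <cstddef>
#include <type_traits>

// No-sanitizer fallback: expands to nothing at runtime, but still rejects
// calls whose arguments have the wrong static types.
#define POISON_MEMORY_REGION(start, size)                                 \
  static_assert(std::is_pointer<decltype(start)>::value,                  \
                "start must be a pointer");                               \
  static_assert(std::is_convertible<decltype(size), std::size_t>::value,  \
                "size must convert to size_t");                           \
  (void)(start), (void)(size)

void ReleaseChunk(char* chunk, std::size_t bytes) {
  // Would mark the region unusable under ASan; compiles to nothing here.
  POISON_MEMORY_REGION(chunk, bytes);
}

// POISON_MEMORY_REGION(42, bytes);  // Would fail to compile: not a pointer.

int main() {
  char buffer[64];
  ReleaseChunk(buffer, sizeof(buffer));
}
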
diff --git a/chromium/v8/src/sanitizer/lsan-page-allocator.cc b/chromium/v8/src/base/sanitizer/lsan-page-allocator.cc
index 7794e0b734e..bb52eb368fd 100644
--- a/chromium/v8/src/sanitizer/lsan-page-allocator.cc
+++ b/chromium/v8/src/base/sanitizer/lsan-page-allocator.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/sanitizer/lsan-page-allocator.h"
+#include "src/base/sanitizer/lsan-page-allocator.h"
#include "include/v8-platform.h"
#include "src/base/logging.h"
diff --git a/chromium/v8/src/sanitizer/lsan-page-allocator.h b/chromium/v8/src/base/sanitizer/lsan-page-allocator.h
index f86ffd98e84..4c8a1f04a0d 100644
--- a/chromium/v8/src/sanitizer/lsan-page-allocator.h
+++ b/chromium/v8/src/base/sanitizer/lsan-page-allocator.h
@@ -2,10 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_SANITIZER_LSAN_PAGE_ALLOCATOR_H_
-#define V8_SANITIZER_LSAN_PAGE_ALLOCATOR_H_
+#ifndef V8_BASE_SANITIZER_LSAN_PAGE_ALLOCATOR_H_
+#define V8_BASE_SANITIZER_LSAN_PAGE_ALLOCATOR_H_
#include "include/v8-platform.h"
+#include "src/base/base-export.h"
#include "src/base/compiler-specific.h"
namespace v8 {
@@ -14,7 +15,7 @@ namespace base {
// This is a v8::PageAllocator implementation that decorates provided page
// allocator object with leak sanitizer notifications when LEAK_SANITIZER
// is defined.
-class LsanPageAllocator : public v8::PageAllocator {
+class V8_BASE_EXPORT LsanPageAllocator : public v8::PageAllocator {
public:
explicit LsanPageAllocator(v8::PageAllocator* page_allocator);
~LsanPageAllocator() override = default;
@@ -56,4 +57,4 @@ class LsanPageAllocator : public v8::PageAllocator {
} // namespace base
} // namespace v8
-#endif // V8_SANITIZER_LSAN_PAGE_ALLOCATOR_H_
+#endif // V8_BASE_SANITIZER_LSAN_PAGE_ALLOCATOR_H_
diff --git a/chromium/v8/src/base/lsan.h b/chromium/v8/src/base/sanitizer/lsan.h
index fd9bbd21c1b..2d7dcd7f680 100644
--- a/chromium/v8/src/base/lsan.h
+++ b/chromium/v8/src/base/sanitizer/lsan.h
@@ -4,14 +4,16 @@
// LeakSanitizer support.
-#ifndef V8_BASE_LSAN_H_
-#define V8_BASE_LSAN_H_
+#ifndef V8_BASE_SANITIZER_LSAN_H_
+#define V8_BASE_SANITIZER_LSAN_H_
#include <type_traits>
-// There is no compile time flag for LSan, to enable this whenever ASan is
+#include "src/base/macros.h"
+
+// There is no compile time flag for LSan, so enable this whenever ASan is
// enabled. Note that LSan can be used as part of ASan with 'detect_leaks=1'.
-// On windows, LSan is not implemented yet, so disable it there.
+// On Windows, LSan is not implemented yet, so disable it there.
#if defined(V8_USE_ADDRESS_SANITIZER) && !defined(V8_OS_WIN)
#include <sanitizer/lsan_interface.h>
@@ -26,4 +28,4 @@
#endif // defined(V8_USE_ADDRESS_SANITIZER) && !defined(V8_OS_WIN)
-#endif // V8_BASE_LSAN_H_
+#endif // V8_BASE_SANITIZER_LSAN_H_
diff --git a/chromium/v8/src/base/sanitizer/msan.h b/chromium/v8/src/base/sanitizer/msan.h
new file mode 100644
index 00000000000..e15208efaf8
--- /dev/null
+++ b/chromium/v8/src/base/sanitizer/msan.h
@@ -0,0 +1,40 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// MemorySanitizer support.
+
+#ifndef V8_BASE_SANITIZER_MSAN_H_
+#define V8_BASE_SANITIZER_MSAN_H_
+
+#include "src/base/macros.h"
+#include "src/base/memory.h"
+
+#ifdef V8_USE_MEMORY_SANITIZER
+
+#include <sanitizer/msan_interface.h>
+
+// Marks a memory range as uninitialized, as if it was allocated here.
+#define MSAN_ALLOCATED_UNINITIALIZED_MEMORY(start, size) \
+ __msan_allocated_memory(reinterpret_cast<const void*>(start), (size))
+
+// Marks a memory range as initialized.
+#define MSAN_MEMORY_IS_INITIALIZED(start, size) \
+ __msan_unpoison(reinterpret_cast<const void*>(start), (size))
+
+#else // !V8_USE_MEMORY_SANITIZER
+
+#define MSAN_ALLOCATED_UNINITIALIZED_MEMORY(start, size) \
+ static_assert((std::is_pointer<decltype(start)>::value || \
+ std::is_same<v8::base::Address, decltype(start)>::value), \
+ "static type violation"); \
+ static_assert(std::is_convertible<decltype(size), size_t>::value, \
+ "static type violation"); \
+ USE(start, size)
+
+#define MSAN_MEMORY_IS_INITIALIZED(start, size) \
+ MSAN_ALLOCATED_UNINITIALIZED_MEMORY(start, size)
+
+#endif // V8_USE_MEMORY_SANITIZER
+
+#endif // V8_BASE_SANITIZER_MSAN_H_
diff --git a/chromium/v8/src/base/small-vector.h b/chromium/v8/src/base/small-vector.h
index c337b9052d4..b087d44be4e 100644
--- a/chromium/v8/src/base/small-vector.h
+++ b/chromium/v8/src/base/small-vector.h
@@ -154,6 +154,13 @@ class SmallVector {
base::bits::RoundUpToPowerOfTwo(std::max(min_capacity, 2 * capacity()));
T* new_storage =
reinterpret_cast<T*>(base::Malloc(sizeof(T) * new_capacity));
+ if (new_storage == nullptr) {
+ // Should be: V8::FatalProcessOutOfMemory, but we don't include V8 from
+ // base. The message is intentionally the same as FatalProcessOutOfMemory
+ // since that will help fuzzers and chromecrash to categorize such
+ // crashes appropriately.
+ FATAL("Fatal process out of memory: base::SmallVector::Grow");
+ }
base::Memcpy(new_storage, begin_, sizeof(T) * in_use);
if (is_big()) base::Free(begin_);
begin_ = new_storage;
diff --git a/chromium/v8/src/base/template-utils.h b/chromium/v8/src/base/template-utils.h
index 4f082845d95..f222593e2d2 100644
--- a/chromium/v8/src/base/template-utils.h
+++ b/chromium/v8/src/base/template-utils.h
@@ -98,6 +98,15 @@ struct make_void {
template <class... Ts>
using void_t = typename make_void<Ts...>::type;
+// Corresponds to C++17's std::conjunction
+template <class...>
+struct conjunction : std::true_type {};
+template <class B>
+struct conjunction<B> : B {};
+template <class B, class... Bn>
+struct conjunction<B, Bn...>
+ : std::conditional_t<bool(B::value), conjunction<Bn...>, B> {};
+
} // namespace base
} // namespace v8
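
The conjunction helper added above is a local backport of C++17 std::conjunction, used by the variadic CallOnce constraint earlier in this patch. A self-contained check of its semantics; the trait definitions are repeated verbatim so the static_asserts compile on their own:

#include <type_traits>

// conjunction<B1, ..., Bn> is the first Bi with a false ::value, or the last
// one if all are true; once a false trait is found the remaining ones are
// only named, never instantiated (short-circuiting).
template <class...>
struct conjunction : std::true_type {};
template <class B>
struct conjunction<B> : B {};
template <class B, class... Bn>
struct conjunction<B, Bn...>
    : std::conditional_t<bool(B::value), conjunction<Bn...>, B> {};

static_assert(conjunction<>::value, "empty pack is true");
static_assert(conjunction<std::is_scalar<int>, std::is_scalar<void*>>::value,
              "all true");
static_assert(!conjunction<std::is_scalar<int>, std::is_scalar<int[4]>>::value,
              "stops at the failing trait");

int main() {}
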
diff --git a/chromium/v8/src/base/v8-fallthrough.h b/chromium/v8/src/base/v8-fallthrough.h
index f61238de060..a6dc6972d6d 100644
--- a/chromium/v8/src/base/v8-fallthrough.h
+++ b/chromium/v8/src/base/v8-fallthrough.h
@@ -13,7 +13,7 @@
// So do not include this header in any of v8's public headers -- only
// use it in src/, not in include/.
#if defined(__clang__)
-#define V8_FALLTHROUGH [[clang::fallthrough]] // NOLINT(whitespace/braces)
+#define V8_FALLTHROUGH [[clang::fallthrough]]
#else
#define V8_FALLTHROUGH
#endif
diff --git a/chromium/v8/src/base/vlq.h b/chromium/v8/src/base/vlq.h
index baeb5b9430d..96ee42cf6e8 100644
--- a/chromium/v8/src/base/vlq.h
+++ b/chromium/v8/src/base/vlq.h
@@ -14,60 +14,95 @@ namespace v8 {
namespace base {
static constexpr uint32_t kContinueShift = 7;
-static constexpr uint32_t kContinueMask = 1 << kContinueShift;
-static constexpr uint32_t kDataMask = kContinueMask - 1;
+static constexpr uint32_t kContinueBit = 1 << kContinueShift;
+static constexpr uint32_t kDataMask = kContinueBit - 1;
// Encodes an unsigned value using variable-length encoding and stores it using
-// the passed process_byte function.
-inline void VLQEncodeUnsigned(const std::function<void(byte)>& process_byte,
- uint32_t value) {
- bool has_next;
+// the passed process_byte function. The function should return a pointer to
+// the byte that was written, so that VLQEncodeUnsigned can mutate it after
+// writing it.
+template <typename Function>
+inline typename std::enable_if<
+ std::is_same<decltype(std::declval<Function>()(0)), byte*>::value,
+ void>::type
+VLQEncodeUnsigned(Function&& process_byte, uint32_t value) {
+ byte* written_byte = process_byte(value);
+ if (value <= kDataMask) {
+ // Value fits in first byte, early return.
+ return;
+ }
do {
- byte cur_byte = value & kDataMask;
+ // Turn on continuation bit in the byte we just wrote.
+ *written_byte |= kContinueBit;
value >>= kContinueShift;
- has_next = value != 0;
- // The most significant bit is set when we are not done with the value yet.
- cur_byte |= static_cast<uint32_t>(has_next) << kContinueShift;
- process_byte(cur_byte);
- } while (has_next);
+ written_byte = process_byte(value);
+ } while (value > kDataMask);
}
// Encodes value using variable-length encoding and stores it using the passed
// process_byte function.
-inline void VLQEncode(const std::function<void(byte)>& process_byte,
- int32_t value) {
+template <typename Function>
+inline typename std::enable_if<
+ std::is_same<decltype(std::declval<Function>()(0)), byte*>::value,
+ void>::type
+VLQEncode(Function&& process_byte, int32_t value) {
// This wouldn't handle kMinInt correctly if it ever encountered it.
DCHECK_NE(value, std::numeric_limits<int32_t>::min());
bool is_negative = value < 0;
// Encode sign in least significant bit.
uint32_t bits = static_cast<uint32_t>((is_negative ? -value : value) << 1) |
static_cast<uint32_t>(is_negative);
- VLQEncodeUnsigned(process_byte, bits);
+ VLQEncodeUnsigned(std::forward<Function>(process_byte), bits);
}
// Wrapper of VLQEncode for std::vector backed storage containers.
template <typename A>
inline void VLQEncode(std::vector<byte, A>* data, int32_t value) {
- VLQEncode([data](byte value) { data->push_back(value); }, value);
+ VLQEncode(
+ [data](byte value) {
+ data->push_back(value);
+ return &data->back();
+ },
+ value);
}
// Wrapper of VLQEncodeUnsigned for std::vector backed storage containers.
template <typename A>
inline void VLQEncodeUnsigned(std::vector<byte, A>* data, uint32_t value) {
- VLQEncodeUnsigned([data](byte value) { data->push_back(value); }, value);
+ VLQEncodeUnsigned(
+ [data](byte value) {
+ data->push_back(value);
+ return &data->back();
+ },
+ value);
+}
+
+// Decodes a variable-length encoded unsigned value from bytes returned by
+// successive calls to the given function.
+template <typename GetNextFunction>
+inline typename std::enable_if<
+ std::is_same<decltype(std::declval<GetNextFunction>()()), byte>::value,
+ uint32_t>::type
+VLQDecodeUnsigned(GetNextFunction&& get_next) {
+ byte cur_byte = get_next();
+ // Single byte fast path; no need to mask.
+ if (cur_byte <= kDataMask) {
+ return cur_byte;
+ }
+ uint32_t bits = cur_byte & kDataMask;
+ for (int shift = kContinueShift; shift <= 32; shift += kContinueShift) {
+ byte cur_byte = get_next();
+ bits |= (cur_byte & kDataMask) << shift;
+ if (cur_byte <= kDataMask) break;
+ }
+ return bits;
}
// Decodes a variable-length encoded unsigned value stored in contiguous memory
// starting at data_start + index, updating index to where the next encoded
// value starts.
inline uint32_t VLQDecodeUnsigned(byte* data_start, int* index) {
- uint32_t bits = 0;
- for (int shift = 0; true; shift += kContinueShift) {
- byte cur_byte = data_start[(*index)++];
- bits += (cur_byte & kDataMask) << shift;
- if ((cur_byte & kContinueMask) == 0) break;
- }
- return bits;
+ return VLQDecodeUnsigned([&] { return data_start[(*index)++]; });
}
// Decodes a variable-length encoded value stored in contiguous memory starting
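
The rewritten VLQ coder above keeps the same wire format: seven data bits per byte, least-significant group first, bit 7 set when another byte follows. What changes is that the encoder now writes a byte first and patches the continuation bit in afterwards (hence process_byte returning byte*), and the unsigned decoder gains a callback-based form. A short self-contained round trip of that format; the encoder here uses the simpler pre-patch control flow, which produces identical bytes:

#include <cstdint>
#include <cstdio>
#include <vector>

// Low 7 bits carry data, bit 7 says "another byte follows".
constexpr uint32_t kDataMask = 0x7f;
constexpr uint32_t kContinueBit = 0x80;

void EncodeUnsigned(std::vector<uint8_t>* out, uint32_t value) {
  do {
    uint8_t byte = value & kDataMask;
    value >>= 7;
    if (value != 0) byte |= kContinueBit;  // More 7-bit groups follow.
    out->push_back(byte);
  } while (value != 0);
}

uint32_t DecodeUnsigned(const std::vector<uint8_t>& in, size_t* index) {
  uint32_t bits = 0;
  for (int shift = 0;; shift += 7) {
    uint8_t byte = in[(*index)++];
    bits |= static_cast<uint32_t>(byte & kDataMask) << shift;
    if ((byte & kContinueBit) == 0) break;
  }
  return bits;
}

int main() {
  std::vector<uint8_t> bytes;
  EncodeUnsigned(&bytes, 300);  // 300 = 0b10'0101100 -> 0xAC 0x02.
  size_t i = 0;
  uint32_t decoded = DecodeUnsigned(bytes, &i);
  std::printf("bytes: %02x %02x  decoded: %u\n",
              static_cast<unsigned>(bytes[0]), static_cast<unsigned>(bytes[1]),
              decoded);
}
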
diff --git a/chromium/v8/src/baseline/arm/baseline-assembler-arm-inl.h b/chromium/v8/src/baseline/arm/baseline-assembler-arm-inl.h
index eca2b47cc0e..bfccef90f8f 100644
--- a/chromium/v8/src/baseline/arm/baseline-assembler-arm-inl.h
+++ b/chromium/v8/src/baseline/arm/baseline-assembler-arm-inl.h
@@ -124,7 +124,7 @@ void BaselineAssembler::CallBuiltin(Builtins::Name builtin) {
Register temp = temps.AcquireScratch();
__ LoadEntryFromBuiltinIndex(builtin, temp);
__ Call(temp);
- if (FLAG_code_comments) __ RecordComment("]");
+ __ RecordComment("]");
}
void BaselineAssembler::TailCallBuiltin(Builtins::Name builtin) {
@@ -133,7 +133,7 @@ void BaselineAssembler::TailCallBuiltin(Builtins::Name builtin) {
Register temp = temps.AcquireScratch();
__ LoadEntryFromBuiltinIndex(builtin, temp);
__ Jump(temp);
- if (FLAG_code_comments) __ RecordComment("]");
+ __ RecordComment("]");
}
void BaselineAssembler::Test(Register value, int mask) {
@@ -151,7 +151,7 @@ void BaselineAssembler::CmpInstanceType(Register map,
InstanceType instance_type) {
ScratchRegisterScope temps(this);
Register type = temps.AcquireScratch();
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
__ AssertNotSmi(map);
__ CompareObjectType(map, type, type, MAP_TYPE);
__ Assert(eq, AbortReason::kUnexpectedValue);
@@ -198,10 +198,10 @@ void BaselineAssembler::Move(MemOperand output, Register source) {
__ str(source, output);
}
void BaselineAssembler::Move(Register output, ExternalReference reference) {
- __ mov(output, Operand(reference));
+ __ Move32BitImmediate(output, Operand(reference));
}
void BaselineAssembler::Move(Register output, Handle<HeapObject> value) {
- __ mov(output, Operand(value));
+ __ Move32BitImmediate(output, Operand(value));
}
void BaselineAssembler::Move(Register output, int32_t value) {
__ mov(output, Operand(value));
@@ -351,7 +351,7 @@ void BaselineAssembler::StoreTaggedFieldWithWriteBarrier(Register target,
Register value) {
__ str(value, FieldMemOperand(target, offset));
__ RecordWriteField(target, offset, value, kLRHasNotBeenSaved,
- kDontSaveFPRegs);
+ SaveFPRegsMode::kIgnore);
}
void BaselineAssembler::StoreTaggedFieldNoWriteBarrier(Register target,
int offset,
diff --git a/chromium/v8/src/baseline/arm/baseline-compiler-arm-inl.h b/chromium/v8/src/baseline/arm/baseline-compiler-arm-inl.h
index ff2b6d1a831..d7f0a606d3b 100644
--- a/chromium/v8/src/baseline/arm/baseline-compiler-arm-inl.h
+++ b/chromium/v8/src/baseline/arm/baseline-compiler-arm-inl.h
@@ -19,9 +19,9 @@ void BaselineCompiler::Prologue() {
__ masm()->EnterFrame(StackFrame::BASELINE);
DCHECK_EQ(kJSFunctionRegister, kJavaScriptCallTargetRegister);
int max_frame_size = bytecode_->frame_size() + max_call_args_;
- CallBuiltin(Builtins::kBaselineOutOfLinePrologue, kContextRegister,
- kJSFunctionRegister, kJavaScriptCallArgCountRegister,
- max_frame_size, kJavaScriptCallNewTargetRegister, bytecode_);
+ CallBuiltin<Builtins::kBaselineOutOfLinePrologue>(
+ kContextRegister, kJSFunctionRegister, kJavaScriptCallArgCountRegister,
+ max_frame_size, kJavaScriptCallNewTargetRegister, bytecode_);
PrologueFillFrame();
}
diff --git a/chromium/v8/src/baseline/arm64/baseline-assembler-arm64-inl.h b/chromium/v8/src/baseline/arm64/baseline-assembler-arm64-inl.h
index 27b7c2b2d8d..63e90df4d62 100644
--- a/chromium/v8/src/baseline/arm64/baseline-assembler-arm64-inl.h
+++ b/chromium/v8/src/baseline/arm64/baseline-assembler-arm64-inl.h
@@ -167,7 +167,7 @@ void BaselineAssembler::CmpInstanceType(Register map,
InstanceType instance_type) {
ScratchRegisterScope temps(this);
Register type = temps.AcquireScratch();
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
__ AssertNotSmi(map);
__ CompareObjectType(map, type, type, MAP_TYPE);
__ Assert(eq, AbortReason::kUnexpectedValue);
@@ -422,7 +422,7 @@ void BaselineAssembler::StoreTaggedFieldWithWriteBarrier(Register target,
Register value) {
__ StoreTaggedField(value, FieldMemOperand(target, offset));
__ RecordWriteField(target, offset, value, kLRHasNotBeenSaved,
- kDontSaveFPRegs);
+ SaveFPRegsMode::kIgnore);
}
void BaselineAssembler::StoreTaggedFieldNoWriteBarrier(Register target,
int offset,
diff --git a/chromium/v8/src/baseline/arm64/baseline-compiler-arm64-inl.h b/chromium/v8/src/baseline/arm64/baseline-compiler-arm64-inl.h
index e567be41d24..0807c5434ac 100644
--- a/chromium/v8/src/baseline/arm64/baseline-compiler-arm64-inl.h
+++ b/chromium/v8/src/baseline/arm64/baseline-compiler-arm64-inl.h
@@ -18,9 +18,9 @@ void BaselineCompiler::Prologue() {
__ masm()->EnterFrame(StackFrame::BASELINE);
DCHECK_EQ(kJSFunctionRegister, kJavaScriptCallTargetRegister);
int max_frame_size = bytecode_->frame_size() + max_call_args_;
- CallBuiltin(Builtins::kBaselineOutOfLinePrologue, kContextRegister,
- kJSFunctionRegister, kJavaScriptCallArgCountRegister,
- max_frame_size, kJavaScriptCallNewTargetRegister, bytecode_);
+ CallBuiltin<Builtins::kBaselineOutOfLinePrologue>(
+ kContextRegister, kJSFunctionRegister, kJavaScriptCallArgCountRegister,
+ max_frame_size, kJavaScriptCallNewTargetRegister, bytecode_);
__ masm()->AssertSpAligned();
PrologueFillFrame();
diff --git a/chromium/v8/src/baseline/baseline-assembler-inl.h b/chromium/v8/src/baseline/baseline-assembler-inl.h
index 8fd54d63a2f..401062517f6 100644
--- a/chromium/v8/src/baseline/baseline-assembler-inl.h
+++ b/chromium/v8/src/baseline/baseline-assembler-inl.h
@@ -8,12 +8,13 @@
// TODO(v8:11421): Remove #if once baseline compiler is ported to other
// architectures.
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
- V8_TARGET_ARCH_ARM
+ V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_RISCV64
#include <type_traits>
#include <unordered_map>
#include "src/baseline/baseline-assembler.h"
+#include "src/codegen/interface-descriptors-inl.h"
#include "src/interpreter/bytecode-register.h"
#include "src/objects/feedback-cell.h"
#include "src/objects/js-function.h"
@@ -27,6 +28,8 @@
#include "src/baseline/ia32/baseline-assembler-ia32-inl.h"
#elif V8_TARGET_ARCH_ARM
#include "src/baseline/arm/baseline-assembler-arm-inl.h"
+#elif V8_TARGET_ARCH_RISCV64
+#include "src/baseline/riscv64/baseline-assembler-riscv64-inl.h"
#else
#error Unsupported target architecture.
#endif
@@ -41,10 +44,10 @@ void BaselineAssembler::GetCode(Isolate* isolate, CodeDesc* desc) {
__ GetCode(isolate, desc);
}
int BaselineAssembler::pc_offset() const { return __ pc_offset(); }
-bool BaselineAssembler::emit_debug_code() const { return __ emit_debug_code(); }
void BaselineAssembler::CodeEntry() const { __ CodeEntry(); }
void BaselineAssembler::ExceptionHandler() const { __ ExceptionHandler(); }
void BaselineAssembler::RecordComment(const char* string) {
+ if (!FLAG_code_comments) return;
__ RecordComment(string);
}
void BaselineAssembler::Trap() { __ Trap(); }
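
RecordComment now performs the FLAG_code_comments test itself (and is marked V8_INLINE in baseline-assembler.h below), so call sites such as CallBuiltin/TailCallBuiltin in the arm assembler above drop their own "if (FLAG_code_comments)" guards. A minimal standalone sketch of that pattern; FLAG_code_comments here is a plain bool standing in for V8's flag machinery:

#include <cstdio>

static bool FLAG_code_comments = false;  // Stand-in for V8's runtime flag.

// Centralize the cheap flag test in the (inlined) helper so every caller can
// drop its own guard; with the flag off the call is effectively free.
inline void RecordComment(const char* text) {
  if (!FLAG_code_comments) return;
  std::printf("; %s\n", text);
}

int main() {
  RecordComment("[ CallBuiltin");  // No output while the flag is off.
  FLAG_code_comments = true;
  RecordComment("]");
}
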
diff --git a/chromium/v8/src/baseline/baseline-assembler.h b/chromium/v8/src/baseline/baseline-assembler.h
index 38874d556f0..7c46cd5e2c4 100644
--- a/chromium/v8/src/baseline/baseline-assembler.h
+++ b/chromium/v8/src/baseline/baseline-assembler.h
@@ -8,7 +8,7 @@
// TODO(v8:11421): Remove #if once baseline compiler is ported to other
// architectures.
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
- V8_TARGET_ARCH_ARM
+ V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_RISCV64
#include "src/codegen/macro-assembler.h"
#include "src/objects/tagged-index.h"
@@ -32,10 +32,9 @@ class BaselineAssembler {
inline void GetCode(Isolate* isolate, CodeDesc* desc);
inline int pc_offset() const;
- inline bool emit_debug_code() const;
inline void CodeEntry() const;
inline void ExceptionHandler() const;
- inline void RecordComment(const char* string);
+ V8_INLINE void RecordComment(const char* string);
inline void Trap();
inline void DebugBreak();
diff --git a/chromium/v8/src/baseline/baseline-compiler.cc b/chromium/v8/src/baseline/baseline-compiler.cc
index 3d599c11fd5..9c6e3f10e6d 100644
--- a/chromium/v8/src/baseline/baseline-compiler.cc
+++ b/chromium/v8/src/baseline/baseline-compiler.cc
@@ -4,8 +4,9 @@
// TODO(v8:11421): Remove #if once baseline compiler is ported to other
// architectures.
+#include "src/base/bits.h"
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
- V8_TARGET_ARCH_ARM
+ V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_RISCV64
#include "src/baseline/baseline-compiler.h"
@@ -19,7 +20,7 @@
#include "src/builtins/builtins.h"
#include "src/codegen/assembler.h"
#include "src/codegen/compiler.h"
-#include "src/codegen/interface-descriptors.h"
+#include "src/codegen/interface-descriptors-inl.h"
#include "src/codegen/machine-type.h"
#include "src/codegen/macro-assembler-inl.h"
#include "src/common/globals.h"
@@ -40,6 +41,8 @@
#include "src/baseline/ia32/baseline-compiler-ia32-inl.h"
#elif V8_TARGET_ARCH_ARM
#include "src/baseline/arm/baseline-compiler-arm-inl.h"
+#elif V8_TARGET_ARCH_RISCV64
+#include "src/baseline/riscv64/baseline-compiler-riscv64-inl.h"
#else
#error Unsupported target architecture.
#endif
@@ -48,9 +51,9 @@ namespace v8 {
namespace internal {
namespace baseline {
-template <typename LocalIsolate>
+template <typename IsolateT>
Handle<ByteArray> BytecodeOffsetTableBuilder::ToBytecodeOffsetTable(
- LocalIsolate* isolate) {
+ IsolateT* isolate) {
if (bytes_.empty()) return isolate->factory()->empty_byte_array();
Handle<ByteArray> table = isolate->factory()->NewByteArray(
static_cast<int>(bytes_.size()), AllocationType::kOld);
@@ -68,6 +71,7 @@ bool Clobbers(Register target, TaggedIndex index) { return false; }
bool Clobbers(Register target, int32_t imm) { return false; }
bool Clobbers(Register target, RootIndex index) { return false; }
bool Clobbers(Register target, interpreter::Register reg) { return false; }
+bool Clobbers(Register target, interpreter::RegisterList list) { return false; }
// We don't know what's inside machine registers or operands, so assume they
// match.
@@ -97,134 +101,151 @@ bool MachineTypeMatches(MachineType type, interpreter::Register reg) {
return type.IsTagged();
}
-template <typename... Args>
+template <typename Descriptor, typename... Args>
struct CheckArgsHelper;
-template <>
-struct CheckArgsHelper<> {
- static void Check(BaselineAssembler* masm, CallInterfaceDescriptor descriptor,
- int i) {
- if (descriptor.AllowVarArgs()) {
- CHECK_GE(i, descriptor.GetParameterCount());
+template <typename Descriptor>
+struct CheckArgsHelper<Descriptor> {
+ static void Check(BaselineAssembler* masm, int i) {
+ if (Descriptor::AllowVarArgs()) {
+ CHECK_GE(i, Descriptor::GetParameterCount());
} else {
- CHECK_EQ(i, descriptor.GetParameterCount());
+ CHECK_EQ(i, Descriptor::GetParameterCount());
}
}
};
-template <typename Arg, typename... Args>
-struct CheckArgsHelper<Arg, Args...> {
- static void Check(BaselineAssembler* masm, CallInterfaceDescriptor descriptor,
- int i, Arg arg, Args... args) {
- if (i >= descriptor.GetParameterCount()) {
- CHECK(descriptor.AllowVarArgs());
+template <typename Descriptor, typename Arg, typename... Args>
+struct CheckArgsHelper<Descriptor, Arg, Args...> {
+ static void Check(BaselineAssembler* masm, int i, Arg arg, Args... args) {
+ if (i >= Descriptor::GetParameterCount()) {
+ CHECK(Descriptor::AllowVarArgs());
return;
}
- CHECK(MachineTypeMatches(descriptor.GetParameterType(i), arg));
- CheckArgsHelper<Args...>::Check(masm, descriptor, i + 1, args...);
+ CHECK(MachineTypeMatches(Descriptor().GetParameterType(i), arg));
+ CheckArgsHelper<Descriptor, Args...>::Check(masm, i + 1, args...);
}
};
-template <typename... Args>
-struct CheckArgsHelper<interpreter::RegisterList, Args...> {
- static void Check(BaselineAssembler* masm, CallInterfaceDescriptor descriptor,
- int i, interpreter::RegisterList list, Args... args) {
+template <typename Descriptor, typename... Args>
+struct CheckArgsHelper<Descriptor, interpreter::RegisterList, Args...> {
+ static void Check(BaselineAssembler* masm, int i,
+ interpreter::RegisterList list, Args... args) {
for (int reg_index = 0; reg_index < list.register_count();
++reg_index, ++i) {
- if (i >= descriptor.GetParameterCount()) {
- CHECK(descriptor.AllowVarArgs());
+ if (i >= Descriptor::GetParameterCount()) {
+ CHECK(Descriptor::AllowVarArgs());
return;
}
- CHECK(
- MachineTypeMatches(descriptor.GetParameterType(i), list[reg_index]));
+ CHECK(MachineTypeMatches(Descriptor().GetParameterType(i),
+ list[reg_index]));
}
- CheckArgsHelper<Args...>::Check(masm, descriptor, i, args...);
+ CheckArgsHelper<Descriptor, Args...>::Check(masm, i, args...);
}
};
-template <typename... Args>
-void CheckArgs(BaselineAssembler* masm, CallInterfaceDescriptor descriptor,
- Args... args) {
- CheckArgsHelper<Args...>::Check(masm, descriptor, 0, args...);
+template <typename Descriptor, typename... Args>
+void CheckArgs(BaselineAssembler* masm, Args... args) {
+ CheckArgsHelper<Descriptor, Args...>::Check(masm, 0, args...);
+}
+
+void CheckSettingDoesntClobber(Register target) {}
+template <typename Arg, typename... Args>
+void CheckSettingDoesntClobber(Register target, Arg arg, Args... args) {
+ DCHECK(!Clobbers(target, arg));
+ CheckSettingDoesntClobber(target, args...);
}
#else // DEBUG
-template <typename... Args>
+template <typename Descriptor, typename... Args>
void CheckArgs(Args... args) {}
+template <typename... Args>
+void CheckSettingDoesntClobber(Register target, Args... args) {}
+
#endif // DEBUG
-template <typename... Args>
+template <typename Descriptor, int ArgIndex, bool kIsRegister, typename... Args>
struct ArgumentSettingHelper;
-template <>
-struct ArgumentSettingHelper<> {
- static void Set(BaselineAssembler* masm, CallInterfaceDescriptor descriptor,
- int i) {}
- static void CheckSettingDoesntClobber(Register target, int arg_index) {}
+template <typename Descriptor, int ArgIndex, bool kIsRegister>
+struct ArgumentSettingHelper<Descriptor, ArgIndex, kIsRegister> {
+ static void Set(BaselineAssembler* masm) {
+ // Should only ever be called for the end of register arguments.
+ STATIC_ASSERT(ArgIndex == Descriptor::GetRegisterParameterCount());
+ }
};
-template <typename Arg, typename... Args>
-struct ArgumentSettingHelper<Arg, Args...> {
- static void Set(BaselineAssembler* masm, CallInterfaceDescriptor descriptor,
- int i, Arg arg, Args... args) {
- if (i < descriptor.GetRegisterParameterCount()) {
- Register target = descriptor.GetRegisterParameter(i);
- ArgumentSettingHelper<Args...>::CheckSettingDoesntClobber(target, i + 1,
- args...);
- masm->Move(target, arg);
- ArgumentSettingHelper<Args...>::Set(masm, descriptor, i + 1, args...);
- } else if (descriptor.GetStackArgumentOrder() ==
- StackArgumentOrder::kDefault) {
- masm->Push(arg, args...);
- } else {
- masm->PushReverse(arg, args...);
- }
+template <typename Descriptor, int ArgIndex, typename Arg, typename... Args>
+struct ArgumentSettingHelper<Descriptor, ArgIndex, true, Arg, Args...> {
+ static void Set(BaselineAssembler* masm, Arg arg, Args... args) {
+ STATIC_ASSERT(ArgIndex < Descriptor::GetRegisterParameterCount());
+ Register target = Descriptor::GetRegisterParameter(ArgIndex);
+ CheckSettingDoesntClobber(target, args...);
+ masm->Move(target, arg);
+ ArgumentSettingHelper<Descriptor, ArgIndex + 1,
+ (ArgIndex + 1 <
+ Descriptor::GetRegisterParameterCount()),
+ Args...>::Set(masm, args...);
}
- static void CheckSettingDoesntClobber(Register target, int arg_index, Arg arg,
- Args... args) {
- DCHECK(!Clobbers(target, arg));
- ArgumentSettingHelper<Args...>::CheckSettingDoesntClobber(
- target, arg_index + 1, args...);
+};
+
+template <typename Descriptor, int ArgIndex>
+struct ArgumentSettingHelper<Descriptor, ArgIndex, true,
+ interpreter::RegisterList> {
+ static void Set(BaselineAssembler* masm, interpreter::RegisterList list) {
+ STATIC_ASSERT(ArgIndex < Descriptor::GetRegisterParameterCount());
+ DCHECK_EQ(ArgIndex + list.register_count(),
+ Descriptor::GetRegisterParameterCount());
+ for (int i = 0; ArgIndex + i < Descriptor::GetRegisterParameterCount();
+ ++i) {
+ Register target = Descriptor::GetRegisterParameter(ArgIndex + i);
+ masm->Move(target, masm->RegisterFrameOperand(list[i]));
+ }
}
};
-// Specialization for interpreter::RegisterList which iterates it.
-// RegisterLists are only allowed to be the last argument.
-template <>
-struct ArgumentSettingHelper<interpreter::RegisterList> {
- static void Set(BaselineAssembler* masm, CallInterfaceDescriptor descriptor,
- int i, interpreter::RegisterList list) {
- // Either all the values are in machine registers, or they're all on the
- // stack.
- if (i < descriptor.GetRegisterParameterCount()) {
- for (int reg_index = 0; reg_index < list.register_count();
- ++reg_index, ++i) {
- Register target = descriptor.GetRegisterParameter(i);
- masm->Move(target, masm->RegisterFrameOperand(list[reg_index]));
- }
- } else if (descriptor.GetStackArgumentOrder() ==
- StackArgumentOrder::kDefault) {
- masm->Push(list);
+template <typename Descriptor, int ArgIndex, typename Arg, typename... Args>
+struct ArgumentSettingHelper<Descriptor, ArgIndex, false, Arg, Args...> {
+ static void Set(BaselineAssembler* masm, Arg arg, Args... args) {
+ if (Descriptor::kStackArgumentOrder == StackArgumentOrder::kDefault) {
+ masm->Push(arg, args...);
} else {
- masm->PushReverse(list);
+ masm->PushReverse(arg, args...);
}
}
- static void CheckSettingDoesntClobber(Register target, int arg_index,
- interpreter::RegisterList arg) {}
};
-template <typename... Args>
-void MoveArgumentsForDescriptor(BaselineAssembler* masm,
- CallInterfaceDescriptor descriptor,
- Args... args) {
- CheckArgs(masm, descriptor, args...);
- ArgumentSettingHelper<Args...>::Set(masm, descriptor, 0, args...);
+template <Builtins::Name kBuiltin, typename... Args>
+void MoveArgumentsForBuiltin(BaselineAssembler* masm, Args... args) {
+ using Descriptor = typename CallInterfaceDescriptorFor<kBuiltin>::type;
+ CheckArgs<Descriptor>(masm, args...);
+ ArgumentSettingHelper<Descriptor, 0,
+ (0 < Descriptor::GetRegisterParameterCount()),
+ Args...>::Set(masm, args...);
+ if (Descriptor::HasContextParameter()) {
+ masm->LoadContext(Descriptor::ContextRegister());
+ }
}
} // namespace detail
+namespace {
+// Rough upper-bound estimate. Copying the data is most likely more expensive
+// than pre-allocating a large enough buffer.
+#ifdef V8_TARGET_ARCH_IA32
+const int kAverageBytecodeToInstructionRatio = 5;
+#else
+const int kAverageBytecodeToInstructionRatio = 7;
+#endif
+std::unique_ptr<AssemblerBuffer> AllocateBuffer(
+ Handle<BytecodeArray> bytecodes) {
+ int estimated_size = bytecodes->length() * kAverageBytecodeToInstructionRatio;
+ return NewAssemblerBuffer(RoundUp(estimated_size, 4 * KB));
+}
+} // namespace
+
BaselineCompiler::BaselineCompiler(
Isolate* isolate, Handle<SharedFunctionInfo> shared_function_info,
Handle<BytecodeArray> bytecode)
@@ -232,20 +253,26 @@ BaselineCompiler::BaselineCompiler(
stats_(isolate->counters()->runtime_call_stats()),
shared_function_info_(shared_function_info),
bytecode_(bytecode),
- masm_(isolate, CodeObjectRequired::kNo),
+ masm_(isolate, CodeObjectRequired::kNo, AllocateBuffer(bytecode)),
basm_(&masm_),
iterator_(bytecode_),
zone_(isolate->allocator(), ZONE_NAME),
labels_(zone_.NewArray<BaselineLabels*>(bytecode_->length())) {
MemsetPointer(labels_, nullptr, bytecode_->length());
+
+ // Empirically determined expected size of the offset table at the 95th %ile,
+ // based on the size of the bytecode, to be:
+ //
+ // 16 + (bytecode size) / 4
+ bytecode_offset_table_builder_.Reserve(
+ base::bits::RoundUpToPowerOfTwo(16 + bytecode_->Size() / 4));
}
#define __ basm_.
void BaselineCompiler::GenerateCode() {
{
- RuntimeCallTimerScope runtimeTimer(
- stats_, RuntimeCallCounterId::kCompileBaselinePreVisit);
+ RCS_SCOPE(stats_, RuntimeCallCounterId::kCompileBaselinePreVisit);
for (; !iterator_.done(); iterator_.Advance()) {
PreVisitSingleBytecode();
}
@@ -257,8 +284,7 @@ void BaselineCompiler::GenerateCode() {
__ CodeEntry();
{
- RuntimeCallTimerScope runtimeTimer(
- stats_, RuntimeCallCounterId::kCompileBaselineVisit);
+ RCS_SCOPE(stats_, RuntimeCallCounterId::kCompileBaselineVisit);
Prologue();
AddPosition();
for (; !iterator_.done(); iterator_.Advance()) {
@@ -453,7 +479,7 @@ void BaselineCompiler::VisitSingleBytecode() {
}
void BaselineCompiler::VerifyFrame() {
- if (__ emit_debug_code()) {
+ if (FLAG_debug_code) {
__ RecordComment("[ Verify frame");
__ RecordComment(" -- Verify frame size");
VerifyFrameSize();
@@ -552,28 +578,18 @@ Label* BaselineCompiler::BuildForwardJumpLabel() {
return &threaded_label->label;
}
-template <typename... Args>
-void BaselineCompiler::CallBuiltin(Builtins::Name builtin, Args... args) {
+template <Builtins::Name kBuiltin, typename... Args>
+void BaselineCompiler::CallBuiltin(Args... args) {
__ RecordComment("[ CallBuiltin");
- CallInterfaceDescriptor descriptor =
- Builtins::CallInterfaceDescriptorFor(builtin);
- detail::MoveArgumentsForDescriptor(&basm_, descriptor, args...);
- if (descriptor.HasContextParameter()) {
- __ LoadContext(descriptor.ContextRegister());
- }
- __ CallBuiltin(builtin);
+ detail::MoveArgumentsForBuiltin<kBuiltin>(&basm_, args...);
+ __ CallBuiltin(kBuiltin);
__ RecordComment("]");
}
-template <typename... Args>
-void BaselineCompiler::TailCallBuiltin(Builtins::Name builtin, Args... args) {
- CallInterfaceDescriptor descriptor =
- Builtins::CallInterfaceDescriptorFor(builtin);
- detail::MoveArgumentsForDescriptor(&basm_, descriptor, args...);
- if (descriptor.HasContextParameter()) {
- __ LoadContext(descriptor.ContextRegister());
- }
- __ TailCallBuiltin(builtin);
+template <Builtins::Name kBuiltin, typename... Args>
+void BaselineCompiler::TailCallBuiltin(Args... args) {
+ detail::MoveArgumentsForBuiltin<kBuiltin>(&basm_, args...);
+ __ TailCallBuiltin(kBuiltin);
}
template <typename... Args>
@@ -584,27 +600,17 @@ void BaselineCompiler::CallRuntime(Runtime::FunctionId function, Args... args) {
}
// Returns into kInterpreterAccumulatorRegister
-void BaselineCompiler::JumpIfToBoolean(bool do_jump_if_true, Register reg,
- Label* label, Label::Distance distance) {
- Label end;
- Label::Distance end_distance = Label::kNear;
-
- Label* true_label = do_jump_if_true ? label : &end;
- Label::Distance true_distance = do_jump_if_true ? distance : end_distance;
- Label* false_label = do_jump_if_true ? &end : label;
- Label::Distance false_distance = do_jump_if_true ? end_distance : distance;
-
- BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_);
- Register to_boolean = scratch_scope.AcquireScratch();
- {
- SaveAccumulatorScope accumulator_scope(&basm_);
- CallBuiltin(Builtins::kToBoolean, reg);
- __ Move(to_boolean, kInterpreterAccumulatorRegister);
- }
- __ JumpIfRoot(to_boolean, RootIndex::kTrueValue, true_label, true_distance);
- if (false_label != &end) __ Jump(false_label, false_distance);
-
- __ Bind(&end);
+void BaselineCompiler::JumpIfToBoolean(bool do_jump_if_true, Label* label,
+ Label::Distance distance) {
+ CallBuiltin<Builtins::kToBooleanForBaselineJump>(
+ kInterpreterAccumulatorRegister);
+ // ToBooleanForBaselineJump returns the ToBoolean value into return reg 1, and
+ // the original value into kInterpreterAccumulatorRegister, so we don't have
+ // to worry about it getting clobbered.
+ STATIC_ASSERT(kReturnRegister0 == kInterpreterAccumulatorRegister);
+ __ Cmp(kReturnRegister1, Smi::FromInt(0));
+ __ JumpIf(do_jump_if_true ? Condition::kNotEqual : Condition::kEqual, label,
+ distance);
}
void BaselineCompiler::VisitLdaZero() {
@@ -641,22 +647,21 @@ void BaselineCompiler::VisitLdaConstant() {
}
void BaselineCompiler::VisitLdaGlobal() {
- CallBuiltin(Builtins::kLoadGlobalICBaseline,
- Constant<Name>(0), // name
- IndexAsTagged(1)); // slot
+ CallBuiltin<Builtins::kLoadGlobalICBaseline>(Constant<Name>(0), // name
+ IndexAsTagged(1)); // slot
}
void BaselineCompiler::VisitLdaGlobalInsideTypeof() {
- CallBuiltin(Builtins::kLoadGlobalICInsideTypeofBaseline,
- Constant<Name>(0), // name
- IndexAsTagged(1)); // slot
+ CallBuiltin<Builtins::kLoadGlobalICInsideTypeofBaseline>(
+ Constant<Name>(0), // name
+ IndexAsTagged(1)); // slot
}
void BaselineCompiler::VisitStaGlobal() {
- CallBuiltin(Builtins::kStoreGlobalICBaseline,
- Constant<Name>(0), // name
- kInterpreterAccumulatorRegister, // value
- IndexAsTagged(1)); // slot
+ CallBuiltin<Builtins::kStoreGlobalICBaseline>(
+ Constant<Name>(0), // name
+ kInterpreterAccumulatorRegister, // value
+ IndexAsTagged(1)); // slot
}
void BaselineCompiler::VisitPushContext() {
@@ -730,13 +735,13 @@ void BaselineCompiler::VisitLdaLookupSlot() {
}
void BaselineCompiler::VisitLdaLookupContextSlot() {
- CallBuiltin(Builtins::kLookupContextBaseline, Constant<Name>(0),
- UintAsTagged(2), IndexAsTagged(1));
+ CallBuiltin<Builtins::kLookupContextBaseline>(
+ Constant<Name>(0), UintAsTagged(2), IndexAsTagged(1));
}
void BaselineCompiler::VisitLdaLookupGlobalSlot() {
- CallBuiltin(Builtins::kLookupGlobalICBaseline, Constant<Name>(0),
- UintAsTagged(2), IndexAsTagged(1));
+ CallBuiltin<Builtins::kLookupGlobalICBaseline>(
+ Constant<Name>(0), UintAsTagged(2), IndexAsTagged(1));
}
void BaselineCompiler::VisitLdaLookupSlotInsideTypeof() {
@@ -744,13 +749,13 @@ void BaselineCompiler::VisitLdaLookupSlotInsideTypeof() {
}
void BaselineCompiler::VisitLdaLookupContextSlotInsideTypeof() {
- CallBuiltin(Builtins::kLookupContextInsideTypeofBaseline, Constant<Name>(0),
- UintAsTagged(2), IndexAsTagged(1));
+ CallBuiltin<Builtins::kLookupContextInsideTypeofBaseline>(
+ Constant<Name>(0), UintAsTagged(2), IndexAsTagged(1));
}
void BaselineCompiler::VisitLdaLookupGlobalSlotInsideTypeof() {
- CallBuiltin(Builtins::kLookupGlobalICInsideTypeofBaseline, Constant<Name>(0),
- UintAsTagged(2), IndexAsTagged(1));
+ CallBuiltin<Builtins::kLookupGlobalICInsideTypeofBaseline>(
+ Constant<Name>(0), UintAsTagged(2), IndexAsTagged(1));
}
void BaselineCompiler::VisitStaLookupSlot() {
@@ -793,14 +798,13 @@ void BaselineCompiler::VisitMov() {
}
void BaselineCompiler::VisitLdaNamedProperty() {
- CallBuiltin(Builtins::kLoadICBaseline,
- RegisterOperand(0), // object
- Constant<Name>(1), // name
- IndexAsTagged(2)); // slot
+ CallBuiltin<Builtins::kLoadICBaseline>(RegisterOperand(0), // object
+ Constant<Name>(1), // name
+ IndexAsTagged(2)); // slot
}
void BaselineCompiler::VisitLdaNamedPropertyNoFeedback() {
- CallBuiltin(Builtins::kGetProperty, RegisterOperand(0), Constant<Name>(1));
+ CallBuiltin<Builtins::kGetProperty>(RegisterOperand(0), Constant<Name>(1));
}
void BaselineCompiler::VisitLdaNamedPropertyFromSuper() {
@@ -808,19 +812,19 @@ void BaselineCompiler::VisitLdaNamedPropertyFromSuper() {
LoadWithReceiverAndVectorDescriptor::LookupStartObjectRegister(),
kInterpreterAccumulatorRegister);
- CallBuiltin(Builtins::kLoadSuperICBaseline,
- RegisterOperand(0), // object
- LoadWithReceiverAndVectorDescriptor::
- LookupStartObjectRegister(), // lookup start
- Constant<Name>(1), // name
- IndexAsTagged(2)); // slot
+ CallBuiltin<Builtins::kLoadSuperICBaseline>(
+ RegisterOperand(0), // object
+ LoadWithReceiverAndVectorDescriptor::
+ LookupStartObjectRegister(), // lookup start
+ Constant<Name>(1), // name
+ IndexAsTagged(2)); // slot
}
void BaselineCompiler::VisitLdaKeyedProperty() {
- CallBuiltin(Builtins::kKeyedLoadICBaseline,
- RegisterOperand(0), // object
- kInterpreterAccumulatorRegister, // key
- IndexAsTagged(1)); // slot
+ CallBuiltin<Builtins::kKeyedLoadICBaseline>(
+ RegisterOperand(0), // object
+ kInterpreterAccumulatorRegister, // key
+ IndexAsTagged(1)); // slot
}
void BaselineCompiler::VisitLdaModuleVariable() {
@@ -878,11 +882,11 @@ void BaselineCompiler::VisitStaModuleVariable() {
}
void BaselineCompiler::VisitStaNamedProperty() {
- CallBuiltin(Builtins::kStoreICBaseline,
- RegisterOperand(0), // object
- Constant<Name>(1), // name
- kInterpreterAccumulatorRegister, // value
- IndexAsTagged(2)); // slot
+ CallBuiltin<Builtins::kStoreICBaseline>(
+ RegisterOperand(0), // object
+ Constant<Name>(1), // name
+ kInterpreterAccumulatorRegister, // value
+ IndexAsTagged(2)); // slot
}
void BaselineCompiler::VisitStaNamedPropertyNoFeedback() {
@@ -900,19 +904,19 @@ void BaselineCompiler::VisitStaNamedOwnProperty() {
}
void BaselineCompiler::VisitStaKeyedProperty() {
- CallBuiltin(Builtins::kKeyedStoreICBaseline,
- RegisterOperand(0), // object
- RegisterOperand(1), // key
- kInterpreterAccumulatorRegister, // value
- IndexAsTagged(2)); // slot
+ CallBuiltin<Builtins::kKeyedStoreICBaseline>(
+ RegisterOperand(0), // object
+ RegisterOperand(1), // key
+ kInterpreterAccumulatorRegister, // value
+ IndexAsTagged(2)); // slot
}
void BaselineCompiler::VisitStaInArrayLiteral() {
- CallBuiltin(Builtins::kStoreInArrayLiteralICBaseline,
- RegisterOperand(0), // object
- RegisterOperand(1), // name
- kInterpreterAccumulatorRegister, // value
- IndexAsTagged(2)); // slot
+ CallBuiltin<Builtins::kStoreInArrayLiteralICBaseline>(
+ RegisterOperand(0), // object
+ RegisterOperand(1), // name
+ kInterpreterAccumulatorRegister, // value
+ IndexAsTagged(2)); // slot
}
void BaselineCompiler::VisitStaDataPropertyInLiteral() {
@@ -934,140 +938,149 @@ void BaselineCompiler::VisitCollectTypeProfile() {
}
void BaselineCompiler::VisitAdd() {
- CallBuiltin(Builtins::kAdd_Baseline, RegisterOperand(0),
- kInterpreterAccumulatorRegister, Index(1));
+ CallBuiltin<Builtins::kAdd_Baseline>(
+ RegisterOperand(0), kInterpreterAccumulatorRegister, Index(1));
}
void BaselineCompiler::VisitSub() {
- CallBuiltin(Builtins::kSubtract_Baseline, RegisterOperand(0),
- kInterpreterAccumulatorRegister, Index(1));
+ CallBuiltin<Builtins::kSubtract_Baseline>(
+ RegisterOperand(0), kInterpreterAccumulatorRegister, Index(1));
}
void BaselineCompiler::VisitMul() {
- CallBuiltin(Builtins::kMultiply_Baseline, RegisterOperand(0),
- kInterpreterAccumulatorRegister, Index(1));
+ CallBuiltin<Builtins::kMultiply_Baseline>(
+ RegisterOperand(0), kInterpreterAccumulatorRegister, Index(1));
}
void BaselineCompiler::VisitDiv() {
- CallBuiltin(Builtins::kDivide_Baseline, RegisterOperand(0),
- kInterpreterAccumulatorRegister, Index(1));
+ CallBuiltin<Builtins::kDivide_Baseline>(
+ RegisterOperand(0), kInterpreterAccumulatorRegister, Index(1));
}
void BaselineCompiler::VisitMod() {
- CallBuiltin(Builtins::kModulus_Baseline, RegisterOperand(0),
- kInterpreterAccumulatorRegister, Index(1));
+ CallBuiltin<Builtins::kModulus_Baseline>(
+ RegisterOperand(0), kInterpreterAccumulatorRegister, Index(1));
}
void BaselineCompiler::VisitExp() {
- CallBuiltin(Builtins::kExponentiate_Baseline, RegisterOperand(0),
- kInterpreterAccumulatorRegister, Index(1));
+ CallBuiltin<Builtins::kExponentiate_Baseline>(
+ RegisterOperand(0), kInterpreterAccumulatorRegister, Index(1));
}
void BaselineCompiler::VisitBitwiseOr() {
- CallBuiltin(Builtins::kBitwiseOr_Baseline, RegisterOperand(0),
- kInterpreterAccumulatorRegister, Index(1));
+ CallBuiltin<Builtins::kBitwiseOr_Baseline>(
+ RegisterOperand(0), kInterpreterAccumulatorRegister, Index(1));
}
void BaselineCompiler::VisitBitwiseXor() {
- CallBuiltin(Builtins::kBitwiseXor_Baseline, RegisterOperand(0),
- kInterpreterAccumulatorRegister, Index(1));
+ CallBuiltin<Builtins::kBitwiseXor_Baseline>(
+ RegisterOperand(0), kInterpreterAccumulatorRegister, Index(1));
}
void BaselineCompiler::VisitBitwiseAnd() {
- CallBuiltin(Builtins::kBitwiseAnd_Baseline, RegisterOperand(0),
- kInterpreterAccumulatorRegister, Index(1));
+ CallBuiltin<Builtins::kBitwiseAnd_Baseline>(
+ RegisterOperand(0), kInterpreterAccumulatorRegister, Index(1));
}
void BaselineCompiler::VisitShiftLeft() {
- CallBuiltin(Builtins::kShiftLeft_Baseline, RegisterOperand(0),
- kInterpreterAccumulatorRegister, Index(1));
+ CallBuiltin<Builtins::kShiftLeft_Baseline>(
+ RegisterOperand(0), kInterpreterAccumulatorRegister, Index(1));
}
void BaselineCompiler::VisitShiftRight() {
- CallBuiltin(Builtins::kShiftRight_Baseline, RegisterOperand(0),
- kInterpreterAccumulatorRegister, Index(1));
+ CallBuiltin<Builtins::kShiftRight_Baseline>(
+ RegisterOperand(0), kInterpreterAccumulatorRegister, Index(1));
}
void BaselineCompiler::VisitShiftRightLogical() {
- CallBuiltin(Builtins::kShiftRightLogical_Baseline, RegisterOperand(0),
- kInterpreterAccumulatorRegister, Index(1));
-}
-
-void BaselineCompiler::BuildBinopWithConstant(Builtins::Name builtin_name) {
- CallBuiltin(builtin_name, kInterpreterAccumulatorRegister, IntAsSmi(0),
- Index(1));
+ CallBuiltin<Builtins::kShiftRightLogical_Baseline>(
+ RegisterOperand(0), kInterpreterAccumulatorRegister, Index(1));
}
void BaselineCompiler::VisitAddSmi() {
- BuildBinopWithConstant(Builtins::kAdd_Baseline);
+ CallBuiltin<Builtins::kAdd_Baseline>(kInterpreterAccumulatorRegister,
+ IntAsSmi(0), Index(1));
}
void BaselineCompiler::VisitSubSmi() {
- BuildBinopWithConstant(Builtins::kSubtract_Baseline);
+ CallBuiltin<Builtins::kSubtract_Baseline>(kInterpreterAccumulatorRegister,
+ IntAsSmi(0), Index(1));
}
void BaselineCompiler::VisitMulSmi() {
- BuildBinopWithConstant(Builtins::kMultiply_Baseline);
+ CallBuiltin<Builtins::kMultiply_Baseline>(kInterpreterAccumulatorRegister,
+ IntAsSmi(0), Index(1));
}
void BaselineCompiler::VisitDivSmi() {
- BuildBinopWithConstant(Builtins::kDivide_Baseline);
+ CallBuiltin<Builtins::kDivide_Baseline>(kInterpreterAccumulatorRegister,
+ IntAsSmi(0), Index(1));
}
void BaselineCompiler::VisitModSmi() {
- BuildBinopWithConstant(Builtins::kModulus_Baseline);
+ CallBuiltin<Builtins::kModulus_Baseline>(kInterpreterAccumulatorRegister,
+ IntAsSmi(0), Index(1));
}
void BaselineCompiler::VisitExpSmi() {
- BuildBinopWithConstant(Builtins::kExponentiate_Baseline);
+ CallBuiltin<Builtins::kExponentiate_Baseline>(kInterpreterAccumulatorRegister,
+ IntAsSmi(0), Index(1));
}
void BaselineCompiler::VisitBitwiseOrSmi() {
- BuildBinopWithConstant(Builtins::kBitwiseOr_Baseline);
+ CallBuiltin<Builtins::kBitwiseOr_Baseline>(kInterpreterAccumulatorRegister,
+ IntAsSmi(0), Index(1));
}
void BaselineCompiler::VisitBitwiseXorSmi() {
- BuildBinopWithConstant(Builtins::kBitwiseXor_Baseline);
+ CallBuiltin<Builtins::kBitwiseXor_Baseline>(kInterpreterAccumulatorRegister,
+ IntAsSmi(0), Index(1));
}
void BaselineCompiler::VisitBitwiseAndSmi() {
- BuildBinopWithConstant(Builtins::kBitwiseAnd_Baseline);
+ CallBuiltin<Builtins::kBitwiseAnd_Baseline>(kInterpreterAccumulatorRegister,
+ IntAsSmi(0), Index(1));
}
void BaselineCompiler::VisitShiftLeftSmi() {
- BuildBinopWithConstant(Builtins::kShiftLeft_Baseline);
+ CallBuiltin<Builtins::kShiftLeft_Baseline>(kInterpreterAccumulatorRegister,
+ IntAsSmi(0), Index(1));
}
void BaselineCompiler::VisitShiftRightSmi() {
- BuildBinopWithConstant(Builtins::kShiftRight_Baseline);
+ CallBuiltin<Builtins::kShiftRight_Baseline>(kInterpreterAccumulatorRegister,
+ IntAsSmi(0), Index(1));
}
void BaselineCompiler::VisitShiftRightLogicalSmi() {
- BuildBinopWithConstant(Builtins::kShiftRightLogical_Baseline);
+ CallBuiltin<Builtins::kShiftRightLogical_Baseline>(
+ kInterpreterAccumulatorRegister, IntAsSmi(0), Index(1));
}
-void BaselineCompiler::BuildUnop(Builtins::Name builtin_name) {
- CallBuiltin(builtin_name,
- kInterpreterAccumulatorRegister, // value
- Index(0)); // slot
+void BaselineCompiler::VisitInc() {
+ CallBuiltin<Builtins::kIncrement_Baseline>(kInterpreterAccumulatorRegister,
+ Index(0));
}
-void BaselineCompiler::VisitInc() { BuildUnop(Builtins::kIncrement_Baseline); }
-
-void BaselineCompiler::VisitDec() { BuildUnop(Builtins::kDecrement_Baseline); }
+void BaselineCompiler::VisitDec() {
+ CallBuiltin<Builtins::kDecrement_Baseline>(kInterpreterAccumulatorRegister,
+ Index(0));
+}
-void BaselineCompiler::VisitNegate() { BuildUnop(Builtins::kNegate_Baseline); }
+void BaselineCompiler::VisitNegate() {
+ CallBuiltin<Builtins::kNegate_Baseline>(kInterpreterAccumulatorRegister,
+ Index(0));
+}
void BaselineCompiler::VisitBitwiseNot() {
- BuildUnop(Builtins::kBitwiseNot_Baseline);
+ CallBuiltin<Builtins::kBitwiseNot_Baseline>(kInterpreterAccumulatorRegister,
+ Index(0));
}
void BaselineCompiler::VisitToBooleanLogicalNot() {
SelectBooleanConstant(kInterpreterAccumulatorRegister,
[&](Label* if_true, Label::Distance distance) {
- JumpIfToBoolean(false,
- kInterpreterAccumulatorRegister,
- if_true, distance);
+ JumpIfToBoolean(false, if_true, distance);
});
}
@@ -1081,23 +1094,23 @@ void BaselineCompiler::VisitLogicalNot() {
}
void BaselineCompiler::VisitTypeOf() {
- CallBuiltin(Builtins::kTypeof, kInterpreterAccumulatorRegister);
+ CallBuiltin<Builtins::kTypeof>(kInterpreterAccumulatorRegister);
}
void BaselineCompiler::VisitDeletePropertyStrict() {
BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_);
Register scratch = scratch_scope.AcquireScratch();
__ Move(scratch, kInterpreterAccumulatorRegister);
- CallBuiltin(Builtins::kDeleteProperty, RegisterOperand(0), scratch,
- Smi::FromEnum(LanguageMode::kStrict));
+ CallBuiltin<Builtins::kDeleteProperty>(RegisterOperand(0), scratch,
+ Smi::FromEnum(LanguageMode::kStrict));
}
void BaselineCompiler::VisitDeletePropertySloppy() {
BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_);
Register scratch = scratch_scope.AcquireScratch();
__ Move(scratch, kInterpreterAccumulatorRegister);
- CallBuiltin(Builtins::kDeleteProperty, RegisterOperand(0), scratch,
- Smi::FromEnum(LanguageMode::kSloppy));
+ CallBuiltin<Builtins::kDeleteProperty>(RegisterOperand(0), scratch,
+ Smi::FromEnum(LanguageMode::kSloppy));
}
void BaselineCompiler::VisitGetSuperConstructor() {
@@ -1106,87 +1119,115 @@ void BaselineCompiler::VisitGetSuperConstructor() {
__ LoadPrototype(prototype, kInterpreterAccumulatorRegister);
StoreRegister(0, prototype);
}
-template <typename... Args>
-void BaselineCompiler::BuildCall(ConvertReceiverMode mode, uint32_t slot,
- uint32_t arg_count, Args... args) {
- Builtins::Name builtin;
+
+namespace {
+constexpr Builtins::Name ConvertReceiverModeToCompactBuiltin(
+ ConvertReceiverMode mode) {
switch (mode) {
case ConvertReceiverMode::kAny:
- builtin = Builtins::kCall_ReceiverIsAny_Baseline;
+ return Builtins::kCall_ReceiverIsAny_Baseline_Compact;
break;
case ConvertReceiverMode::kNullOrUndefined:
- builtin = Builtins::kCall_ReceiverIsNullOrUndefined_Baseline;
+ return Builtins::kCall_ReceiverIsNullOrUndefined_Baseline_Compact;
break;
case ConvertReceiverMode::kNotNullOrUndefined:
- builtin = Builtins::kCall_ReceiverIsNotNullOrUndefined_Baseline;
+ return Builtins::kCall_ReceiverIsNotNullOrUndefined_Baseline_Compact;
break;
- default:
- UNREACHABLE();
}
- CallBuiltin(builtin,
- RegisterOperand(0), // kFunction
- arg_count, // kActualArgumentsCount
- slot, // kSlot
- args...); // Arguments
+}
+constexpr Builtins::Name ConvertReceiverModeToBuiltin(
+ ConvertReceiverMode mode) {
+ switch (mode) {
+ case ConvertReceiverMode::kAny:
+ return Builtins::kCall_ReceiverIsAny_Baseline;
+ break;
+ case ConvertReceiverMode::kNullOrUndefined:
+ return Builtins::kCall_ReceiverIsNullOrUndefined_Baseline;
+ break;
+ case ConvertReceiverMode::kNotNullOrUndefined:
+ return Builtins::kCall_ReceiverIsNotNullOrUndefined_Baseline;
+ break;
+ }
+}
+} // namespace
+
+template <ConvertReceiverMode kMode, typename... Args>
+void BaselineCompiler::BuildCall(uint32_t slot, uint32_t arg_count,
+ Args... args) {
+ uint32_t bitfield;
+ if (CallTrampoline_Baseline_CompactDescriptor::EncodeBitField(arg_count, slot,
+ &bitfield)) {
+ CallBuiltin<ConvertReceiverModeToCompactBuiltin(kMode)>(
+ RegisterOperand(0), // kFunction
+ bitfield, // kActualArgumentsCount | kSlot
+ args...); // Arguments
+ } else {
+ CallBuiltin<ConvertReceiverModeToBuiltin(kMode)>(
+ RegisterOperand(0), // kFunction
+ arg_count, // kActualArgumentsCount
+ slot, // kSlot
+ args...); // Arguments
+ }
}
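
For context on the compact path above: the *_Baseline_Compact call builtins take the argument count and the feedback slot packed into one bitfield operand, and EncodeBitField reports whether both values fit; otherwise the wide builtin with separate operands is used. A hedged sketch of such a packing helper (the field widths here are made up for illustration; the real layout lives in CallTrampoline_Baseline_CompactDescriptor):

    #include <cstdint>

    constexpr uint32_t kArgCountBits = 8;   // hypothetical width
    constexpr uint32_t kSlotBits = 24;      // hypothetical width

    inline bool EncodeBitFieldSketch(uint32_t arg_count, uint32_t slot,
                                     uint32_t* out) {
      if (arg_count >= (1u << kArgCountBits)) return false;  // does not fit
      if (slot >= (1u << kSlotBits)) return false;           // does not fit
      *out = arg_count | (slot << kArgCountBits);
      return true;  // caller may use the compact builtin
    }
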
void BaselineCompiler::VisitCallAnyReceiver() {
interpreter::RegisterList args = iterator().GetRegisterListOperand(1);
uint32_t arg_count = args.register_count() - 1; // Remove receiver.
- BuildCall(ConvertReceiverMode::kAny, Index(3), arg_count, args);
+ BuildCall<ConvertReceiverMode::kAny>(Index(3), arg_count, args);
}
void BaselineCompiler::VisitCallProperty() {
interpreter::RegisterList args = iterator().GetRegisterListOperand(1);
uint32_t arg_count = args.register_count() - 1; // Remove receiver.
- BuildCall(ConvertReceiverMode::kNotNullOrUndefined, Index(3), arg_count,
- args);
+ BuildCall<ConvertReceiverMode::kNotNullOrUndefined>(Index(3), arg_count,
+ args);
}
void BaselineCompiler::VisitCallProperty0() {
- BuildCall(ConvertReceiverMode::kNotNullOrUndefined, Index(2), 0,
- RegisterOperand(1));
+ BuildCall<ConvertReceiverMode::kNotNullOrUndefined>(Index(2), 0,
+ RegisterOperand(1));
}
void BaselineCompiler::VisitCallProperty1() {
- BuildCall(ConvertReceiverMode::kNotNullOrUndefined, Index(3), 1,
- RegisterOperand(1), RegisterOperand(2));
+ BuildCall<ConvertReceiverMode::kNotNullOrUndefined>(
+ Index(3), 1, RegisterOperand(1), RegisterOperand(2));
}
void BaselineCompiler::VisitCallProperty2() {
- BuildCall(ConvertReceiverMode::kNotNullOrUndefined, Index(4), 2,
- RegisterOperand(1), RegisterOperand(2), RegisterOperand(3));
+ BuildCall<ConvertReceiverMode::kNotNullOrUndefined>(
+ Index(4), 2, RegisterOperand(1), RegisterOperand(2), RegisterOperand(3));
}
void BaselineCompiler::VisitCallUndefinedReceiver() {
interpreter::RegisterList args = iterator().GetRegisterListOperand(1);
uint32_t arg_count = args.register_count();
- BuildCall(ConvertReceiverMode::kNullOrUndefined, Index(3), arg_count,
- RootIndex::kUndefinedValue, args);
+ BuildCall<ConvertReceiverMode::kNullOrUndefined>(
+ Index(3), arg_count, RootIndex::kUndefinedValue, args);
}
void BaselineCompiler::VisitCallUndefinedReceiver0() {
- BuildCall(ConvertReceiverMode::kNullOrUndefined, Index(1), 0,
- RootIndex::kUndefinedValue);
+ BuildCall<ConvertReceiverMode::kNullOrUndefined>(Index(1), 0,
+ RootIndex::kUndefinedValue);
}
void BaselineCompiler::VisitCallUndefinedReceiver1() {
- BuildCall(ConvertReceiverMode::kNullOrUndefined, Index(2), 1,
- RootIndex::kUndefinedValue, RegisterOperand(1));
+ BuildCall<ConvertReceiverMode::kNullOrUndefined>(
+ Index(2), 1, RootIndex::kUndefinedValue, RegisterOperand(1));
}
void BaselineCompiler::VisitCallUndefinedReceiver2() {
- BuildCall(ConvertReceiverMode::kNullOrUndefined, Index(3), 2,
- RootIndex::kUndefinedValue, RegisterOperand(1), RegisterOperand(2));
+ BuildCall<ConvertReceiverMode::kNullOrUndefined>(
+ Index(3), 2, RootIndex::kUndefinedValue, RegisterOperand(1),
+ RegisterOperand(2));
}
void BaselineCompiler::VisitCallNoFeedback() {
interpreter::RegisterList args = iterator().GetRegisterListOperand(1);
uint32_t arg_count = args.register_count();
- CallBuiltin(Builtins::kCall_ReceiverIsAny,
- RegisterOperand(0), // kFunction
- arg_count - 1, // kActualArgumentsCount
- args);
+ CallBuiltin<Builtins::kCall_ReceiverIsAny>(
+ RegisterOperand(0), // kFunction
+ arg_count - 1, // kActualArgumentsCount
+ args);
}
void BaselineCompiler::VisitCallWithSpread() {
@@ -1198,12 +1239,12 @@ void BaselineCompiler::VisitCallWithSpread() {
uint32_t arg_count = args.register_count() - 1; // Remove receiver.
- CallBuiltin(Builtins::kCallWithSpread_Baseline,
- RegisterOperand(0), // kFunction
- arg_count, // kActualArgumentsCount
- spread_register, // kSpread
- Index(3), // kSlot
- args);
+ CallBuiltin<Builtins::kCallWithSpread_Baseline>(
+ RegisterOperand(0), // kFunction
+ arg_count, // kActualArgumentsCount
+ spread_register, // kSpread
+ Index(3), // kSlot
+ args);
}
void BaselineCompiler::VisitCallRuntime() {
@@ -1226,11 +1267,11 @@ void BaselineCompiler::VisitCallJSRuntime() {
__ LoadContext(kContextRegister);
__ LoadNativeContextSlot(kJavaScriptCallTargetRegister,
iterator().GetNativeContextIndexOperand(0));
- CallBuiltin(Builtins::kCall_ReceiverIsNullOrUndefined,
- kJavaScriptCallTargetRegister, // kFunction
- arg_count, // kActualArgumentsCount
- RootIndex::kUndefinedValue, // kReceiver
- args);
+ CallBuiltin<Builtins::kCall_ReceiverIsNullOrUndefined>(
+ kJavaScriptCallTargetRegister, // kFunction
+ arg_count, // kActualArgumentsCount
+ RootIndex::kUndefinedValue, // kReceiver
+ args);
}
void BaselineCompiler::VisitInvokeIntrinsic() {
@@ -1301,29 +1342,25 @@ void BaselineCompiler::VisitIntrinsicIsSmi(interpreter::RegisterList args) {
void BaselineCompiler::VisitIntrinsicCopyDataProperties(
interpreter::RegisterList args) {
- CallBuiltin(Builtins::kCopyDataProperties, args);
+ CallBuiltin<Builtins::kCopyDataProperties>(args);
}
void BaselineCompiler::VisitIntrinsicCreateIterResultObject(
interpreter::RegisterList args) {
- CallBuiltin(Builtins::kCreateIterResultObject, args);
+ CallBuiltin<Builtins::kCreateIterResultObject>(args);
}
void BaselineCompiler::VisitIntrinsicHasProperty(
interpreter::RegisterList args) {
- CallBuiltin(Builtins::kHasProperty, args);
-}
-
-void BaselineCompiler::VisitIntrinsicToString(interpreter::RegisterList args) {
- CallBuiltin(Builtins::kToString, args);
+ CallBuiltin<Builtins::kHasProperty>(args);
}
void BaselineCompiler::VisitIntrinsicToLength(interpreter::RegisterList args) {
- CallBuiltin(Builtins::kToLength, args);
+ CallBuiltin<Builtins::kToLength>(args);
}
void BaselineCompiler::VisitIntrinsicToObject(interpreter::RegisterList args) {
- CallBuiltin(Builtins::kToObject, args);
+ CallBuiltin<Builtins::kToObject>(args);
}
void BaselineCompiler::VisitIntrinsicCall(interpreter::RegisterList args) {
@@ -1335,20 +1372,20 @@ void BaselineCompiler::VisitIntrinsicCall(interpreter::RegisterList args) {
args = args.PopLeft();
uint32_t arg_count = args.register_count();
- CallBuiltin(Builtins::kCall_ReceiverIsAny,
- kJavaScriptCallTargetRegister, // kFunction
- arg_count - 1, // kActualArgumentsCount
- args);
+ CallBuiltin<Builtins::kCall_ReceiverIsAny>(
+ kJavaScriptCallTargetRegister, // kFunction
+ arg_count - 1, // kActualArgumentsCount
+ args);
}
void BaselineCompiler::VisitIntrinsicCreateAsyncFromSyncIterator(
interpreter::RegisterList args) {
- CallBuiltin(Builtins::kCreateAsyncFromSyncIteratorBaseline, args[0]);
+ CallBuiltin<Builtins::kCreateAsyncFromSyncIteratorBaseline>(args[0]);
}
void BaselineCompiler::VisitIntrinsicCreateJSGeneratorObject(
interpreter::RegisterList args) {
- CallBuiltin(Builtins::kCreateGeneratorObject, args);
+ CallBuiltin<Builtins::kCreateGeneratorObject>(args);
}
void BaselineCompiler::VisitIntrinsicGeneratorGetResumeMode(
@@ -1370,69 +1407,69 @@ void BaselineCompiler::VisitIntrinsicGeneratorClose(
void BaselineCompiler::VisitIntrinsicGetImportMetaObject(
interpreter::RegisterList args) {
- CallBuiltin(Builtins::kGetImportMetaObjectBaseline);
+ CallBuiltin<Builtins::kGetImportMetaObjectBaseline>();
}
void BaselineCompiler::VisitIntrinsicAsyncFunctionAwaitCaught(
interpreter::RegisterList args) {
- CallBuiltin(Builtins::kAsyncFunctionAwaitCaught, args);
+ CallBuiltin<Builtins::kAsyncFunctionAwaitCaught>(args);
}
void BaselineCompiler::VisitIntrinsicAsyncFunctionAwaitUncaught(
interpreter::RegisterList args) {
- CallBuiltin(Builtins::kAsyncFunctionAwaitUncaught, args);
+ CallBuiltin<Builtins::kAsyncFunctionAwaitUncaught>(args);
}
void BaselineCompiler::VisitIntrinsicAsyncFunctionEnter(
interpreter::RegisterList args) {
- CallBuiltin(Builtins::kAsyncFunctionEnter, args);
+ CallBuiltin<Builtins::kAsyncFunctionEnter>(args);
}
void BaselineCompiler::VisitIntrinsicAsyncFunctionReject(
interpreter::RegisterList args) {
- CallBuiltin(Builtins::kAsyncFunctionReject, args);
+ CallBuiltin<Builtins::kAsyncFunctionReject>(args);
}
void BaselineCompiler::VisitIntrinsicAsyncFunctionResolve(
interpreter::RegisterList args) {
- CallBuiltin(Builtins::kAsyncFunctionResolve, args);
+ CallBuiltin<Builtins::kAsyncFunctionResolve>(args);
}
void BaselineCompiler::VisitIntrinsicAsyncGeneratorAwaitCaught(
interpreter::RegisterList args) {
- CallBuiltin(Builtins::kAsyncGeneratorAwaitCaught, args);
+ CallBuiltin<Builtins::kAsyncGeneratorAwaitCaught>(args);
}
void BaselineCompiler::VisitIntrinsicAsyncGeneratorAwaitUncaught(
interpreter::RegisterList args) {
- CallBuiltin(Builtins::kAsyncGeneratorAwaitUncaught, args);
+ CallBuiltin<Builtins::kAsyncGeneratorAwaitUncaught>(args);
}
void BaselineCompiler::VisitIntrinsicAsyncGeneratorReject(
interpreter::RegisterList args) {
- CallBuiltin(Builtins::kAsyncGeneratorReject, args);
+ CallBuiltin<Builtins::kAsyncGeneratorReject>(args);
}
void BaselineCompiler::VisitIntrinsicAsyncGeneratorResolve(
interpreter::RegisterList args) {
- CallBuiltin(Builtins::kAsyncGeneratorResolve, args);
+ CallBuiltin<Builtins::kAsyncGeneratorResolve>(args);
}
void BaselineCompiler::VisitIntrinsicAsyncGeneratorYield(
interpreter::RegisterList args) {
- CallBuiltin(Builtins::kAsyncGeneratorYield, args);
+ CallBuiltin<Builtins::kAsyncGeneratorYield>(args);
}
void BaselineCompiler::VisitConstruct() {
interpreter::RegisterList args = iterator().GetRegisterListOperand(1);
uint32_t arg_count = args.register_count();
- CallBuiltin(Builtins::kConstruct_Baseline,
- RegisterOperand(0), // kFunction
- kInterpreterAccumulatorRegister, // kNewTarget
- arg_count, // kActualArgumentsCount
- Index(3), // kSlot
- RootIndex::kUndefinedValue, // kReceiver
- args);
+ CallBuiltin<Builtins::kConstruct_Baseline>(
+ RegisterOperand(0), // kFunction
+ kInterpreterAccumulatorRegister, // kNewTarget
+ arg_count, // kActualArgumentsCount
+ Index(3), // kSlot
+ RootIndex::kUndefinedValue, // kReceiver
+ args);
}
void BaselineCompiler::VisitConstructWithSpread() {
@@ -1444,51 +1481,50 @@ void BaselineCompiler::VisitConstructWithSpread() {
uint32_t arg_count = args.register_count();
+ using Descriptor =
+ CallInterfaceDescriptorFor<Builtins::kConstructWithSpread_Baseline>::type;
Register new_target =
- Builtins::CallInterfaceDescriptorFor(
- Builtins::kConstructWithSpread_Baseline)
- .GetRegisterParameter(
- ConstructWithSpread_BaselineDescriptor::kNewTarget);
+ Descriptor::GetRegisterParameter(Descriptor::kNewTarget);
__ Move(new_target, kInterpreterAccumulatorRegister);
- CallBuiltin(Builtins::kConstructWithSpread_Baseline,
- RegisterOperand(0), // kFunction
- new_target, // kNewTarget
- arg_count, // kActualArgumentsCount
- Index(3), // kSlot
- spread_register, // kSpread
- RootIndex::kUndefinedValue, // kReceiver
- args);
-}
-
-void BaselineCompiler::BuildCompare(Builtins::Name builtin_name) {
- CallBuiltin(builtin_name, RegisterOperand(0), // lhs
- kInterpreterAccumulatorRegister, // rhs
- Index(1)); // slot
+ CallBuiltin<Builtins::kConstructWithSpread_Baseline>(
+ RegisterOperand(0), // kFunction
+ new_target, // kNewTarget
+ arg_count, // kActualArgumentsCount
+ Index(3), // kSlot
+ spread_register, // kSpread
+ RootIndex::kUndefinedValue, // kReceiver
+ args);
}
void BaselineCompiler::VisitTestEqual() {
- BuildCompare(Builtins::kEqual_Baseline);
+ CallBuiltin<Builtins::kEqual_Baseline>(
+ RegisterOperand(0), kInterpreterAccumulatorRegister, Index(1));
}
void BaselineCompiler::VisitTestEqualStrict() {
- BuildCompare(Builtins::kStrictEqual_Baseline);
+ CallBuiltin<Builtins::kStrictEqual_Baseline>(
+ RegisterOperand(0), kInterpreterAccumulatorRegister, Index(1));
}
void BaselineCompiler::VisitTestLessThan() {
- BuildCompare(Builtins::kLessThan_Baseline);
+ CallBuiltin<Builtins::kLessThan_Baseline>(
+ RegisterOperand(0), kInterpreterAccumulatorRegister, Index(1));
}
void BaselineCompiler::VisitTestGreaterThan() {
- BuildCompare(Builtins::kGreaterThan_Baseline);
+ CallBuiltin<Builtins::kGreaterThan_Baseline>(
+ RegisterOperand(0), kInterpreterAccumulatorRegister, Index(1));
}
void BaselineCompiler::VisitTestLessThanOrEqual() {
- BuildCompare(Builtins::kLessThanOrEqual_Baseline);
+ CallBuiltin<Builtins::kLessThanOrEqual_Baseline>(
+ RegisterOperand(0), kInterpreterAccumulatorRegister, Index(1));
}
void BaselineCompiler::VisitTestGreaterThanOrEqual() {
- BuildCompare(Builtins::kGreaterThanOrEqual_Baseline);
+ CallBuiltin<Builtins::kGreaterThanOrEqual_Baseline>(
+ RegisterOperand(0), kInterpreterAccumulatorRegister, Index(1));
}
void BaselineCompiler::VisitTestReferenceEqual() {
@@ -1502,21 +1538,21 @@ void BaselineCompiler::VisitTestReferenceEqual() {
}
void BaselineCompiler::VisitTestInstanceOf() {
- Register callable =
- Builtins::CallInterfaceDescriptorFor(Builtins::kInstanceOf_Baseline)
- .GetRegisterParameter(Compare_BaselineDescriptor::kRight);
+ using Descriptor =
+ CallInterfaceDescriptorFor<Builtins::kInstanceOf_Baseline>::type;
+ Register callable = Descriptor::GetRegisterParameter(Descriptor::kRight);
__ Move(callable, kInterpreterAccumulatorRegister);
- CallBuiltin(Builtins::kInstanceOf_Baseline,
- RegisterOperand(0), // object
- callable, // callable
- Index(1)); // slot
+
+ CallBuiltin<Builtins::kInstanceOf_Baseline>(RegisterOperand(0), // object
+ callable, // callable
+ Index(1)); // slot
}
void BaselineCompiler::VisitTestIn() {
- CallBuiltin(Builtins::kKeyedHasICBaseline,
- kInterpreterAccumulatorRegister, // object
- RegisterOperand(0), // name
- IndexAsTagged(1)); // slot
+ CallBuiltin<Builtins::kKeyedHasICBaseline>(
+ kInterpreterAccumulatorRegister, // object
+ RegisterOperand(0), // name
+ IndexAsTagged(1)); // slot
}
void BaselineCompiler::VisitTestUndetectable() {
@@ -1727,36 +1763,36 @@ void BaselineCompiler::VisitTestTypeOf() {
void BaselineCompiler::VisitToName() {
SaveAccumulatorScope save_accumulator(&basm_);
- CallBuiltin(Builtins::kToName, kInterpreterAccumulatorRegister);
+ CallBuiltin<Builtins::kToName>(kInterpreterAccumulatorRegister);
StoreRegister(0, kInterpreterAccumulatorRegister);
}
void BaselineCompiler::VisitToNumber() {
- CallBuiltin(Builtins::kToNumber_Baseline, kInterpreterAccumulatorRegister,
- Index(0));
+ CallBuiltin<Builtins::kToNumber_Baseline>(kInterpreterAccumulatorRegister,
+ Index(0));
}
void BaselineCompiler::VisitToNumeric() {
- CallBuiltin(Builtins::kToNumeric_Baseline, kInterpreterAccumulatorRegister,
- Index(0));
+ CallBuiltin<Builtins::kToNumeric_Baseline>(kInterpreterAccumulatorRegister,
+ Index(0));
}
void BaselineCompiler::VisitToObject() {
SaveAccumulatorScope save_accumulator(&basm_);
- CallBuiltin(Builtins::kToObject, kInterpreterAccumulatorRegister);
+ CallBuiltin<Builtins::kToObject>(kInterpreterAccumulatorRegister);
StoreRegister(0, kInterpreterAccumulatorRegister);
}
void BaselineCompiler::VisitToString() {
- CallBuiltin(Builtins::kToString, kInterpreterAccumulatorRegister);
+ CallBuiltin<Builtins::kToString>(kInterpreterAccumulatorRegister);
}
void BaselineCompiler::VisitCreateRegExpLiteral() {
- CallBuiltin(Builtins::kCreateRegExpLiteral,
- FeedbackVector(), // feedback vector
- IndexAsTagged(1), // slot
- Constant<HeapObject>(0), // pattern
- FlagAsSmi(2)); // flags
+ CallBuiltin<Builtins::kCreateRegExpLiteral>(
+ FeedbackVector(), // feedback vector
+ IndexAsTagged(1), // slot
+ Constant<HeapObject>(0), // pattern
+ FlagAsSmi(2)); // flags
}
void BaselineCompiler::VisitCreateArrayLiteral() {
@@ -1765,11 +1801,11 @@ void BaselineCompiler::VisitCreateArrayLiteral() {
interpreter::CreateArrayLiteralFlags::FlagsBits::decode(flags));
if (flags &
interpreter::CreateArrayLiteralFlags::FastCloneSupportedBit::kMask) {
- CallBuiltin(Builtins::kCreateShallowArrayLiteral,
- FeedbackVector(), // feedback vector
- IndexAsTagged(1), // slot
- Constant<HeapObject>(0), // constant elements
- Smi::FromInt(flags_raw)); // flags
+ CallBuiltin<Builtins::kCreateShallowArrayLiteral>(
+ FeedbackVector(), // feedback vector
+ IndexAsTagged(1), // slot
+ Constant<HeapObject>(0), // constant elements
+ Smi::FromInt(flags_raw)); // flags
} else {
CallRuntime(Runtime::kCreateArrayLiteral,
FeedbackVector(), // feedback vector
@@ -1780,13 +1816,13 @@ void BaselineCompiler::VisitCreateArrayLiteral() {
}
void BaselineCompiler::VisitCreateArrayFromIterable() {
- CallBuiltin(Builtins::kIterableToListWithSymbolLookup,
- kInterpreterAccumulatorRegister); // iterable
+ CallBuiltin<Builtins::kIterableToListWithSymbolLookup>(
+ kInterpreterAccumulatorRegister); // iterable
}
void BaselineCompiler::VisitCreateEmptyArrayLiteral() {
- CallBuiltin(Builtins::kCreateEmptyArrayLiteral, FeedbackVector(),
- IndexAsTagged(0));
+ CallBuiltin<Builtins::kCreateEmptyArrayLiteral>(FeedbackVector(),
+ IndexAsTagged(0));
}
void BaselineCompiler::VisitCreateObjectLiteral() {
@@ -1795,11 +1831,11 @@ void BaselineCompiler::VisitCreateObjectLiteral() {
interpreter::CreateObjectLiteralFlags::FlagsBits::decode(flags));
if (flags &
interpreter::CreateObjectLiteralFlags::FastCloneSupportedBit::kMask) {
- CallBuiltin(Builtins::kCreateShallowObjectLiteral,
- FeedbackVector(), // feedback vector
- IndexAsTagged(1), // slot
- Constant<ObjectBoilerplateDescription>(0), // boilerplate
- Smi::FromInt(flags_raw)); // flags
+ CallBuiltin<Builtins::kCreateShallowObjectLiteral>(
+ FeedbackVector(), // feedback vector
+ IndexAsTagged(1), // slot
+ Constant<ObjectBoilerplateDescription>(0), // boilerplate
+ Smi::FromInt(flags_raw)); // flags
} else {
CallRuntime(Runtime::kCreateObjectLiteral,
FeedbackVector(), // feedback vector
@@ -1810,39 +1846,39 @@ void BaselineCompiler::VisitCreateObjectLiteral() {
}
void BaselineCompiler::VisitCreateEmptyObjectLiteral() {
- CallBuiltin(Builtins::kCreateEmptyLiteralObject);
+ CallBuiltin<Builtins::kCreateEmptyLiteralObject>();
}
void BaselineCompiler::VisitCloneObject() {
uint32_t flags = Flag(1);
int32_t raw_flags =
interpreter::CreateObjectLiteralFlags::FlagsBits::decode(flags);
- CallBuiltin(Builtins::kCloneObjectICBaseline,
- RegisterOperand(0), // source
- Smi::FromInt(raw_flags), // flags
- IndexAsTagged(2)); // slot
+ CallBuiltin<Builtins::kCloneObjectICBaseline>(
+ RegisterOperand(0), // source
+ Smi::FromInt(raw_flags), // flags
+ IndexAsTagged(2)); // slot
}
void BaselineCompiler::VisitGetTemplateObject() {
BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_);
- CallBuiltin(Builtins::kGetTemplateObject,
- shared_function_info_, // shared function info
- Constant<HeapObject>(0), // description
- Index(1), // slot
- FeedbackVector()); // feedback_vector
+ CallBuiltin<Builtins::kGetTemplateObject>(
+ shared_function_info_, // shared function info
+ Constant<HeapObject>(0), // description
+ Index(1), // slot
+ FeedbackVector()); // feedback_vector
}
void BaselineCompiler::VisitCreateClosure() {
Register feedback_cell =
- Builtins::CallInterfaceDescriptorFor(Builtins::kFastNewClosure)
- .GetRegisterParameter(FastNewClosureDescriptor::kFeedbackCell);
+ FastNewClosureBaselineDescriptor::GetRegisterParameter(
+ FastNewClosureBaselineDescriptor::kFeedbackCell);
LoadClosureFeedbackArray(feedback_cell);
__ LoadFixedArrayElement(feedback_cell, feedback_cell, Index(1));
uint32_t flags = Flag(2);
if (interpreter::CreateClosureFlags::FastNewClosureBit::decode(flags)) {
- CallBuiltin(Builtins::kFastNewClosure, Constant<SharedFunctionInfo>(0),
- feedback_cell);
+ CallBuiltin<Builtins::kFastNewClosureBaseline>(
+ Constant<SharedFunctionInfo>(0), feedback_cell);
} else {
Runtime::FunctionId function_id =
interpreter::CreateClosureFlags::PretenuredBit::decode(flags)
@@ -1868,7 +1904,7 @@ void BaselineCompiler::VisitCreateFunctionContext() {
if (slot_count < static_cast<uint32_t>(
ConstructorBuiltins::MaximumFunctionContextSlots())) {
DCHECK_EQ(info->scope_type(), ScopeType::FUNCTION_SCOPE);
- CallBuiltin(Builtins::kFastNewFunctionContextFunction, info, slot_count);
+ CallBuiltin<Builtins::kFastNewFunctionContextFunction>(info, slot_count);
} else {
CallRuntime(Runtime::kNewFunctionContext, Constant<ScopeInfo>(0));
}
@@ -1880,7 +1916,7 @@ void BaselineCompiler::VisitCreateEvalContext() {
if (slot_count < static_cast<uint32_t>(
ConstructorBuiltins::MaximumFunctionContextSlots())) {
DCHECK_EQ(info->scope_type(), ScopeType::EVAL_SCOPE);
- CallBuiltin(Builtins::kFastNewFunctionContextEval, info, slot_count);
+ CallBuiltin<Builtins::kFastNewFunctionContextEval>(info, slot_count);
} else {
CallRuntime(Runtime::kNewFunctionContext, Constant<ScopeInfo>(0));
}
@@ -1896,16 +1932,16 @@ void BaselineCompiler::VisitCreateMappedArguments() {
if (shared_function_info_->has_duplicate_parameters()) {
CallRuntime(Runtime::kNewSloppyArguments, __ FunctionOperand());
} else {
- CallBuiltin(Builtins::kFastNewSloppyArguments, __ FunctionOperand());
+ CallBuiltin<Builtins::kFastNewSloppyArguments>(__ FunctionOperand());
}
}
void BaselineCompiler::VisitCreateUnmappedArguments() {
- CallBuiltin(Builtins::kFastNewStrictArguments, __ FunctionOperand());
+ CallBuiltin<Builtins::kFastNewStrictArguments>(__ FunctionOperand());
}
void BaselineCompiler::VisitCreateRestParameter() {
- CallBuiltin(Builtins::kFastNewRestArguments, __ FunctionOperand());
+ CallBuiltin<Builtins::kFastNewRestArguments>(__ FunctionOperand());
}
void BaselineCompiler::VisitJumpLoop() {
@@ -1919,7 +1955,7 @@ void BaselineCompiler::VisitJumpLoop() {
int loop_depth = iterator().GetImmediateOperand(1);
__ CompareByte(osr_level, loop_depth);
__ JumpIf(Condition::kUnsignedLessThanEqual, &osr_not_armed);
- CallBuiltin(Builtins::kBaselineOnStackReplacement);
+ CallBuiltin<Builtins::kBaselineOnStackReplacement>();
__ RecordComment("]");
__ Bind(&osr_not_armed);
@@ -1972,16 +2008,14 @@ void BaselineCompiler::VisitJumpIfToBooleanFalseConstant() {
void BaselineCompiler::VisitJumpIfToBooleanTrue() {
Label dont_jump;
- JumpIfToBoolean(false, kInterpreterAccumulatorRegister, &dont_jump,
- Label::kNear);
+ JumpIfToBoolean(false, &dont_jump, Label::kNear);
UpdateInterruptBudgetAndDoInterpreterJump();
__ Bind(&dont_jump);
}
void BaselineCompiler::VisitJumpIfToBooleanFalse() {
Label dont_jump;
- JumpIfToBoolean(true, kInterpreterAccumulatorRegister, &dont_jump,
- Label::kNear);
+ JumpIfToBoolean(true, &dont_jump, Label::kNear);
UpdateInterruptBudgetAndDoInterpreterJump();
__ Bind(&dont_jump);
}
@@ -2057,13 +2091,13 @@ void BaselineCompiler::VisitSwitchOnSmiNoFeedback() {
}
void BaselineCompiler::VisitForInEnumerate() {
- CallBuiltin(Builtins::kForInEnumerate, RegisterOperand(0));
+ CallBuiltin<Builtins::kForInEnumerate>(RegisterOperand(0));
}
void BaselineCompiler::VisitForInPrepare() {
StoreRegister(0, kInterpreterAccumulatorRegister);
- CallBuiltin(Builtins::kForInPrepare, kInterpreterAccumulatorRegister,
- IndexAsTagged(1), FeedbackVector());
+ CallBuiltin<Builtins::kForInPrepare>(kInterpreterAccumulatorRegister,
+ IndexAsTagged(1), FeedbackVector());
interpreter::Register first = iterator().GetRegisterOperand(0);
interpreter::Register second(first.index() + 1);
interpreter::Register third(first.index() + 2);
@@ -2085,13 +2119,12 @@ void BaselineCompiler::VisitForInContinue() {
void BaselineCompiler::VisitForInNext() {
interpreter::Register cache_type, cache_array;
std::tie(cache_type, cache_array) = iterator().GetRegisterPairOperand(2);
- CallBuiltin(Builtins::kForInNext,
- Index(3), // vector slot
- RegisterOperand(0), // object
- cache_array, // cache array
- cache_type, // cache type
- RegisterOperand(1), // index
- FeedbackVector()); // feedback vector
+ CallBuiltin<Builtins::kForInNext>(Index(3), // vector slot
+ RegisterOperand(0), // object
+ cache_array, // cache array
+ cache_type, // cache type
+ RegisterOperand(1), // index
+ FeedbackVector()); // feedback vector
}
void BaselineCompiler::VisitForInStep() {
@@ -2131,8 +2164,8 @@ void BaselineCompiler::VisitReturn() {
int parameter_count_without_receiver =
parameter_count - 1; // Exclude the receiver to simplify the
// computation. We'll account for it at the end.
- TailCallBuiltin(Builtins::kBaselineLeaveFrame,
- parameter_count_without_receiver, -profiling_weight);
+ TailCallBuiltin<Builtins::kBaselineLeaveFrame>(
+ parameter_count_without_receiver, -profiling_weight);
__ RecordComment("]");
}
@@ -2235,10 +2268,11 @@ void BaselineCompiler::VisitSuspendGenerator() {
int bytecode_offset =
BytecodeArray::kHeaderSize + iterator().current_offset();
- CallBuiltin(Builtins::kSuspendGeneratorBaseline, generator_object,
- static_cast<int>(Uint(3)), // suspend_id
- bytecode_offset,
- static_cast<int>(RegisterCount(2))); // register_count
+ CallBuiltin<Builtins::kSuspendGeneratorBaseline>(
+ generator_object,
+ static_cast<int>(Uint(3)), // suspend_id
+ bytecode_offset,
+ static_cast<int>(RegisterCount(2))); // register_count
}
VisitReturn();
}
@@ -2248,26 +2282,27 @@ void BaselineCompiler::VisitResumeGenerator() {
BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_);
Register generator_object = scratch_scope.AcquireScratch();
LoadRegister(generator_object, 0);
- CallBuiltin(Builtins::kResumeGeneratorBaseline, generator_object,
- static_cast<int>(RegisterCount(2))); // register_count
+ CallBuiltin<Builtins::kResumeGeneratorBaseline>(
+ generator_object,
+ static_cast<int>(RegisterCount(2))); // register_count
}
void BaselineCompiler::VisitGetIterator() {
- CallBuiltin(Builtins::kGetIteratorBaseline,
- RegisterOperand(0), // receiver
- IndexAsTagged(1), // load_slot
- IndexAsTagged(2)); // call_slot
+ CallBuiltin<Builtins::kGetIteratorBaseline>(RegisterOperand(0), // receiver
+ IndexAsTagged(1), // load_slot
+ IndexAsTagged(2)); // call_slot
}
void BaselineCompiler::VisitDebugger() {
SaveAccumulatorScope accumulator_scope(&basm_);
- CallBuiltin(Builtins::kHandleDebuggerStatement);
+ CallRuntime(Runtime::kHandleDebuggerStatement);
}
void BaselineCompiler::VisitIncBlockCounter() {
SaveAccumulatorScope accumulator_scope(&basm_);
- CallBuiltin(Builtins::kIncBlockCounter, __ FunctionOperand(),
- IndexAsSmi(0)); // coverage array slot
+ CallBuiltin<Builtins::kIncBlockCounter>(
+ __ FunctionOperand(),
+ IndexAsSmi(0)); // coverage array slot
}
void BaselineCompiler::VisitAbort() {
diff --git a/chromium/v8/src/baseline/baseline-compiler.h b/chromium/v8/src/baseline/baseline-compiler.h
index dbb2f64f6c5..c86d9417e8a 100644
--- a/chromium/v8/src/baseline/baseline-compiler.h
+++ b/chromium/v8/src/baseline/baseline-compiler.h
@@ -8,7 +8,7 @@
// TODO(v8:11421): Remove #if once baseline compiler is ported to other
// architectures.
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
- V8_TARGET_ARCH_ARM
+ V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_RISCV64
#include "src/base/logging.h"
#include "src/base/threaded-list.h"
@@ -39,8 +39,10 @@ class BytecodeOffsetTableBuilder {
previous_pc_ = pc_offset;
}
- template <typename LocalIsolate>
- Handle<ByteArray> ToBytecodeOffsetTable(LocalIsolate* isolate);
+ template <typename IsolateT>
+ Handle<ByteArray> ToBytecodeOffsetTable(IsolateT* isolate);
+
+ void Reserve(size_t size) { bytes_.reserve(size); }
private:
size_t previous_pc_ = 0;
@@ -121,31 +123,21 @@ class BaselineCompiler {
void SelectBooleanConstant(
Register output, std::function<void(Label*, Label::Distance)> jump_func);
- // Returns ToBoolean result into kInterpreterAccumulatorRegister.
- void JumpIfToBoolean(bool do_jump_if_true, Register reg, Label* label,
+ // Jumps based on calling ToBoolean on kInterpreterAccumulatorRegister.
+ void JumpIfToBoolean(bool do_jump_if_true, Label* label,
Label::Distance distance = Label::kFar);
// Call helpers.
- template <typename... Args>
- void CallBuiltin(Builtins::Name builtin, Args... args);
+ template <Builtins::Name kBuiltin, typename... Args>
+ void CallBuiltin(Args... args);
template <typename... Args>
void CallRuntime(Runtime::FunctionId function, Args... args);
- template <typename... Args>
- void TailCallBuiltin(Builtins::Name builtin, Args... args);
+ template <Builtins::Name kBuiltin, typename... Args>
+ void TailCallBuiltin(Args... args);
- void BuildBinop(
- Builtins::Name builtin_name, bool fast_path = false,
- bool check_overflow = false,
- std::function<void(Register, Register)> instruction = [](Register,
- Register) {});
- void BuildUnop(Builtins::Name builtin_name);
- void BuildCompare(Builtins::Name builtin_name);
- void BuildBinopWithConstant(Builtins::Name builtin_name);
-
- template <typename... Args>
- void BuildCall(ConvertReceiverMode mode, uint32_t slot, uint32_t arg_count,
- Args... args);
+ template <ConvertReceiverMode kMode, typename... Args>
+ void BuildCall(uint32_t slot, uint32_t arg_count, Args... args);
#ifdef V8_TRACE_UNOPTIMIZED
void TraceBytecode(Runtime::FunctionId function_id);
diff --git a/chromium/v8/src/baseline/baseline-osr-inl.h b/chromium/v8/src/baseline/baseline-osr-inl.h
new file mode 100644
index 00000000000..d37007f9cf6
--- /dev/null
+++ b/chromium/v8/src/baseline/baseline-osr-inl.h
@@ -0,0 +1,38 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+#ifndef V8_BASELINE_BASELINE_OSR_INL_H_
+#define V8_BASELINE_BASELINE_OSR_INL_H_
+
+#include "src/execution/frames.h"
+#include "src/execution/isolate-inl.h"
+
+namespace v8 {
+namespace internal {
+
+inline void OSRInterpreterFrameToBaseline(Isolate* isolate,
+ Handle<JSFunction> function,
+ UnoptimizedFrame* frame) {
+ IsCompiledScope is_compiled_scope(
+ function->shared().is_compiled_scope(isolate));
+ if (Compiler::CompileBaseline(isolate, function, Compiler::CLEAR_EXCEPTION,
+ &is_compiled_scope)) {
+ if (V8_LIKELY(FLAG_use_osr)) {
+ DCHECK_NOT_NULL(frame);
+ if (FLAG_trace_osr) {
+ CodeTracer::Scope scope(isolate->GetCodeTracer());
+ PrintF(scope.file(),
+ "[OSR - Entry at OSR bytecode offset %d into baseline code]\n",
+ frame->GetBytecodeOffset());
+ }
+ frame->GetBytecodeArray().set_osr_loop_nesting_level(
+ AbstractCode::kMaxLoopNestingMarker);
+ }
+ }
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_BASELINE_BASELINE_OSR_INL_H_
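
A possible call site for the helper above, sketched for illustration only (the surrounding runtime plumbing is assumed and is not shown in this diff):

    // Assumes an UnoptimizedFrame for the function currently being interpreted.
    inline void TierUpToBaselineSketch(Isolate* isolate, UnoptimizedFrame* frame) {
      Handle<JSFunction> function(frame->function(), isolate);
      OSRInterpreterFrameToBaseline(isolate, function, frame);
    }
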
diff --git a/chromium/v8/src/baseline/baseline.cc b/chromium/v8/src/baseline/baseline.cc
index b5355660f94..c7cc130c5ed 100644
--- a/chromium/v8/src/baseline/baseline.cc
+++ b/chromium/v8/src/baseline/baseline.cc
@@ -5,14 +5,16 @@
#include "src/baseline/baseline.h"
#include "src/handles/maybe-handles.h"
+#include "src/objects/shared-function-info.h"
// TODO(v8:11421): Remove #if once baseline compiler is ported to other
// architectures.
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
- V8_TARGET_ARCH_ARM
+ V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_RISCV64
#include "src/baseline/baseline-assembler-inl.h"
#include "src/baseline/baseline-compiler.h"
+#include "src/debug/debug.h"
#include "src/heap/factory-inl.h"
#include "src/logging/counters.h"
#include "src/objects/script-inl.h"
@@ -21,10 +23,36 @@
namespace v8 {
namespace internal {
+bool CanCompileWithBaseline(Isolate* isolate, SharedFunctionInfo shared) {
+ DisallowGarbageCollection no_gc;
+
+ if (!FLAG_sparkplug) return false;
+
+ // Check that short builtin calls are enabled if needed.
+ if (FLAG_sparkplug_needs_short_builtins &&
+ !isolate->is_short_builtin_calls_enabled()) {
+ return false;
+ }
+
+ // Check if we actually have bytecode.
+ if (!shared.HasBytecodeArray()) return false;
+
+ // Do not optimize when debugger needs to hook into every call.
+ if (isolate->debug()->needs_check_on_function_call()) return false;
+
+ // Functions with breakpoints have to stay interpreted.
+ if (shared.HasBreakInfo()) return false;
+
+ // Do not baseline compile if sparkplug is disabled or function doesn't pass
+ // sparkplug_filter.
+ if (!shared.PassesFilter(FLAG_sparkplug_filter)) return false;
+
+ return true;
+}
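
A hedged usage sketch of the new predicate (the wrapper name below is illustrative, not part of this diff): callers are expected to gate Sparkplug compilation on it before requesting baseline code.

    MaybeHandle<Code> TryCompileBaselineSketch(Isolate* isolate,
                                               Handle<SharedFunctionInfo> shared) {
      // Bail out early if Sparkplug is disabled or the function is not eligible.
      if (!CanCompileWithBaseline(isolate, *shared)) return {};
      return GenerateBaselineCode(isolate, shared);
    }
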
+
MaybeHandle<Code> GenerateBaselineCode(Isolate* isolate,
Handle<SharedFunctionInfo> shared) {
- RuntimeCallTimerScope runtimeTimer(isolate,
- RuntimeCallCounterId::kCompileBaseline);
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kCompileBaseline);
baseline::BaselineCompiler compiler(
isolate, shared, handle(shared->GetBytecodeArray(isolate), isolate));
@@ -48,6 +76,10 @@ void EmitReturnBaseline(MacroAssembler* masm) {
namespace v8 {
namespace internal {
+bool CanCompileWithBaseline(Isolate* isolate, SharedFunctionInfo shared) {
+ return false;
+}
+
MaybeHandle<Code> GenerateBaselineCode(Isolate* isolate,
Handle<SharedFunctionInfo> shared) {
UNREACHABLE();
diff --git a/chromium/v8/src/baseline/baseline.h b/chromium/v8/src/baseline/baseline.h
index 2dba2d9674b..10a6e25e4fb 100644
--- a/chromium/v8/src/baseline/baseline.h
+++ b/chromium/v8/src/baseline/baseline.h
@@ -14,6 +14,8 @@ class Code;
class SharedFunctionInfo;
class MacroAssembler;
+bool CanCompileWithBaseline(Isolate* isolate, SharedFunctionInfo shared);
+
MaybeHandle<Code> GenerateBaselineCode(Isolate* isolate,
Handle<SharedFunctionInfo> shared);
diff --git a/chromium/v8/src/baseline/ia32/baseline-assembler-ia32-inl.h b/chromium/v8/src/baseline/ia32/baseline-assembler-ia32-inl.h
index 2cd34aef710..8babb4a5b7b 100644
--- a/chromium/v8/src/baseline/ia32/baseline-assembler-ia32-inl.h
+++ b/chromium/v8/src/baseline/ia32/baseline-assembler-ia32-inl.h
@@ -122,13 +122,13 @@ void BaselineAssembler::JumpIfNotSmi(Register value, Label* target,
void BaselineAssembler::CallBuiltin(Builtins::Name builtin) {
__ RecordCommentForOffHeapTrampoline(builtin);
__ Call(__ EntryFromBuiltinIndexAsOperand(builtin));
- if (FLAG_code_comments) __ RecordComment("]");
+ __ RecordComment("]");
}
void BaselineAssembler::TailCallBuiltin(Builtins::Name builtin) {
__ RecordCommentForOffHeapTrampoline(builtin);
__ jmp(__ EntryFromBuiltinIndexAsOperand(builtin));
- if (FLAG_code_comments) __ RecordComment("]");
+ __ RecordComment("]");
}
void BaselineAssembler::Test(Register value, int mask) {
@@ -147,7 +147,7 @@ void BaselineAssembler::CmpObjectType(Register object,
}
void BaselineAssembler::CmpInstanceType(Register map,
InstanceType instance_type) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
__ movd(xmm0, eax);
__ AssertNotSmi(map);
__ CmpObjectType(map, MAP_TYPE, eax);
@@ -320,7 +320,7 @@ void BaselineAssembler::StoreTaggedFieldWithWriteBarrier(Register target,
Register scratch = scratch_scope.AcquireScratch();
DCHECK(!AreAliased(scratch, target, value));
__ mov(FieldOperand(target, offset), value);
- __ RecordWriteField(target, offset, value, scratch, kDontSaveFPRegs);
+ __ RecordWriteField(target, offset, value, scratch, SaveFPRegsMode::kIgnore);
}
void BaselineAssembler::StoreTaggedFieldNoWriteBarrier(Register target,
int offset,
diff --git a/chromium/v8/src/baseline/ia32/baseline-compiler-ia32-inl.h b/chromium/v8/src/baseline/ia32/baseline-compiler-ia32-inl.h
index 733c05fe185..4d09f536653 100644
--- a/chromium/v8/src/baseline/ia32/baseline-compiler-ia32-inl.h
+++ b/chromium/v8/src/baseline/ia32/baseline-compiler-ia32-inl.h
@@ -18,9 +18,9 @@ namespace baseline {
void BaselineCompiler::Prologue() {
DCHECK_EQ(kJSFunctionRegister, kJavaScriptCallTargetRegister);
int max_frame_size = bytecode_->frame_size() + max_call_args_;
- CallBuiltin(Builtins::kBaselineOutOfLinePrologue, kContextRegister,
- kJSFunctionRegister, kJavaScriptCallArgCountRegister,
- max_frame_size, kJavaScriptCallNewTargetRegister, bytecode_);
+ CallBuiltin<Builtins::kBaselineOutOfLinePrologue>(
+ kContextRegister, kJSFunctionRegister, kJavaScriptCallArgCountRegister,
+ max_frame_size, kJavaScriptCallNewTargetRegister, bytecode_);
PrologueFillFrame();
}
diff --git a/chromium/v8/src/baseline/riscv64/baseline-assembler-riscv64-inl.h b/chromium/v8/src/baseline/riscv64/baseline-assembler-riscv64-inl.h
new file mode 100644
index 00000000000..e0667d3472b
--- /dev/null
+++ b/chromium/v8/src/baseline/riscv64/baseline-assembler-riscv64-inl.h
@@ -0,0 +1,615 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASELINE_RISCV64_BASELINE_ASSEMBLER_RISCV64_INL_H_
+#define V8_BASELINE_RISCV64_BASELINE_ASSEMBLER_RISCV64_INL_H_
+
+#include "src/baseline/baseline-assembler.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/interface-descriptors.h"
+namespace v8 {
+namespace internal {
+namespace baseline {
+
+constexpr Register kTestReg = t0;
+class BaselineAssembler::ScratchRegisterScope {
+ public:
+ explicit ScratchRegisterScope(BaselineAssembler* assembler)
+ : assembler_(assembler),
+ prev_scope_(assembler->scratch_register_scope_),
+ wrapped_scope_(assembler->masm()) {
+ if (!assembler_->scratch_register_scope_) {
+      // The outermost scratch scope additionally makes a couple of extra
+      // registers available.
+ wrapped_scope_.Include(t2, t4);
+ }
+ assembler_->scratch_register_scope_ = this;
+ }
+ ~ScratchRegisterScope() { assembler_->scratch_register_scope_ = prev_scope_; }
+
+ Register AcquireScratch() { return wrapped_scope_.Acquire(); }
+
+ private:
+ BaselineAssembler* assembler_;
+ ScratchRegisterScope* prev_scope_;
+ UseScratchRegisterScope wrapped_scope_;
+};
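
Scratch scopes nest: each one remembers the previously active scope and restores it on destruction, so registers acquired in an inner scope are handed back when it closes. A short usage sketch, assuming a BaselineAssembler* named basm:

    {
      BaselineAssembler::ScratchRegisterScope scope(basm);
      Register tmp = scope.AcquireScratch();
      // ... emit code using tmp ...
    }  // tmp becomes available to later scopes again here
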
+
+enum class Condition : uint32_t {
+ kEqual = eq,
+ kNotEqual = ne,
+
+ kLessThan = lt,
+ kGreaterThan = gt,
+ kLessThanEqual = le,
+ kGreaterThanEqual = ge,
+
+ kUnsignedLessThan = Uless,
+ kUnsignedGreaterThan = Ugreater,
+ kUnsignedLessThanEqual = Uless_equal,
+ kUnsignedGreaterThanEqual = Ugreater_equal,
+
+ kOverflow = overflow,
+ kNoOverflow = no_overflow,
+
+ kZero = eq,
+ kNotZero = ne,
+};
+
+inline internal::Condition AsMasmCondition(Condition cond) {
+ return static_cast<internal::Condition>(cond);
+}
+
+namespace detail {
+
+#ifdef DEBUG
+inline bool Clobbers(Register target, MemOperand op) {
+ return op.is_reg() && op.rm() == target;
+}
+#endif
+
+} // namespace detail
+
+#define __ masm_->
+
+MemOperand BaselineAssembler::RegisterFrameOperand(
+ interpreter::Register interpreter_register) {
+ return MemOperand(fp, interpreter_register.ToOperand() * kSystemPointerSize);
+}
+MemOperand BaselineAssembler::FeedbackVectorOperand() {
+ return MemOperand(fp, BaselineFrameConstants::kFeedbackVectorFromFp);
+}
+
+void BaselineAssembler::Bind(Label* label) { __ bind(label); }
+
+void BaselineAssembler::BindWithoutJumpTarget(Label* label) { __ bind(label); }
+
+void BaselineAssembler::JumpTarget() {
+ // Nop
+}
+
+void BaselineAssembler::Jump(Label* target, Label::Distance distance) {
+ __ jmp(target);
+}
+void BaselineAssembler::JumpIf(Condition cc, Label* target, Label::Distance) {
+ __ Branch(target, AsMasmCondition(cc), kTestReg, Operand((int64_t)0));
+}
+void BaselineAssembler::JumpIfRoot(Register value, RootIndex index,
+ Label* target, Label::Distance) {
+ __ JumpIfRoot(value, index, target);
+}
+void BaselineAssembler::JumpIfNotRoot(Register value, RootIndex index,
+ Label* target, Label::Distance) {
+ __ JumpIfNotRoot(value, index, target);
+}
+void BaselineAssembler::JumpIfSmi(Register value, Label* target,
+ Label::Distance) {
+ __ JumpIfSmi(value, target);
+}
+void BaselineAssembler::JumpIfNotSmi(Register value, Label* target,
+ Label::Distance) {
+  __ JumpIfNotSmi(value, target);
+}
+
+void BaselineAssembler::CallBuiltin(Builtins::Name builtin) {
+ if (masm()->options().short_builtin_calls) {
+ __ CallBuiltin(builtin);
+ } else {
+ __ RecordCommentForOffHeapTrampoline(builtin);
+ Register temp = t6;
+ __ LoadEntryFromBuiltinIndex(builtin, temp);
+ __ Call(temp);
+ __ RecordComment("]");
+ }
+}
+
+void BaselineAssembler::TailCallBuiltin(Builtins::Name builtin) {
+ if (masm()->options().short_builtin_calls) {
+ // Generate pc-relative jump.
+ __ TailCallBuiltin(builtin);
+ } else {
+ __ RecordCommentForOffHeapTrampoline(builtin);
+    // t6 is used for function calls on RISCV64,
+    // e.g. 'jalr t6' or 'jal t6'.
+ Register temp = t6;
+ __ LoadEntryFromBuiltinIndex(builtin, temp);
+ __ Jump(temp);
+ __ RecordComment("]");
+ }
+}
+
+void BaselineAssembler::Test(Register value, int mask) {
+ __ And(kTestReg, value, Operand(mask));
+}
+
+void BaselineAssembler::CmpObjectType(Register object,
+ InstanceType instance_type,
+ Register map) {
+ ScratchRegisterScope temps(this);
+ Register type = temps.AcquireScratch();
+ __ GetObjectType(object, map, type);
+ __ Sub64(kTestReg, type, Operand(instance_type));
+}
+void BaselineAssembler::CmpInstanceType(Register value,
+ InstanceType instance_type) {
+ ScratchRegisterScope temps(this);
+ Register type = temps.AcquireScratch();
+ __ Ld(type, FieldMemOperand(value, Map::kInstanceTypeOffset));
+ __ Sub64(kTestReg, type, Operand(instance_type));
+}
+
+void BaselineAssembler::Cmp(Register value, Smi smi) {
+ ScratchRegisterScope temps(this);
+ Register temp = temps.AcquireScratch();
+ __ li(temp, Operand(smi));
+ __ SmiUntag(temp);
+ __ Sub64(kTestReg, value, temp);
+}
+void BaselineAssembler::ComparePointer(Register value, MemOperand operand) {
+ ScratchRegisterScope temps(this);
+ Register temp = temps.AcquireScratch();
+ __ Ld(temp, operand);
+ __ Sub64(kTestReg, value, temp);
+}
+
+void BaselineAssembler::SmiCompare(Register lhs, Register rhs) {
+ __ AssertSmi(lhs);
+ __ AssertSmi(rhs);
+ if (COMPRESS_POINTERS_BOOL) {
+ __ Sub32(kTestReg, lhs, rhs);
+ } else {
+ __ Sub64(kTestReg, lhs, rhs);
+ }
+}
+void BaselineAssembler::CompareTagged(Register value, MemOperand operand) {
+ ScratchRegisterScope temps(this);
+ Register tmp = temps.AcquireScratch();
+ __ Ld(tmp, operand);
+ if (COMPRESS_POINTERS_BOOL) {
+ __ Sub32(kTestReg, value, tmp);
+ } else {
+ __ Sub64(kTestReg, value, tmp);
+ }
+}
+void BaselineAssembler::CompareTagged(MemOperand operand, Register value) {
+ ScratchRegisterScope temps(this);
+ Register tmp = temps.AcquireScratch();
+ __ Ld(tmp, operand);
+ if (COMPRESS_POINTERS_BOOL) {
+ __ Sub32(kTestReg, tmp, value);
+ } else {
+ __ Sub64(kTestReg, tmp, value);
+ }
+}
+
+void BaselineAssembler::CompareByte(Register value, int32_t byte) {
+ __ Sub64(kTestReg, value, Operand(byte));
+}
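
Unlike the flag-based ports (ia32/x64), this port materializes every comparison as a subtraction into the dedicated kTestReg (t0); BaselineAssembler::JumpIf then branches on kTestReg relative to zero with the requested condition. A usage sketch, assuming a BaselineAssembler basm, a Register reg and a Label done:

    basm.CompareByte(reg, 42);              // kTestReg = reg - 42
    basm.JumpIf(Condition::kEqual, &done);  // taken when kTestReg == 0
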
+
+void BaselineAssembler::Move(interpreter::Register output, Register source) {
+ Move(RegisterFrameOperand(output), source);
+}
+void BaselineAssembler::Move(Register output, TaggedIndex value) {
+ __ li(output, Operand(value.ptr()));
+}
+void BaselineAssembler::Move(MemOperand output, Register source) {
+ __ Sd(source, output);
+}
+void BaselineAssembler::Move(Register output, ExternalReference reference) {
+ __ li(output, Operand(reference));
+}
+void BaselineAssembler::Move(Register output, Handle<HeapObject> value) {
+ __ li(output, Operand(value));
+}
+void BaselineAssembler::Move(Register output, int32_t value) {
+ __ li(output, Operand(value));
+}
+void BaselineAssembler::MoveMaybeSmi(Register output, Register source) {
+ __ Move(output, source);
+}
+void BaselineAssembler::MoveSmi(Register output, Register source) {
+ __ Move(output, source);
+}
+
+namespace detail {
+
+template <typename Arg>
+inline Register ToRegister(BaselineAssembler* basm,
+ BaselineAssembler::ScratchRegisterScope* scope,
+ Arg arg) {
+ Register reg = scope->AcquireScratch();
+ basm->Move(reg, arg);
+ return reg;
+}
+inline Register ToRegister(BaselineAssembler* basm,
+ BaselineAssembler::ScratchRegisterScope* scope,
+ Register reg) {
+ return reg;
+}
+
+template <typename... Args>
+struct CountPushHelper;
+template <>
+struct CountPushHelper<> {
+ static int Count() { return 0; }
+};
+template <typename Arg, typename... Args>
+struct CountPushHelper<Arg, Args...> {
+ static int Count(Arg arg, Args... args) {
+ return 1 + CountPushHelper<Args...>::Count(args...);
+ }
+};
+template <typename... Args>
+struct CountPushHelper<interpreter::RegisterList, Args...> {
+ static int Count(interpreter::RegisterList list, Args... args) {
+ return list.register_count() + CountPushHelper<Args...>::Count(args...);
+ }
+};
+
+template <typename... Args>
+struct PushAllHelper;
+template <typename... Args>
+void PushAll(BaselineAssembler* basm, Args... args) {
+ PushAllHelper<Args...>::Push(basm, args...);
+}
+template <typename... Args>
+void PushAllReverse(BaselineAssembler* basm, Args... args) {
+ PushAllHelper<Args...>::PushReverse(basm, args...);
+}
+
+template <>
+struct PushAllHelper<> {
+ static void Push(BaselineAssembler* basm) {}
+ static void PushReverse(BaselineAssembler* basm) {}
+};
+
+inline void PushSingle(MacroAssembler* masm, RootIndex source) {
+ masm->PushRoot(source);
+}
+inline void PushSingle(MacroAssembler* masm, Register reg) { masm->Push(reg); }
+
+inline void PushSingle(MacroAssembler* masm, Smi value) { masm->Push(value); }
+inline void PushSingle(MacroAssembler* masm, Handle<HeapObject> object) {
+ masm->Push(object);
+}
+inline void PushSingle(MacroAssembler* masm, int32_t immediate) {
+ masm->li(kScratchReg, (int64_t)(immediate));
+ PushSingle(masm, kScratchReg);
+}
+
+inline void PushSingle(MacroAssembler* masm, TaggedIndex value) {
+ masm->li(kScratchReg, static_cast<int64_t>(value.ptr()));
+ PushSingle(masm, kScratchReg);
+}
+inline void PushSingle(MacroAssembler* masm, MemOperand operand) {
+ masm->Ld(kScratchReg, operand);
+ PushSingle(masm, kScratchReg);
+}
+inline void PushSingle(MacroAssembler* masm, interpreter::Register source) {
+ return PushSingle(masm, BaselineAssembler::RegisterFrameOperand(source));
+}
+
+template <typename Arg>
+struct PushAllHelper<Arg> {
+ static void Push(BaselineAssembler* basm, Arg arg) {
+ PushSingle(basm->masm(), arg);
+ }
+ static void PushReverse(BaselineAssembler* basm, Arg arg) {
+    // A single argument is pushed the same way in both directions.
+ return Push(basm, arg);
+ }
+};
+template <typename Arg1, typename Arg2, typename... Args>
+struct PushAllHelper<Arg1, Arg2, Args...> {
+ static void Push(BaselineAssembler* basm, Arg1 arg1, Arg2 arg2,
+ Args... args) {
+ {
+ BaselineAssembler::ScratchRegisterScope scope(basm);
+ basm->masm()->Push(ToRegister(basm, &scope, arg1),
+ ToRegister(basm, &scope, arg2));
+ }
+ PushAll(basm, args...);
+ }
+ static void PushReverse(BaselineAssembler* basm, Arg1 arg1, Arg2 arg2,
+ Args... args) {
+ PushAllReverse(basm, args...);
+ {
+ BaselineAssembler::ScratchRegisterScope scope(basm);
+ basm->masm()->Push(ToRegister(basm, &scope, arg2),
+ ToRegister(basm, &scope, arg1));
+ }
+ }
+};
+// Currently RegisterLists are always the last argument, so we don't
+// specialize for the case where they're not. We do still specialize for the
+// aligned and unaligned cases.
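+// For example, PushAll(basm, reg_a, reg_b, list) pushes reg_a and reg_b as a
+// pair and then the list's registers pairwise, while PushAllReverse emits the
+// same values in the opposite order.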
+template <typename Arg>
+struct PushAllHelper<Arg, interpreter::RegisterList> {
+ static void Push(BaselineAssembler* basm, Arg arg,
+ interpreter::RegisterList list) {
+ DCHECK_EQ(list.register_count() % 2, 1);
+ PushAll(basm, arg, list[0], list.PopLeft());
+ }
+ static void PushReverse(BaselineAssembler* basm, Arg arg,
+ interpreter::RegisterList list) {
+ if (list.register_count() == 0) {
+ PushAllReverse(basm, arg);
+ } else {
+ PushAllReverse(basm, arg, list[0], list.PopLeft());
+ }
+ }
+};
+template <>
+struct PushAllHelper<interpreter::RegisterList> {
+ static void Push(BaselineAssembler* basm, interpreter::RegisterList list) {
+ DCHECK_EQ(list.register_count() % 2, 0);
+ for (int reg_index = 0; reg_index < list.register_count(); reg_index += 2) {
+ PushAll(basm, list[reg_index], list[reg_index + 1]);
+ }
+ }
+ static void PushReverse(BaselineAssembler* basm,
+ interpreter::RegisterList list) {
+ int reg_index = list.register_count() - 1;
+ if (reg_index % 2 == 0) {
+ // Push the padding register to round up the amount of values pushed.
+ PushAllReverse(basm, list[reg_index]);
+ reg_index--;
+ }
+ for (; reg_index >= 1; reg_index -= 2) {
+ PushAllReverse(basm, list[reg_index - 1], list[reg_index]);
+ }
+ }
+};
+
+template <typename... T>
+struct PopAllHelper;
+template <>
+struct PopAllHelper<> {
+ static void Pop(BaselineAssembler* basm) {}
+};
+template <>
+struct PopAllHelper<Register> {
+ static void Pop(BaselineAssembler* basm, Register reg) {
+ basm->masm()->Pop(reg);
+ }
+};
+template <typename... T>
+struct PopAllHelper<Register, Register, T...> {
+ static void Pop(BaselineAssembler* basm, Register reg1, Register reg2,
+ T... tail) {
+ basm->masm()->Pop(reg1, reg2);
+ PopAllHelper<T...>::Pop(basm, tail...);
+ }
+};
+
+} // namespace detail
+
+template <typename... T>
+int BaselineAssembler::Push(T... vals) {
+  // Count the pushes so callers know how many slots were consumed. Unlike
+  // architectures that require stack-pointer alignment padding, the even and
+  // odd cases are handled identically here.
+  int push_count = detail::CountPushHelper<T...>::Count(vals...);
+  detail::PushAll(this, vals...);
+ return push_count;
+}
+
+template <typename... T>
+void BaselineAssembler::PushReverse(T... vals) {
+ detail::PushAllReverse(this, vals...);
+}
+
+template <typename... T>
+void BaselineAssembler::Pop(T... registers) {
+ detail::PopAllHelper<T...>::Pop(this, registers...);
+}
+
+void BaselineAssembler::LoadTaggedPointerField(Register output, Register source,
+ int offset) {
+  // FIXME(riscv64): pointer compression is not implemented on riscv64.
+ // __ LoadTaggedPointerField(output, FieldMemOperand(source, offset));
+ __ Ld(output, FieldMemOperand(source, offset));
+}
+void BaselineAssembler::LoadTaggedSignedField(Register output, Register source,
+ int offset) {
+  // FIXME(riscv64): pointer compression is not implemented on riscv64.
+ __ Ld(output, FieldMemOperand(source, offset));
+}
+void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
+ int offset) {
+  // FIXME(riscv64): pointer compression is not implemented on riscv64.
+ __ Ld(output, FieldMemOperand(source, offset));
+}
+void BaselineAssembler::LoadByteField(Register output, Register source,
+ int offset) {
+  __ Lb(output, FieldMemOperand(source, offset));
+}
+void BaselineAssembler::StoreTaggedSignedField(Register target, int offset,
+ Smi value) {
+ ScratchRegisterScope temps(this);
+ Register tmp = temps.AcquireScratch();
+ __ li(tmp, Operand(value));
+  // FIXME(riscv64): pointer compression is not implemented on riscv64.
+ __ Sd(tmp, FieldMemOperand(target, offset));
+}
+void BaselineAssembler::StoreTaggedFieldWithWriteBarrier(Register target,
+ int offset,
+ Register value) {
+  // FIXME(riscv64): pointer compression is not implemented on riscv64.
+ __ Sd(value, FieldMemOperand(target, offset));
+ ScratchRegisterScope temps(this);
+ Register tmp = temps.AcquireScratch();
+ __ RecordWriteField(target, offset, value, tmp, kRAHasNotBeenSaved,
+ SaveFPRegsMode::kIgnore);
+}
+void BaselineAssembler::StoreTaggedFieldNoWriteBarrier(Register target,
+ int offset,
+ Register value) {
+  // FIXME(riscv64): pointer compression is not implemented on riscv64.
+ __ Sd(value, FieldMemOperand(target, offset));
+}
+
+void BaselineAssembler::AddToInterruptBudget(int32_t weight) {
+ ScratchRegisterScope scratch_scope(this);
+ Register feedback_cell = scratch_scope.AcquireScratch();
+ LoadFunction(feedback_cell);
+ LoadTaggedPointerField(feedback_cell, feedback_cell,
+ JSFunction::kFeedbackCellOffset);
+
+ Register interrupt_budget = scratch_scope.AcquireScratch();
+ __ Ld(interrupt_budget,
+ FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
+ // Remember to set flags as part of the add!
+ __ Add64(interrupt_budget, interrupt_budget, weight);
+ __ Sd(interrupt_budget,
+ FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
+}
+
+void BaselineAssembler::AddToInterruptBudget(Register weight) {
+ ScratchRegisterScope scratch_scope(this);
+ Register feedback_cell = scratch_scope.AcquireScratch();
+ LoadFunction(feedback_cell);
+ LoadTaggedPointerField(feedback_cell, feedback_cell,
+ JSFunction::kFeedbackCellOffset);
+
+ Register interrupt_budget = scratch_scope.AcquireScratch();
+ __ Ld(interrupt_budget,
+ FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
+ // Remember to set flags as part of the add!
+ __ Add64(interrupt_budget, interrupt_budget, weight);
+ __ Sd(interrupt_budget,
+ FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
+}
+
+void BaselineAssembler::AddSmi(Register lhs, Smi rhs) {
+ if (SmiValuesAre31Bits()) {
+ __ Add32(lhs, lhs, Operand(rhs));
+ } else {
+ __ Add64(lhs, lhs, Operand(rhs));
+ }
+}
+
+void BaselineAssembler::Switch(Register reg, int case_value_base,
+ Label** labels, int num_labels) {
+ Label fallthrough;
+ if (case_value_base > 0) {
+ __ Sub64(reg, reg, Operand(case_value_base));
+ }
+
+ // Mostly copied from code-generator-riscv64.cc
+ ScratchRegisterScope scope(this);
+ Register temp = scope.AcquireScratch();
+ Label table;
+ __ Branch(&fallthrough, AsMasmCondition(Condition::kUnsignedGreaterThanEqual),
+ reg, Operand(int64_t(num_labels)));
+ int64_t imm64;
+ imm64 = __ branch_long_offset(&table);
+ DCHECK(is_int32(imm64));
+ int32_t Hi20 = (((int32_t)imm64 + 0x800) >> 12);
+ int32_t Lo12 = (int32_t)imm64 << 20 >> 20;
+  __ auipc(temp, Hi20);       // temp = pc + (Hi20 << 12)
+  __ addi(temp, temp, Lo12);  // temp = pc + offset = address of the jump table
+
+ int entry_size_log2 = 2;
+ Register temp2 = scope.AcquireScratch();
+ __ CalcScaledAddress(temp2, temp, reg, entry_size_log2);
+  __ Jump(temp2);
+ {
+    TurboAssembler::BlockTrampolinePoolScope block_pools(masm());
+ __ BlockTrampolinePoolFor(num_labels * kInstrSize);
+ __ bind(&table);
+ for (int i = 0; i < num_labels; ++i) {
+ __ Branch(labels[i]);
+ }
+ DCHECK_EQ(num_labels * kInstrSize, __ InstructionsGeneratedSince(&table));
+ __ bind(&fallthrough);
+ }
+}
+
+#undef __
+
+#define __ basm.
+
+void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
+ BaselineAssembler basm(masm);
+
+ Register weight = BaselineLeaveFrameDescriptor::WeightRegister();
+ Register params_size = BaselineLeaveFrameDescriptor::ParamsSizeRegister();
+
+ __ RecordComment("[ Update Interrupt Budget");
+ __ AddToInterruptBudget(weight);
+
+ // Use compare flags set by add
+ Label skip_interrupt_label;
+ __ JumpIf(Condition::kGreaterThanEqual, &skip_interrupt_label);
+ {
+ __ masm()->SmiTag(params_size);
+ __ masm()->Push(params_size, kInterpreterAccumulatorRegister);
+
+ __ LoadContext(kContextRegister);
+ __ LoadFunction(kJSFunctionRegister);
+ __ masm()->Push(kJSFunctionRegister);
+ __ CallRuntime(Runtime::kBytecodeBudgetInterruptFromBytecode, 1);
+
+ __ masm()->Pop(kInterpreterAccumulatorRegister, params_size);
+ __ masm()->SmiUntag(params_size);
+ }
+ __ RecordComment("]");
+
+ __ Bind(&skip_interrupt_label);
+
+ BaselineAssembler::ScratchRegisterScope temps(&basm);
+ Register actual_params_size = temps.AcquireScratch();
+ // Compute the size of the actual parameters + receiver (in bytes).
+ __ Move(actual_params_size,
+ MemOperand(fp, StandardFrameConstants::kArgCOffset));
+
+ // If actual is bigger than formal, then we should use it to free up the stack
+ // arguments.
+ Label corrected_args_count;
+ __ masm()->Branch(&corrected_args_count, ge, params_size,
+ Operand(actual_params_size));
+ __ masm()->Move(params_size, actual_params_size);
+ __ Bind(&corrected_args_count);
+
+ // Leave the frame (also dropping the register file).
+ __ masm()->LeaveFrame(StackFrame::BASELINE);
+
+ // Drop receiver + arguments.
+ __ masm()->Add64(params_size, params_size, 1); // Include the receiver.
+ __ masm()->slli(params_size, params_size, kPointerSizeLog2);
+ __ masm()->Add64(sp, sp, params_size);
+ __ masm()->Ret();
+}
+
+#undef __
+
+} // namespace baseline
+} // namespace internal
+} // namespace v8
+
+#endif // V8_BASELINE_RISCV64_BASELINE_ASSEMBLER_RISCV64_INL_H_
diff --git a/chromium/v8/src/baseline/riscv64/baseline-compiler-riscv64-inl.h b/chromium/v8/src/baseline/riscv64/baseline-compiler-riscv64-inl.h
new file mode 100644
index 00000000000..98ca62e3034
--- /dev/null
+++ b/chromium/v8/src/baseline/riscv64/baseline-compiler-riscv64-inl.h
@@ -0,0 +1,112 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASELINE_RISCV64_BASELINE_COMPILER_RISCV64_INL_H_
+#define V8_BASELINE_RISCV64_BASELINE_COMPILER_RISCV64_INL_H_
+
+#include "src/baseline/baseline-compiler.h"
+
+namespace v8 {
+namespace internal {
+namespace baseline {
+
+#define __ basm_.
+
+void BaselineCompiler::Prologue() {
+  // Enter the frame here, since CallBuiltin clobbers the return address
+  // register (ra).
+ __ masm()->EnterFrame(StackFrame::BASELINE);
+ DCHECK_EQ(kJSFunctionRegister, kJavaScriptCallTargetRegister);
+ int max_frame_size = bytecode_->frame_size() + max_call_args_;
+ CallBuiltin<Builtins::kBaselineOutOfLinePrologue>(
+ kContextRegister, kJSFunctionRegister, kJavaScriptCallArgCountRegister,
+ max_frame_size, kJavaScriptCallNewTargetRegister, bytecode_);
+ PrologueFillFrame();
+}
+
+void BaselineCompiler::PrologueFillFrame() {
+ __ RecordComment("[ Fill frame");
+ // Inlined register frame fill
+ interpreter::Register new_target_or_generator_register =
+ bytecode_->incoming_new_target_or_generator_register();
+ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
+ int register_count = bytecode_->register_count();
+  // Unroll factor for the register-fill loop below (must be even, since
+  // registers are pushed in pairs).
+ const int kLoopUnrollSize = 8;
+ const int new_target_index = new_target_or_generator_register.index();
+ const bool has_new_target = new_target_index != kMaxInt;
+ // BaselineOutOfLinePrologue already pushed one undefined.
+ register_count -= 1;
+ if (has_new_target) {
+ if (new_target_index == 0) {
+ // Oops, need to fix up that undefined that BaselineOutOfLinePrologue
+ // pushed.
+ __ masm()->Sd(kJavaScriptCallNewTargetRegister, MemOperand(sp));
+ } else {
+ DCHECK_LE(new_target_index, register_count);
+ int index = 1;
+ for (; index + 2 <= new_target_index; index += 2) {
+ __ masm()->Push(kInterpreterAccumulatorRegister,
+ kInterpreterAccumulatorRegister);
+ }
+ if (index == new_target_index) {
+ __ masm()->Push(kJavaScriptCallNewTargetRegister,
+ kInterpreterAccumulatorRegister);
+ } else {
+ DCHECK_EQ(index, new_target_index - 1);
+ __ masm()->Push(kInterpreterAccumulatorRegister,
+ kJavaScriptCallNewTargetRegister);
+ }
+ // We pushed "index" registers, minus the one the prologue pushed, plus
+ // the two registers that included new_target.
+ register_count -= (index - 1 + 2);
+ }
+ }
+ if (register_count < 2 * kLoopUnrollSize) {
+ // If the frame is small enough, just unroll the frame fill completely.
+ for (int i = 0; i < register_count; i += 2) {
+ __ masm()->Push(kInterpreterAccumulatorRegister,
+ kInterpreterAccumulatorRegister);
+ }
+ } else {
+ BaselineAssembler::ScratchRegisterScope temps(&basm_);
+ Register scratch = temps.AcquireScratch();
+
+ // Extract the first few registers to round to the unroll size.
+ int first_registers = register_count % kLoopUnrollSize;
+ for (int i = 0; i < first_registers; i += 2) {
+ __ masm()->Push(kInterpreterAccumulatorRegister,
+ kInterpreterAccumulatorRegister);
+ }
+ __ Move(scratch, register_count / kLoopUnrollSize);
+ // We enter the loop unconditionally, so make sure we need to loop at least
+ // once.
+ DCHECK_GT(register_count / kLoopUnrollSize, 0);
+ Label loop;
+ __ Bind(&loop);
+ for (int i = 0; i < kLoopUnrollSize; i += 2) {
+ __ masm()->Push(kInterpreterAccumulatorRegister,
+ kInterpreterAccumulatorRegister);
+ }
+ __ masm()->Sub64(scratch, scratch, 1);
+ __ JumpIf(Condition::kGreaterThan, &loop);
+ }
+ __ RecordComment("]");
+}
+
+void BaselineCompiler::VerifyFrameSize() {
+ __ masm()->Add64(kScratchReg, sp,
+ RoundUp(InterpreterFrameConstants::kFixedFrameSizeFromFp +
+ bytecode_->frame_size(),
+ 2 * kSystemPointerSize));
+ __ masm()->Assert(eq, AbortReason::kUnexpectedStackPointer, kScratchReg,
+ Operand(fp));
+}
+
+#undef __
+
+} // namespace baseline
+} // namespace internal
+} // namespace v8
+
+#endif // V8_BASELINE_RISCV64_BASELINE_COMPILER_RISCV64_INL_H_
diff --git a/chromium/v8/src/baseline/x64/baseline-assembler-x64-inl.h b/chromium/v8/src/baseline/x64/baseline-assembler-x64-inl.h
index 202f83c7615..98ed29a9cae 100644
--- a/chromium/v8/src/baseline/x64/baseline-assembler-x64-inl.h
+++ b/chromium/v8/src/baseline/x64/baseline-assembler-x64-inl.h
@@ -7,7 +7,6 @@
#include "src/base/macros.h"
#include "src/baseline/baseline-assembler.h"
-#include "src/codegen/interface-descriptors.h"
#include "src/codegen/x64/register-x64.h"
namespace v8 {
@@ -129,7 +128,7 @@ void BaselineAssembler::CallBuiltin(Builtins::Name builtin) {
} else {
__ RecordCommentForOffHeapTrampoline(builtin);
__ Call(__ EntryFromBuiltinIndexAsOperand(builtin));
- if (FLAG_code_comments) __ RecordComment("]");
+ __ RecordComment("]");
}
}
@@ -140,7 +139,7 @@ void BaselineAssembler::TailCallBuiltin(Builtins::Name builtin) {
} else {
__ RecordCommentForOffHeapTrampoline(builtin);
__ Jump(__ EntryFromBuiltinIndexAsOperand(builtin));
- if (FLAG_code_comments) __ RecordComment("]");
+ __ RecordComment("]");
}
}
@@ -160,7 +159,7 @@ void BaselineAssembler::CmpObjectType(Register object,
}
void BaselineAssembler::CmpInstanceType(Register map,
InstanceType instance_type) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
__ AssertNotSmi(map);
__ CmpObjectType(map, MAP_TYPE, kScratchRegister);
__ Assert(equal, AbortReason::kUnexpectedValue);
@@ -201,7 +200,7 @@ void BaselineAssembler::Move(Register output, Handle<HeapObject> value) {
__ Move(output, value);
}
void BaselineAssembler::Move(Register output, int32_t value) {
- __ Move(output, Immediate(value));
+ __ Move(output, value);
}
void BaselineAssembler::MoveMaybeSmi(Register output, Register source) {
__ mov_tagged(output, source);
@@ -326,7 +325,7 @@ void BaselineAssembler::StoreTaggedFieldWithWriteBarrier(Register target,
DCHECK_NE(target, scratch);
DCHECK_NE(value, scratch);
__ StoreTaggedField(FieldOperand(target, offset), value);
- __ RecordWriteField(target, offset, value, scratch, kDontSaveFPRegs);
+ __ RecordWriteField(target, offset, value, scratch, SaveFPRegsMode::kIgnore);
}
void BaselineAssembler::StoreTaggedFieldNoWriteBarrier(Register target,
int offset,
diff --git a/chromium/v8/src/baseline/x64/baseline-compiler-x64-inl.h b/chromium/v8/src/baseline/x64/baseline-compiler-x64-inl.h
index 73b43770e56..a4d547b0671 100644
--- a/chromium/v8/src/baseline/x64/baseline-compiler-x64-inl.h
+++ b/chromium/v8/src/baseline/x64/baseline-compiler-x64-inl.h
@@ -18,9 +18,9 @@ namespace baseline {
void BaselineCompiler::Prologue() {
DCHECK_EQ(kJSFunctionRegister, kJavaScriptCallTargetRegister);
int max_frame_size = bytecode_->frame_size() + max_call_args_;
- CallBuiltin(Builtins::kBaselineOutOfLinePrologue, kContextRegister,
- kJSFunctionRegister, kJavaScriptCallArgCountRegister,
- max_frame_size, kJavaScriptCallNewTargetRegister, bytecode_);
+ CallBuiltin<Builtins::kBaselineOutOfLinePrologue>(
+ kContextRegister, kJSFunctionRegister, kJavaScriptCallArgCountRegister,
+ max_frame_size, kJavaScriptCallNewTargetRegister, bytecode_);
PrologueFillFrame();
}
diff --git a/chromium/v8/src/bigint/bigint-internal.cc b/chromium/v8/src/bigint/bigint-internal.cc
new file mode 100644
index 00000000000..6630c6c4c9d
--- /dev/null
+++ b/chromium/v8/src/bigint/bigint-internal.cc
@@ -0,0 +1,43 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/bigint/bigint-internal.h"
+
+namespace v8 {
+namespace bigint {
+
+ProcessorImpl::ProcessorImpl(Platform* platform) : platform_(platform) {}
+
+ProcessorImpl::~ProcessorImpl() { delete platform_; }
+
+Status ProcessorImpl::get_and_clear_status() {
+ Status result = status_;
+ status_ = Status::kOk;
+ return result;
+}
+
+Processor* Processor::New(Platform* platform) {
+ ProcessorImpl* impl = new ProcessorImpl(platform);
+ return static_cast<Processor*>(impl);
+}
+
+void Processor::Destroy() { delete static_cast<ProcessorImpl*>(this); }
+
+void ProcessorImpl::Multiply(RWDigits Z, Digits X, Digits Y) {
+ X.Normalize();
+ Y.Normalize();
+ if (X.len() == 0 || Y.len() == 0) return Z.Clear();
+ if (X.len() < Y.len()) std::swap(X, Y);
+ if (Y.len() == 1) return MultiplySingle(Z, X, Y[0]);
+ return MultiplySchoolbook(Z, X, Y);
+}
+
+Status Processor::Multiply(RWDigits Z, Digits X, Digits Y) {
+ ProcessorImpl* impl = static_cast<ProcessorImpl*>(this);
+ impl->Multiply(Z, X, Y);
+ return impl->get_and_clear_status();
+}
+
+} // namespace bigint
+} // namespace v8
diff --git a/chromium/v8/src/bigint/bigint-internal.h b/chromium/v8/src/bigint/bigint-internal.h
new file mode 100644
index 00000000000..efe63a06a51
--- /dev/null
+++ b/chromium/v8/src/bigint/bigint-internal.h
@@ -0,0 +1,65 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BIGINT_BIGINT_INTERNAL_H_
+#define V8_BIGINT_BIGINT_INTERNAL_H_
+
+#include "src/bigint/bigint.h"
+
+namespace v8 {
+namespace bigint {
+
+class ProcessorImpl : public Processor {
+ public:
+ explicit ProcessorImpl(Platform* platform);
+ ~ProcessorImpl();
+
+ Status get_and_clear_status();
+
+ void Multiply(RWDigits Z, Digits X, Digits Y);
+ void MultiplySingle(RWDigits Z, Digits X, digit_t y);
+ void MultiplySchoolbook(RWDigits Z, Digits X, Digits Y);
+
+ private:
+ // Each unit is supposed to represent approximately one CPU {mul} instruction.
+ // Doesn't need to be accurate; we just want to make sure to check for
+ // interrupt requests every now and then (roughly every 10-100 ms; often
+ // enough not to appear stuck, rarely enough not to cause noticeable
+ // overhead).
+ static const uintptr_t kWorkEstimateThreshold = 5000000;
+
+ void AddWorkEstimate(uintptr_t estimate) {
+ work_estimate_ += estimate;
+ if (work_estimate_ >= kWorkEstimateThreshold) {
+ work_estimate_ = 0;
+ if (platform_->InterruptRequested()) {
+ status_ = Status::kInterrupted;
+ }
+ }
+ }
+
+ bool should_terminate() { return status_ == Status::kInterrupted; }
+
+ uintptr_t work_estimate_{0};
+ Status status_{Status::kOk};
+ Platform* platform_;
+};
+
+#define CHECK(cond) \
+ if (!(cond)) { \
+ std::cerr << __FILE__ << ":" << __LINE__ << ": "; \
+ std::cerr << "Assertion failed: " #cond "\n"; \
+ abort(); \
+ }
+
+#ifdef DEBUG
+#define DCHECK(cond) CHECK(cond)
+#else
+#define DCHECK(cond) (void(0))
+#endif
+
+} // namespace bigint
+} // namespace v8
+
+#endif // V8_BIGINT_BIGINT_INTERNAL_H_
diff --git a/chromium/v8/src/bigint/bigint.h b/chromium/v8/src/bigint/bigint.h
index a87622b167a..a365359c530 100644
--- a/chromium/v8/src/bigint/bigint.h
+++ b/chromium/v8/src/bigint/bigint.h
@@ -120,9 +120,117 @@ class Digits {
}
};
+// Writable version of a Digits array.
+// Does not own the memory it points at.
+class RWDigits : public Digits {
+ public:
+ RWDigits(digit_t* mem, int len) : Digits(mem, len) {}
+ RWDigits(RWDigits src, int offset, int len) : Digits(src, offset, len) {}
+ RWDigits operator+(int i) {
+ BIGINT_H_DCHECK(i >= 0 && i <= len_);
+ return RWDigits(digits_ + i, len_ - i);
+ }
+
+#if UINTPTR_MAX == 0xFFFFFFFF
+ digit_t& operator[](int i) {
+ BIGINT_H_DCHECK(i >= 0 && i < len_);
+ return digits_[i];
+ }
+#else
+ // 64-bit platform. We only require digits arrays to be 4-byte aligned,
+ // so we use a wrapper class to allow regular array syntax while
+ // performing unaligned memory accesses under the hood.
+ class WritableDigitReference {
+ public:
+ // Support "X[i] = x" notation.
+ void operator=(digit_t digit) { memcpy(ptr_, &digit, sizeof(digit)); }
+ // Support "X[i] = Y[j]" notation.
+ WritableDigitReference& operator=(const WritableDigitReference& src) {
+ memcpy(ptr_, src.ptr_, sizeof(digit_t));
+ return *this;
+ }
+ // Support "x = X[i]" notation.
+ operator digit_t() {
+ digit_t result;
+ memcpy(&result, ptr_, sizeof(result));
+ return result;
+ }
+
+ private:
+ // This class is not for public consumption.
+ friend class RWDigits;
+ // Primary constructor.
+ explicit WritableDigitReference(digit_t* ptr)
+ : ptr_(reinterpret_cast<uint32_t*>(ptr)) {}
+ // Required for returning WDR instances from "operator[]" below.
+ WritableDigitReference(const WritableDigitReference& src) = default;
+
+ uint32_t* ptr_;
+ };
+
+ WritableDigitReference operator[](int i) {
+ BIGINT_H_DCHECK(i >= 0 && i < len_);
+ return WritableDigitReference(digits_ + i);
+ }
+#endif
+
+ digit_t* digits() { return digits_; }
+ void set_len(int len) { len_ = len; }
+
+ void Clear() { memset(digits_, 0, len_ * sizeof(digit_t)); }
+};
+
+class Platform {
+ public:
+ virtual ~Platform() = default;
+
+ // If you want the ability to interrupt long-running operations, implement
+ // a Platform subclass that overrides this method. It will be queried
+ // every now and then by long-running operations.
+ virtual bool InterruptRequested() { return false; }
+};
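+
+// A minimal sketch of an interruptible platform (illustrative only; the
+// interrupt flag would typically be set from another thread):
+//
+//   class InterruptiblePlatform : public Platform {
+//    public:
+//     bool InterruptRequested() override { return interrupted_.load(); }
+//     void RequestInterrupt() { interrupted_.store(true); }
+//
+//    private:
+//     std::atomic<bool> interrupted_{false};
+//   };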
+
+// These are the operations that this library supports.
+// The signatures follow the convention:
+//
+// void Operation(RWDigits results, Digits inputs);
+//
+// You must preallocate the result; use the respective {OperationResultLength}
+// function to determine its minimum required length. The actual result may
+// be smaller, so you should call result.Normalize() on the result.
+//
+// The operations are divided into two groups: "fast" (O(n) with small
+// coefficient) operations are exposed directly as free functions, "slow"
+// operations are methods on a {BigIntProcessor} object, which provides
+// support for interrupting execution via the {Platform}'s {InterruptRequested}
+// mechanism when it takes too long. These functions return a {Status} value.
+
// Returns r such that r < 0 if A < B; r > 0 if A > B; r == 0 if A == B.
int Compare(Digits A, Digits B);
+enum class Status { kOk, kInterrupted };
+
+class Processor {
+ public:
+ // Takes ownership of {platform}.
+ static Processor* New(Platform* platform);
+
+ // Use this for any std::unique_ptr holding an instance of BigIntProcessor.
+ class Destroyer {
+ public:
+ void operator()(Processor* proc) { proc->Destroy(); }
+ };
+ // When not using std::unique_ptr, call this to delete the instance.
+ void Destroy();
+
+ // Z := X * Y
+ Status Multiply(RWDigits Z, Digits X, Digits Y);
+};
+
+inline int MultiplyResultLength(Digits X, Digits Y) {
+ return X.len() + Y.len();
+}
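+
+// Usage sketch (illustrative only; X and Y are existing Digits views, and
+// <memory>/<vector> are assumed to be available):
+//
+//   std::unique_ptr<Processor, Processor::Destroyer> proc(
+//       Processor::New(new Platform()));
+//   std::vector<digit_t> buffer(MultiplyResultLength(X, Y));
+//   RWDigits Z(buffer.data(), static_cast<int>(buffer.size()));
+//   if (proc->Multiply(Z, X, Y) == Status::kOk) Z.Normalize();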
+
} // namespace bigint
} // namespace v8
diff --git a/chromium/v8/src/bigint/digit-arithmetic.h b/chromium/v8/src/bigint/digit-arithmetic.h
new file mode 100644
index 00000000000..1c5c93c0353
--- /dev/null
+++ b/chromium/v8/src/bigint/digit-arithmetic.h
@@ -0,0 +1,87 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Helper functions that operate on individual digits.
+
+#ifndef V8_BIGINT_DIGIT_ARITHMETIC_H_
+#define V8_BIGINT_DIGIT_ARITHMETIC_H_
+
+#include "src/bigint/bigint.h"
+
+namespace v8 {
+namespace bigint {
+
+static constexpr int kHalfDigitBits = kDigitBits / 2;
+static constexpr digit_t kHalfDigitBase = digit_t{1} << kHalfDigitBits;
+static constexpr digit_t kHalfDigitMask = kHalfDigitBase - 1;
+
+// {carry} will be set to 0 or 1.
+inline digit_t digit_add2(digit_t a, digit_t b, digit_t* carry) {
+#if HAVE_TWODIGIT_T
+ twodigit_t result = twodigit_t{a} + b;
+ *carry = result >> kDigitBits;
+ return static_cast<digit_t>(result);
+#else
+ digit_t result = a + b;
+ *carry = (result < a) ? 1 : 0;
+ return result;
+#endif
+}
+
+// This compiles to slightly better machine code than repeated invocations
+// of {digit_add2}.
+inline digit_t digit_add3(digit_t a, digit_t b, digit_t c, digit_t* carry) {
+#if HAVE_TWODIGIT_T
+ twodigit_t result = twodigit_t{a} + b + c;
+ *carry = result >> kDigitBits;
+ return static_cast<digit_t>(result);
+#else
+ digit_t result = a + b;
+ *carry = (result < a) ? 1 : 0;
+ result += c;
+ if (result < c) *carry += 1;
+ return result;
+#endif
+}
+
+// Returns the low half of the result. High half is in {high}.
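+// For illustration, if digits were 8 bits wide, digit_mul(0xF3, 0x75, &high)
+// would return 0x0F and set high to 0x6F, since 0xF3 * 0x75 == 0x6F0F.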
+inline digit_t digit_mul(digit_t a, digit_t b, digit_t* high) {
+#if HAVE_TWODIGIT_T
+ twodigit_t result = twodigit_t{a} * b;
+ *high = result >> kDigitBits;
+ return static_cast<digit_t>(result);
+#else
+ // Multiply in half-pointer-sized chunks.
+ // For inputs [AH AL]*[BH BL], the result is:
+ //
+ // [AL*BL] // r_low
+ // + [AL*BH] // r_mid1
+ // + [AH*BL] // r_mid2
+ // + [AH*BH] // r_high
+ // = [R4 R3 R2 R1] // high = [R4 R3], low = [R2 R1]
+ //
+ // Where of course we must be careful with carries between the columns.
+ digit_t a_low = a & kHalfDigitMask;
+ digit_t a_high = a >> kHalfDigitBits;
+ digit_t b_low = b & kHalfDigitMask;
+ digit_t b_high = b >> kHalfDigitBits;
+
+ digit_t r_low = a_low * b_low;
+ digit_t r_mid1 = a_low * b_high;
+ digit_t r_mid2 = a_high * b_low;
+ digit_t r_high = a_high * b_high;
+
+ digit_t carry = 0;
+ digit_t low = digit_add3(r_low, r_mid1 << kHalfDigitBits,
+ r_mid2 << kHalfDigitBits, &carry);
+ *high =
+ (r_mid1 >> kHalfDigitBits) + (r_mid2 >> kHalfDigitBits) + r_high + carry;
+ return low;
+#endif
+}
+
+} // namespace bigint
+} // namespace v8
+
+#endif // V8_BIGINT_DIGIT_ARITHMETIC_H_
diff --git a/chromium/v8/src/bigint/mul-schoolbook.cc b/chromium/v8/src/bigint/mul-schoolbook.cc
new file mode 100644
index 00000000000..8e10685018c
--- /dev/null
+++ b/chromium/v8/src/bigint/mul-schoolbook.cc
@@ -0,0 +1,99 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/bigint/bigint-internal.h"
+#include "src/bigint/digit-arithmetic.h"
+#include "src/bigint/vector-arithmetic.h"
+
+namespace v8 {
+namespace bigint {
+
+// Z := X * y, where y is a single digit.
+void ProcessorImpl::MultiplySingle(RWDigits Z, Digits X, digit_t y) {
+ DCHECK(y != 0); // NOLINT(readability/check)
+ digit_t carry = 0;
+ digit_t high = 0;
+ for (int i = 0; i < X.len(); i++) {
+ digit_t new_high;
+ digit_t low = digit_mul(X[i], y, &new_high);
+ Z[i] = digit_add3(low, high, carry, &carry);
+ high = new_high;
+ }
+ AddWorkEstimate(X.len());
+ Z[X.len()] = carry + high;
+ for (int i = X.len() + 1; i < Z.len(); i++) Z[i] = 0;
+}
+
+#define BODY(min, max) \
+ for (int j = min; j <= max; j++) { \
+ digit_t high; \
+ digit_t low = digit_mul(X[j], Y[i - j], &high); \
+ digit_t carrybit; \
+ zi = digit_add2(zi, low, &carrybit); \
+ carry += carrybit; \
+ next = digit_add2(next, high, &carrybit); \
+ next_carry += carrybit; \
+ } \
+ Z[i] = zi
+
+// Z := X * Y.
+// O(n²) "schoolbook" multiplication algorithm. Optimized to minimize
+// bounds and overflow checks: rather than looping over X for every digit
+// of Y (or vice versa), we loop over Z. The {BODY} macro above is what
+// computes one of Z's digits as a sum of the products of relevant digits
+// of X and Y. This yields a nearly 2x improvement compared to more obvious
+// implementations.
+// This method is *highly* performance sensitive even for the advanced
+// algorithms, which use this as the base case of their recursive calls.
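+// Ignoring carries, each output digit is the convolution
+//   Z[i] = sum_{j} X[j] * Y[i - j],
+// with j restricted so that both indices stay in range; the {min}/{max}
+// parameters of {BODY} express exactly that restriction.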
+void ProcessorImpl::MultiplySchoolbook(RWDigits Z, Digits X, Digits Y) {
+ DCHECK(IsDigitNormalized(X));
+ DCHECK(IsDigitNormalized(Y));
+ DCHECK(X.len() >= Y.len());
+ DCHECK(Z.len() >= X.len() + Y.len());
+ if (X.len() == 0 || Y.len() == 0) return Z.Clear();
+ digit_t next, next_carry = 0, carry = 0;
+ // Unrolled first iteration: it's trivial.
+ Z[0] = digit_mul(X[0], Y[0], &next);
+ int i = 1;
+ // Unrolled second iteration: a little less setup.
+ if (i < Y.len()) {
+ digit_t zi = next;
+ next = 0;
+ BODY(0, 1);
+ i++;
+ }
+ // Main part: since X.len() >= Y.len() > i, no bounds checks are needed.
+ for (; i < Y.len(); i++) {
+ digit_t zi = digit_add2(next, carry, &carry);
+ next = next_carry + carry;
+ carry = 0;
+ next_carry = 0;
+ BODY(0, i);
+ AddWorkEstimate(i);
+ if (should_terminate()) return;
+ }
+ // Last part: i exceeds Y now, we have to be careful about bounds.
+ int loop_end = X.len() + Y.len() - 2;
+ for (; i <= loop_end; i++) {
+ int max_x_index = std::min(i, X.len() - 1);
+ int max_y_index = Y.len() - 1;
+ int min_x_index = i - max_y_index;
+ digit_t zi = digit_add2(next, carry, &carry);
+ next = next_carry + carry;
+ carry = 0;
+ next_carry = 0;
+ BODY(min_x_index, max_x_index);
+ AddWorkEstimate(max_x_index - min_x_index);
+ if (should_terminate()) return;
+ }
+ // Write the last digit, and zero out any extra space in Z.
+ Z[i++] = digit_add2(next, carry, &carry);
+ DCHECK(carry == 0); // NOLINT(readability/check)
+ for (; i < Z.len(); i++) Z[i] = 0;
+}
+
+#undef BODY
+
+} // namespace bigint
+} // namespace v8
diff --git a/chromium/v8/src/bigint/vector-arithmetic.cc b/chromium/v8/src/bigint/vector-arithmetic.cc
index 9a28b168ba5..734b4439110 100644
--- a/chromium/v8/src/bigint/vector-arithmetic.cc
+++ b/chromium/v8/src/bigint/vector-arithmetic.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/bigint/bigint.h"
+#include "src/bigint/vector-arithmetic.h"
namespace v8 {
namespace bigint {
diff --git a/chromium/v8/src/bigint/vector-arithmetic.h b/chromium/v8/src/bigint/vector-arithmetic.h
new file mode 100644
index 00000000000..617cb20b552
--- /dev/null
+++ b/chromium/v8/src/bigint/vector-arithmetic.h
@@ -0,0 +1,20 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Helper functions that operate on {Digits} vectors of digits.
+
+#ifndef V8_BIGINT_VECTOR_ARITHMETIC_H_
+#define V8_BIGINT_VECTOR_ARITHMETIC_H_
+
+#include "src/bigint/bigint.h"
+
+namespace v8 {
+namespace bigint {
+
+inline bool IsDigitNormalized(Digits X) { return X.len() == 0 || X.msd() != 0; }
+
+} // namespace bigint
+} // namespace v8
+
+#endif // V8_BIGINT_VECTOR_ARITHMETIC_H_
diff --git a/chromium/v8/src/builtins/accessors.cc b/chromium/v8/src/builtins/accessors.cc
index c255184caeb..0285b33e1f6 100644
--- a/chromium/v8/src/builtins/accessors.cc
+++ b/chromium/v8/src/builtins/accessors.cc
@@ -113,8 +113,7 @@ void Accessors::ReconfigureToDataProperty(
v8::Local<v8::Name> key, v8::Local<v8::Value> val,
const v8::PropertyCallbackInfo<v8::Boolean>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
- RuntimeCallTimerScope stats_scope(
- isolate, RuntimeCallCounterId::kReconfigureToDataProperty);
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kReconfigureToDataProperty);
HandleScope scope(isolate);
Handle<Object> receiver = Utils::OpenHandle(*info.This());
Handle<JSObject> holder =
@@ -155,8 +154,7 @@ Handle<AccessorInfo> Accessors::MakeArgumentsIteratorInfo(Isolate* isolate) {
void Accessors::ArrayLengthGetter(
v8::Local<v8::Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
- RuntimeCallTimerScope timer(isolate,
- RuntimeCallCounterId::kArrayLengthGetter);
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kArrayLengthGetter);
DisallowGarbageCollection no_gc;
HandleScope scope(isolate);
JSArray holder = JSArray::cast(*Utils::OpenHandle(*info.Holder()));
@@ -168,8 +166,7 @@ void Accessors::ArrayLengthSetter(
v8::Local<v8::Name> name, v8::Local<v8::Value> val,
const v8::PropertyCallbackInfo<v8::Boolean>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
- RuntimeCallTimerScope timer(isolate,
- RuntimeCallCounterId::kArrayLengthSetter);
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kArrayLengthSetter);
HandleScope scope(isolate);
DCHECK(Utils::OpenHandle(*name)->SameValue(
@@ -206,7 +203,12 @@ void Accessors::ArrayLengthSetter(
return;
}
- JSArray::SetLength(array, length);
+ if (JSArray::SetLength(array, length).IsNothing()) {
+ // TODO(victorgomes): AccessorNameBooleanSetterCallback does not handle
+ // exceptions.
+ FATAL("Fatal JavaScript invalid array length %u", length);
+ UNREACHABLE();
+ }
uint32_t actual_new_len = 0;
CHECK(array->length().ToArrayLength(&actual_new_len));
@@ -282,8 +284,7 @@ Handle<AccessorInfo> Accessors::MakeModuleNamespaceEntryInfo(
void Accessors::StringLengthGetter(
v8::Local<v8::Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
- RuntimeCallTimerScope timer(isolate,
- RuntimeCallCounterId::kStringLengthGetter);
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kStringLengthGetter);
DisallowGarbageCollection no_gc;
HandleScope scope(isolate);
@@ -330,8 +331,7 @@ static Handle<Object> GetFunctionPrototype(Isolate* isolate,
void Accessors::FunctionPrototypeGetter(
v8::Local<v8::Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
- RuntimeCallTimerScope timer(isolate,
- RuntimeCallCounterId::kFunctionPrototypeGetter);
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kFunctionPrototypeGetter);
HandleScope scope(isolate);
Handle<JSFunction> function =
Handle<JSFunction>::cast(Utils::OpenHandle(*info.Holder()));
@@ -344,8 +344,7 @@ void Accessors::FunctionPrototypeSetter(
v8::Local<v8::Name> name, v8::Local<v8::Value> val,
const v8::PropertyCallbackInfo<v8::Boolean>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
- RuntimeCallTimerScope timer(isolate,
- RuntimeCallCounterId::kFunctionPrototypeSetter);
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kFunctionPrototypeSetter);
HandleScope scope(isolate);
Handle<Object> value = Utils::OpenHandle(*val);
Handle<JSFunction> object =
@@ -367,8 +366,7 @@ Handle<AccessorInfo> Accessors::MakeFunctionPrototypeInfo(Isolate* isolate) {
void Accessors::FunctionLengthGetter(
v8::Local<v8::Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
- RuntimeCallTimerScope timer(isolate,
- RuntimeCallCounterId::kFunctionLengthGetter);
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kFunctionLengthGetter);
HandleScope scope(isolate);
Handle<JSFunction> function =
Handle<JSFunction>::cast(Utils::OpenHandle(*info.Holder()));
@@ -722,8 +720,7 @@ Handle<AccessorInfo> Accessors::MakeFunctionCallerInfo(Isolate* isolate) {
void Accessors::BoundFunctionLengthGetter(
v8::Local<v8::Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
- RuntimeCallTimerScope timer(isolate,
- RuntimeCallCounterId::kBoundFunctionLengthGetter);
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kBoundFunctionLengthGetter);
HandleScope scope(isolate);
Handle<JSBoundFunction> function =
Handle<JSBoundFunction>::cast(Utils::OpenHandle(*info.Holder()));
@@ -749,8 +746,7 @@ Handle<AccessorInfo> Accessors::MakeBoundFunctionLengthInfo(Isolate* isolate) {
void Accessors::BoundFunctionNameGetter(
v8::Local<v8::Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
- RuntimeCallTimerScope timer(isolate,
- RuntimeCallCounterId::kBoundFunctionNameGetter);
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kBoundFunctionNameGetter);
HandleScope scope(isolate);
Handle<JSBoundFunction> function =
Handle<JSBoundFunction>::cast(Utils::OpenHandle(*info.Holder()));
diff --git a/chromium/v8/src/builtins/aggregate-error.tq b/chromium/v8/src/builtins/aggregate-error.tq
index 9c70ffcb006..c811403274d 100644
--- a/chromium/v8/src/builtins/aggregate-error.tq
+++ b/chromium/v8/src/builtins/aggregate-error.tq
@@ -19,8 +19,9 @@ transitioning javascript builtin AggregateErrorConstructor(
// [[Writable]]: *true*, [[Enumerable]]: *false*, [[Configurable]]: *true*
// c. Perform ! DefinePropertyOrThrow(_O_, *"message"*, _msgDesc_).
const message: JSAny = arguments[1];
- const obj: JSObject =
- ConstructAggregateErrorHelper(context, target, newTarget, message);
+ const options: JSAny = arguments[2];
+ const obj: JSObject = ConstructAggregateErrorHelper(
+ context, target, newTarget, message, options);
// 4. Let errorsList be ? IterableToList(errors).
const errors: JSAny = arguments[0];
@@ -38,7 +39,7 @@ transitioning javascript builtin AggregateErrorConstructor(
}
extern transitioning runtime ConstructAggregateErrorHelper(
- Context, JSFunction, JSAny, Object): JSObject;
+ Context, JSFunction, JSAny, Object, Object): JSObject;
extern transitioning runtime ConstructInternalAggregateErrorHelper(
Context, Object): JSObject;
diff --git a/chromium/v8/src/builtins/arm/builtins-arm.cc b/chromium/v8/src/builtins/arm/builtins-arm.cc
index 817d30fe26a..83252446af8 100644
--- a/chromium/v8/src/builtins/arm/builtins-arm.cc
+++ b/chromium/v8/src/builtins/arm/builtins-arm.cc
@@ -6,6 +6,7 @@
#include "src/api/api-arguments.h"
#include "src/codegen/code-factory.h"
+#include "src/codegen/interface-descriptors-inl.h"
// For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop.
#include "src/codegen/macro-assembler-inl.h"
#include "src/codegen/register-configuration.h"
@@ -118,7 +119,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// r0: number of arguments (untagged)
// r1: constructor function
// r3: new target
- __ InvokeFunctionWithNewTarget(r1, r3, r0, CALL_FUNCTION);
+ __ InvokeFunctionWithNewTarget(r1, r3, r0, InvokeType::kCall);
// Restore context from the frame.
__ ldr(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
@@ -236,7 +237,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ Push(r6);
// Call the function.
- __ InvokeFunctionWithNewTarget(r1, r3, r0, CALL_FUNCTION);
+ __ InvokeFunctionWithNewTarget(r1, r3, r0, InvokeType::kCall);
// ----------- S t a t e -------------
// -- r0: constructor result
@@ -337,7 +338,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Store input value into generator object.
__ str(r0, FieldMemOperand(r1, JSGeneratorObject::kInputOrDebugPosOffset));
__ RecordWriteField(r1, JSGeneratorObject::kInputOrDebugPosOffset, r0,
- kLRHasNotBeenSaved, kDontSaveFPRegs);
+ kLRHasNotBeenSaved, SaveFPRegsMode::kIgnore);
// Load suspended function and context.
__ ldr(r4, FieldMemOperand(r1, JSGeneratorObject::kFunctionOffset));
@@ -388,16 +389,13 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
FieldMemOperand(r1, JSGeneratorObject::kParametersAndRegistersOffset));
{
Label done_loop, loop;
- __ mov(r6, r3);
-
__ bind(&loop);
- __ sub(r6, r6, Operand(1), SetCC);
+ __ sub(r3, r3, Operand(1), SetCC);
__ b(lt, &done_loop);
- __ add(scratch, r2, Operand(r6, LSL, kTaggedSizeLog2));
+ __ add(scratch, r2, Operand(r3, LSL, kTaggedSizeLog2));
__ ldr(scratch, FieldMemOperand(scratch, FixedArray::kHeaderSize));
__ Push(scratch);
__ b(&loop);
-
__ bind(&done_loop);
// Push receiver.
@@ -799,8 +797,8 @@ static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm,
// Store code entry in the closure.
__ str(optimized_code, FieldMemOperand(closure, JSFunction::kCodeOffset));
__ RecordWriteField(closure, JSFunction::kCodeOffset, optimized_code,
- kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
+ kLRHasNotBeenSaved, SaveFPRegsMode::kIgnore,
+ RememberedSetAction::kOmit, SmiCheck::kOmit);
}
static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
@@ -1051,7 +1049,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
__ ldr(feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ ldr(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
- if (__ emit_debug_code()) {
+ if (FLAG_debug_code) {
UseScratchRegisterScope temps(masm);
Register scratch = temps.Acquire();
__ CompareObjectType(feedback_vector, scratch, scratch,
@@ -1124,7 +1122,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
// Baseline code frames store the feedback vector where interpreter would
// store the bytecode offset.
- if (__ emit_debug_code()) {
+ if (FLAG_debug_code) {
UseScratchRegisterScope temps(masm);
Register scratch = temps.Acquire();
__ CompareObjectType(feedback_vector, scratch, scratch,
@@ -1646,7 +1644,7 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
__ Jump(kJavaScriptCallCodeStartRegister);
}
-void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
+void Builtins::Generate_InterpreterEnterAtNextBytecode(MacroAssembler* masm) {
// Get bytecode array and bytecode offset from the stack frame.
__ ldr(kInterpreterBytecodeArrayRegister,
MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
@@ -1691,7 +1689,7 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
__ Abort(AbortReason::kInvalidBytecodeAdvance);
}
-void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+void Builtins::Generate_InterpreterEnterAtBytecode(MacroAssembler* masm) {
Generate_InterpreterEnterBytecode(masm);
}
@@ -1841,6 +1839,8 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
}
void Builtins::Generate_BaselineOnStackReplacement(MacroAssembler* masm) {
+ __ ldr(kContextRegister,
+ MemOperand(fp, BaselineFrameConstants::kContextOffset));
return OnStackReplacement(masm, false);
}
@@ -2009,6 +2009,7 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
}
// static
+// TODO(v8:11615): Observe Code::kMaxArguments in CallOrConstructVarargs
void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
Handle<Code> code) {
// ----------- S t a t e -------------
@@ -2020,7 +2021,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// -----------------------------------
Register scratch = r8;
- if (masm->emit_debug_code()) {
+ if (FLAG_debug_code) {
// Allow r2 to be a FixedArray, or a FixedDoubleArray if r4 == 0.
Label ok, fail;
__ AssertNotSmi(r2);
@@ -2278,7 +2279,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ ldrh(r2,
FieldMemOperand(r2, SharedFunctionInfo::kFormalParameterCountOffset));
- __ InvokeFunctionCode(r1, no_reg, r2, r0, JUMP_FUNCTION);
+ __ InvokeFunctionCode(r1, no_reg, r2, r0, InvokeType::kJump);
// The function is a "classConstructor", need to raise an exception.
__ bind(&class_constructor);
@@ -2640,6 +2641,11 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
// TODO(v8:10701): Implement for this platform.
__ Trap();
}
+
+void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) {
+ // Only needed on x64.
+ __ Trap();
+}
#endif // V8_ENABLE_WEBASSEMBLY
void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
@@ -2652,12 +2658,12 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// sp: stack pointer (restored as callee's sp after C call)
// cp: current context (C callee-saved)
//
- // If argv_mode == kArgvInRegister:
+ // If argv_mode == ArgvMode::kRegister:
// r2: pointer to the first argument
__ mov(r5, Operand(r1));
- if (argv_mode == kArgvInRegister) {
+ if (argv_mode == ArgvMode::kRegister) {
// Move argv into the correct register.
__ mov(r1, Operand(r2));
} else {
@@ -2669,7 +2675,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// Enter the exit frame that transitions from JavaScript to C++.
FrameScope scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(
- save_doubles == kSaveFPRegs, 0,
+ save_doubles == SaveFPRegsMode::kSave, 0,
builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT);
// Store a copy of argc in callee-saved registers for later.
@@ -2726,12 +2732,12 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// r0:r1: result
// sp: stack pointer
// fp: frame pointer
- Register argc = argv_mode == kArgvInRegister
+ Register argc = argv_mode == ArgvMode::kRegister
// We don't want to pop arguments so set argc to no_reg.
? no_reg
// Callee-saved register r4 still holds argc.
: r4;
- __ LeaveExitFrame(save_doubles == kSaveFPRegs, argc);
+ __ LeaveExitFrame(save_doubles == SaveFPRegsMode::kSave, argc);
__ mov(pc, lr);
// Handling of exception.
@@ -2841,7 +2847,7 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
// If we reach this code, 30 <= exponent <= 83.
// `TryInlineTruncateDoubleToI` above will have truncated any double with an
// exponent lower than 30.
- if (masm->emit_debug_code()) {
+ if (FLAG_debug_code) {
// Scratch is exponent - 1.
__ cmp(scratch, Operand(30 - 1));
__ Check(ge, AbortReason::kUnexpectedValue);
@@ -2957,7 +2963,7 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
// No more valid handles (the result handle was the last one). Restore
// previous handle scope.
__ str(r4, MemOperand(r9, kNextOffset));
- if (__ emit_debug_code()) {
+ if (FLAG_debug_code) {
__ ldr(r1, MemOperand(r9, kLevelOffset));
__ cmp(r1, r6);
__ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall);
diff --git a/chromium/v8/src/builtins/arm64/builtins-arm64.cc b/chromium/v8/src/builtins/arm64/builtins-arm64.cc
index d095d60b302..3cf3f0153fc 100644
--- a/chromium/v8/src/builtins/arm64/builtins-arm64.cc
+++ b/chromium/v8/src/builtins/arm64/builtins-arm64.cc
@@ -6,6 +6,7 @@
#include "src/api/api-arguments.h"
#include "src/codegen/code-factory.h"
+#include "src/codegen/interface-descriptors-inl.h"
// For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop.
#include "src/codegen/macro-assembler-inl.h"
#include "src/codegen/register-configuration.h"
@@ -99,7 +100,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
Label already_aligned;
Register argc = x0;
- if (__ emit_debug_code()) {
+ if (FLAG_debug_code) {
// Check that FrameScope pushed the context on to the stack already.
__ Peek(x2, 0);
__ Cmp(x2, cp);
@@ -176,7 +177,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// -----------------------------------
// Call the function.
- __ InvokeFunctionWithNewTarget(x1, x3, argc, CALL_FUNCTION);
+ __ InvokeFunctionWithNewTarget(x1, x3, argc, InvokeType::kCall);
// Restore the context from the frame.
__ Ldr(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
@@ -219,7 +220,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ EnterFrame(StackFrame::CONSTRUCT);
Label post_instantiation_deopt_entry, not_create_implicit_receiver;
- if (__ emit_debug_code()) {
+ if (FLAG_debug_code) {
// Check that FrameScope pushed the context on to the stack already.
__ Peek(x2, 0);
__ Cmp(x2, cp);
@@ -336,7 +337,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Call the function.
__ Mov(x0, x12);
- __ InvokeFunctionWithNewTarget(x1, x3, x0, CALL_FUNCTION);
+ __ InvokeFunctionWithNewTarget(x1, x3, x0, InvokeType::kCall);
// ----------- S t a t e -------------
// -- sp[0*kSystemPointerSize]: implicit receiver
@@ -442,7 +443,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ StoreTaggedField(
x0, FieldMemOperand(x1, JSGeneratorObject::kInputOrDebugPosOffset));
__ RecordWriteField(x1, JSGeneratorObject::kInputOrDebugPosOffset, x0,
- kLRHasNotBeenSaved, kDontSaveFPRegs);
+ kLRHasNotBeenSaved, SaveFPRegsMode::kIgnore);
// Load suspended function and context.
__ LoadTaggedPointerField(
@@ -639,7 +640,8 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
// Initialize the pointer cage base register.
- __ Mov(kPointerCageBaseRegister, x0);
+ __ LoadRootRelative(kPtrComprCageBaseRegister,
+ IsolateData::cage_base_offset());
#endif
}
@@ -925,7 +927,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Don't initialize the reserved registers.
// x26 : root register (kRootRegister).
// x27 : context pointer (cp).
- // x28 : pointer cage base register (kPointerCageBaseRegister).
+ // x28 : pointer cage base register (kPtrComprCageBaseRegister).
// x29 : frame pointer (fp).
Handle<Code> builtin = is_construct
@@ -966,8 +968,8 @@ static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm,
__ StoreTaggedField(optimized_code,
FieldMemOperand(closure, JSFunction::kCodeOffset));
__ RecordWriteField(closure, JSFunction::kCodeOffset, optimized_code,
- kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
+ kLRHasNotBeenSaved, SaveFPRegsMode::kIgnore,
+ RememberedSetAction::kOmit, SmiCheck::kOmit);
}
static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
@@ -998,7 +1000,7 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
__ LeaveFrame(StackFrame::INTERPRETED);
// Drop receiver + arguments.
- if (__ emit_debug_code()) {
+ if (FLAG_debug_code) {
__ Tst(params_size, kSystemPointerSize - 1);
__ Check(eq, AbortReason::kUnexpectedValue);
}
@@ -1230,7 +1232,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ LoadTaggedPointerField(
feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
- if (__ emit_debug_code()) {
+ if (FLAG_debug_code) {
__ CompareObjectType(feedback_vector, x4, x4, FEEDBACK_VECTOR_TYPE);
__ Assert(eq, AbortReason::kExpectedFeedbackVector);
}
@@ -1288,7 +1290,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
// Baseline code frames store the feedback vector where interpreter would
// store the bytecode offset.
- if (__ emit_debug_code()) {
+ if (FLAG_debug_code) {
__ CompareObjectType(feedback_vector, x4, x4, FEEDBACK_VECTOR_TYPE);
__ Assert(eq, AbortReason::kExpectedFeedbackVector);
}
@@ -1859,7 +1861,7 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
__ Br(x17);
}
-void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
+void Builtins::Generate_InterpreterEnterAtNextBytecode(MacroAssembler* masm) {
// Get bytecode array and bytecode offset from the stack frame.
__ ldr(kInterpreterBytecodeArrayRegister,
MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
@@ -1903,7 +1905,7 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
__ Abort(AbortReason::kInvalidBytecodeAdvance);
}
-void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+void Builtins::Generate_InterpreterEnterAtBytecode(MacroAssembler* masm) {
Generate_InterpreterEnterBytecode(masm);
}
@@ -2087,6 +2089,8 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
}
void Builtins::Generate_BaselineOnStackReplacement(MacroAssembler* masm) {
+ __ ldr(kContextRegister,
+ MemOperand(fp, BaselineFrameConstants::kContextOffset));
return OnStackReplacement(masm, false);
}
@@ -2385,6 +2389,7 @@ void Generate_PrepareForCopyingVarargs(MacroAssembler* masm, Register argc,
} // namespace
// static
+// TODO(v8:11615): Observe Code::kMaxArguments in CallOrConstructVarargs
void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
Handle<Code> code) {
// ----------- S t a t e -------------
@@ -2394,7 +2399,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// -- x4 : len (number of elements to push from args)
// -- x3 : new.target (for [[Construct]])
// -----------------------------------
- if (masm->emit_debug_code()) {
+ if (FLAG_debug_code) {
// Allow x2 to be a FixedArray, or a FixedDoubleArray if x4 == 0.
Label ok, fail;
__ AssertNotSmi(x2, AbortReason::kOperandIsNotAFixedArray);
@@ -2618,7 +2623,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ Ldrh(x2,
FieldMemOperand(x2, SharedFunctionInfo::kFormalParameterCountOffset));
- __ InvokeFunctionCode(x1, no_reg, x2, x0, JUMP_FUNCTION);
+ __ InvokeFunctionCode(x1, no_reg, x2, x0, InvokeType::kJump);
// The function is a "classConstructor", need to raise an exception.
__ Bind(&class_constructor);
@@ -3036,6 +3041,11 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
// TODO(v8:10701): Implement for this platform.
__ Trap();
}
+
+void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) {
+ // Only needed on x64.
+ __ Trap();
+}
#endif // V8_ENABLE_WEBASSEMBLY
void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
@@ -3053,7 +3063,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// Register parameters:
// x0: argc (including receiver, untagged)
// x1: target
- // If argv_mode == kArgvInRegister:
+ // If argv_mode == ArgvMode::kRegister:
// x11: argv (pointer to first argument)
//
// The stack on entry holds the arguments and the receiver, with the receiver
@@ -3085,7 +3095,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// (arg[argc-2]), or just below the receiver in case there are no arguments.
// - Adjust for the arg[] array.
Register temp_argv = x11;
- if (argv_mode == kArgvOnStack) {
+ if (argv_mode == ArgvMode::kStack) {
__ SlotAddress(temp_argv, x0);
// - Adjust for the receiver.
__ Sub(temp_argv, temp_argv, 1 * kSystemPointerSize);
@@ -3096,7 +3106,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// Enter the exit frame.
FrameScope scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(
- save_doubles == kSaveFPRegs, x10, extra_stack_space,
+ save_doubles == SaveFPRegsMode::kSave, x10, extra_stack_space,
builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT);
// Poke callee-saved registers into reserved space.
@@ -3177,8 +3187,8 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
__ Peek(argc, 2 * kSystemPointerSize);
__ Peek(target, 3 * kSystemPointerSize);
- __ LeaveExitFrame(save_doubles == kSaveFPRegs, x10, x9);
- if (argv_mode == kArgvOnStack) {
+ __ LeaveExitFrame(save_doubles == SaveFPRegsMode::kSave, x10, x9);
+ if (argv_mode == ArgvMode::kStack) {
// Drop the remaining stack slots and return from the stub.
__ DropArguments(x11);
}
@@ -3247,7 +3257,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// Compute the handler entry address and jump to it. We use x17 here for the
// jump target, as this jump can occasionally end up at the start of
- // InterpreterEnterBytecodeDispatch, which when CFI is enabled starts with
+ // InterpreterEnterAtBytecode, which when CFI is enabled starts with
// a "BTI c".
UseScratchRegisterScope temps(masm);
temps.Exclude(x17);
@@ -3296,7 +3306,7 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
// signed overflow in the int64_t target. Since we've already handled
// exponents >= 84, we can guarantee that 63 <= exponent < 84.
- if (masm->emit_debug_code()) {
+ if (FLAG_debug_code) {
__ Cmp(exponent, HeapNumber::kExponentBias + 63);
// Exponents less than this should have been handled by the Fcvt case.
__ Check(ge, AbortReason::kUnexpectedValue);
@@ -3412,7 +3422,7 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
// No more valid handles (the result handle was the last one). Restore
// previous handle scope.
__ Str(next_address_reg, MemOperand(handle_scope_base, kNextOffset));
- if (__ emit_debug_code()) {
+ if (FLAG_debug_code) {
__ Ldr(w1, MemOperand(handle_scope_base, kLevelOffset));
__ Cmp(w1, level_reg);
__ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall);
diff --git a/chromium/v8/src/builtins/arraybuffer.tq b/chromium/v8/src/builtins/arraybuffer.tq
index 179c4b38fd2..5794414443b 100644
--- a/chromium/v8/src/builtins/arraybuffer.tq
+++ b/chromium/v8/src/builtins/arraybuffer.tq
@@ -9,21 +9,25 @@ transitioning javascript builtin ArrayBufferPrototypeGetByteLength(
js-implicit context: NativeContext, receiver: JSAny)(): Number {
// 1. Let O be the this value.
// 2. Perform ? RequireInternalSlot(O, [[ArrayBufferData]]).
+ const functionName = 'get ArrayBuffer.prototype.byteLength';
const o = Cast<JSArrayBuffer>(receiver) otherwise
ThrowTypeError(
- MessageTemplate::kIncompatibleMethodReceiver,
- 'get ArrayBuffer.prototype.byteLength', receiver);
+ MessageTemplate::kIncompatibleMethodReceiver, functionName, receiver);
// 3. If IsSharedArrayBuffer(O) is true, throw a TypeError exception.
if (IsSharedArrayBuffer(o)) {
ThrowTypeError(
- MessageTemplate::kIncompatibleMethodReceiver,
- 'get ArrayBuffer.prototype.byteLength', receiver);
+ MessageTemplate::kIncompatibleMethodReceiver, functionName, receiver);
}
- // 4. If IsDetachedBuffer(O) is true, throw a TypeError exception.
+ // 4. If IsResizableArrayBuffer(O) is true, throw a TypeError exception.
+ if (IsResizableArrayBuffer(o)) {
+ ThrowTypeError(
+ MessageTemplate::kIncompatibleMethodReceiver, functionName, receiver);
+ }
+ // 5. If IsDetachedBuffer(O) is true, throw a TypeError exception.
// TODO(v8:4895): We don't actually throw here.
- // 5. Let length be O.[[ArrayBufferByteLength]].
+ // 6. Let length be O.[[ArrayBufferByteLength]].
const length = o.byte_length;
- // 6. Return length.
+ // 7. Return length.
return Convert<Number>(length);
}
@@ -32,15 +36,43 @@ transitioning javascript builtin SharedArrayBufferPrototypeGetByteLength(
js-implicit context: NativeContext, receiver: JSAny)(): Number {
// 1. Let O be the this value.
// 2. Perform ? RequireInternalSlot(O, [[ArrayBufferData]]).
+ const functionName = 'get SharedArrayBuffer.prototype.byteLength';
const o = Cast<JSArrayBuffer>(receiver) otherwise
ThrowTypeError(
- MessageTemplate::kIncompatibleMethodReceiver,
- 'get SharedArrayBuffer.prototype.byteLength', receiver);
- // 3. If IsSharedArrayBuffer(O) is false, throw a TypeError exception.
+ MessageTemplate::kIncompatibleMethodReceiver, functionName, receiver);
+  // 3. If IsSharedArrayBuffer(O) is false, throw a TypeError exception.
if (!IsSharedArrayBuffer(o)) {
ThrowTypeError(
- MessageTemplate::kIncompatibleMethodReceiver,
- 'get SharedArrayBuffer.prototype.byteLength', receiver);
+ MessageTemplate::kIncompatibleMethodReceiver, functionName, receiver);
+ }
+ // 4. If IsResizableArrayBuffer(O) is true, throw a TypeError exception.
+ if (IsResizableArrayBuffer(o)) {
+ ThrowTypeError(
+ MessageTemplate::kIncompatibleMethodReceiver, functionName, receiver);
+ }
+ // 5. Let length be O.[[ArrayBufferByteLength]].
+ const length = o.byte_length;
+ // 6. Return length.
+ return Convert<Number>(length);
+}
+
+// #sec-get-resizablearraybuffer.prototype.bytelength
+transitioning javascript builtin ResizableArrayBufferPrototypeGetByteLength(
+ js-implicit context: NativeContext, receiver: JSAny)(): Number {
+ // 1. Let O be the this value.
+ // 2. Perform ? RequireInternalSlot(O, [[ArrayBufferMaxByteLength]]).
+ const functionName = 'get ResizableArrayBuffer.prototype.byteLength';
+ const o = Cast<JSArrayBuffer>(receiver) otherwise
+ ThrowTypeError(
+ MessageTemplate::kIncompatibleMethodReceiver, functionName, receiver);
+ if (!IsResizableArrayBuffer(o)) {
+ ThrowTypeError(
+ MessageTemplate::kIncompatibleMethodReceiver, functionName, receiver);
+ }
+ // 3. If IsSharedArrayBuffer(O) is true, throw a TypeError exception.
+ if (IsSharedArrayBuffer(o)) {
+ ThrowTypeError(
+ MessageTemplate::kIncompatibleMethodReceiver, functionName, receiver);
}
// 4. Let length be O.[[ArrayBufferByteLength]].
const length = o.byte_length;
@@ -48,6 +80,55 @@ transitioning javascript builtin SharedArrayBufferPrototypeGetByteLength(
return Convert<Number>(length);
}
+// #sec-get-resizablearraybuffer.prototype.maxbytelength
+transitioning javascript builtin ResizableArrayBufferPrototypeGetMaxByteLength(
+ js-implicit context: NativeContext, receiver: JSAny)(): Number {
+ // 1. Let O be the this value.
+ // 2. Perform ? RequireInternalSlot(O, [[ArrayBufferMaxByteLength]]).
+ const functionName = 'get ResizableArrayBuffer.prototype.maxByteLength';
+ const o = Cast<JSArrayBuffer>(receiver) otherwise
+ ThrowTypeError(
+ MessageTemplate::kIncompatibleMethodReceiver, functionName, receiver);
+ if (!IsResizableArrayBuffer(o)) {
+ ThrowTypeError(
+ MessageTemplate::kIncompatibleMethodReceiver, functionName, receiver);
+ }
+ // 3. If IsSharedArrayBuffer(O) is true, throw a TypeError exception.
+ if (IsSharedArrayBuffer(o)) {
+ ThrowTypeError(
+ MessageTemplate::kIncompatibleMethodReceiver, functionName, receiver);
+ }
+ // 4. Let length be O.[[ArrayBufferMaxByteLength]].
+ const length = o.max_byte_length;
+ // 5. Return length.
+ return Convert<Number>(length);
+}
+
+// #sec-get-growablesharedarraybuffer.prototype.maxbytelength
+transitioning javascript builtin
+GrowableSharedArrayBufferPrototypeGetMaxByteLength(
+ js-implicit context: NativeContext, receiver: JSAny)(): Number {
+ // 1. Let O be the this value.
+ // 2. Perform ? RequireInternalSlot(O, [[ArrayBufferMaxByteLength]]).
+ const functionName = 'get GrowableSharedArrayBuffer.prototype.maxByteLength';
+ const o = Cast<JSArrayBuffer>(receiver) otherwise
+ ThrowTypeError(
+ MessageTemplate::kIncompatibleMethodReceiver, functionName, receiver);
+ if (!IsResizableArrayBuffer(o)) {
+ ThrowTypeError(
+ MessageTemplate::kIncompatibleMethodReceiver, functionName, receiver);
+ }
+ // 3. If IsSharedArrayBuffer(O) is false, throw a TypeError exception.
+ if (!IsSharedArrayBuffer(o)) {
+ ThrowTypeError(
+ MessageTemplate::kIncompatibleMethodReceiver, functionName, receiver);
+ }
+ // 4. Let length be O.[[ArrayBufferMaxByteLength]].
+ const length = o.max_byte_length;
+ // 5. Return length.
+ return Convert<Number>(length);
+}
+
// #sec-arraybuffer.isview
transitioning javascript builtin ArrayBufferIsView(arg: JSAny): Boolean {
// 1. If Type(arg) is not Object, return false.
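The Torque getters added above surface on the flag-gated ResizableArrayBuffer and GrowableSharedArrayBuffer globals that this patch wires into the native context. A minimal JavaScript sketch of the resulting behaviour (illustrative only, not part of the patch; assumes a d8 build with the in-progress resizable-buffers flag enabled):

  // new ResizableArrayBuffer(initialByteLength, maxByteLength), per the
  // constructor change in builtins-arraybuffer.cc later in this patch.
  const rab = new ResizableArrayBuffer(8, 1024);
  rab.byteLength;      // 8    -> ResizableArrayBufferPrototypeGetByteLength
  rab.maxByteLength;   // 1024 -> ResizableArrayBufferPrototypeGetMaxByteLength

  const gsab = new GrowableSharedArrayBuffer(8, 1024);
  gsab.maxByteLength;  // 1024 -> GrowableSharedArrayBufferPrototypeGetMaxByteLength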
diff --git a/chromium/v8/src/builtins/base.tq b/chromium/v8/src/builtins/base.tq
index 08639c04daf..fc84e1a2ce4 100644
--- a/chromium/v8/src/builtins/base.tq
+++ b/chromium/v8/src/builtins/base.tq
@@ -141,6 +141,7 @@ intrinsic %MakeLazy<T: type, A1: type, A2: type, A3: type>(
// template, but Torque doesn't understand how to use templates for extern
// macros, so just add whatever overload definitions you need here.
extern macro RunLazy(Lazy<Smi>): Smi;
+extern macro RunLazy(Lazy<JSAny>): JSAny;
// A Smi value containing a bitfield struct as its integer data.
@useParentTypeChecker type SmiTagged<T : type extends uint31> extends Smi;
@@ -262,6 +263,8 @@ extern enum UpdateFeedbackMode { kOptionalFeedback, kGuaranteedFeedback }
extern operator '==' macro UpdateFeedbackModeEqual(
constexpr UpdateFeedbackMode, constexpr UpdateFeedbackMode): constexpr bool;
+extern enum CallFeedbackContent extends int32 { kTarget, kReceiver }
+
extern enum UnicodeEncoding { UTF16, UTF32 }
// Promise constants
@@ -961,6 +964,8 @@ extern operator '|' macro ConstexprWord32Or(
constexpr int32, constexpr int32): constexpr int32;
extern operator '^' macro Word32Xor(int32, int32): int32;
extern operator '^' macro Word32Xor(uint32, uint32): uint32;
+extern operator '<<' macro ConstexprWord32Shl(
+ constexpr uint32, constexpr int32): uint32;
extern operator '==' macro Word64Equal(int64, int64): bool;
extern operator '==' macro Word64Equal(uint64, uint64): bool;
@@ -1296,6 +1301,9 @@ macro GetFastAliasedArgumentsMap(implicit context: Context)(): Map {
macro GetWeakCellMap(implicit context: Context)(): Map {
return %GetClassMapConstant<WeakCell>();
}
+macro GetPrototypeApplyFunction(implicit context: Context)(): JSFunction {
+ return *NativeContextSlot(ContextSlot::FUNCTION_PROTOTYPE_APPLY_INDEX);
+}
// Call(Context, Target, Receiver, ...Args)
// TODO(joshualitt): Assuming the context parameter is for throwing when Target
@@ -1689,7 +1697,7 @@ extern transitioning runtime SetOwnPropertyIgnoreAttributes(
namespace runtime {
extern runtime
-GetDerivedMap(Context, JSFunction, JSReceiver): Map;
+GetDerivedMap(Context, JSFunction, JSReceiver, JSAny): Map;
}
extern macro IsDeprecatedMap(Map): bool;
diff --git a/chromium/v8/src/builtins/builtins-api.cc b/chromium/v8/src/builtins/builtins-api.cc
index 35e6cc393cb..b39bfc84a55 100644
--- a/chromium/v8/src/builtins/builtins-api.cc
+++ b/chromium/v8/src/builtins/builtins-api.cc
@@ -23,8 +23,7 @@ namespace {
// TODO(dcarney): CallOptimization duplicates this logic, merge.
JSReceiver GetCompatibleReceiver(Isolate* isolate, FunctionTemplateInfo info,
JSReceiver receiver) {
- RuntimeCallTimerScope timer(isolate,
- RuntimeCallCounterId::kGetCompatibleReceiver);
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kGetCompatibleReceiver);
Object recv_type = info.signature();
// No signature, return holder.
if (!recv_type.IsFunctionTemplateInfo()) return receiver;
@@ -171,8 +170,7 @@ MaybeHandle<Object> Builtins::InvokeApiFunction(Isolate* isolate,
Handle<Object> receiver,
int argc, Handle<Object> args[],
Handle<HeapObject> new_target) {
- RuntimeCallTimerScope timer(isolate,
- RuntimeCallCounterId::kInvokeApiFunction);
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kInvokeApiFunction);
DCHECK(function->IsFunctionTemplateInfo() ||
(function->IsJSFunction() &&
JSFunction::cast(*function).shared().IsApiFunction()));
diff --git a/chromium/v8/src/builtins/builtins-array-gen.cc b/chromium/v8/src/builtins/builtins-array-gen.cc
index 6b522fda6c0..833627c7b41 100644
--- a/chromium/v8/src/builtins/builtins-array-gen.cc
+++ b/chromium/v8/src/builtins/builtins-array-gen.cc
@@ -10,6 +10,7 @@
#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
#include "src/codegen/code-stub-assembler.h"
+#include "src/codegen/interface-descriptors-inl.h"
#include "src/execution/frame-constants.h"
#include "src/heap/factory-inl.h"
#include "src/objects/allocation-site-inl.h"
diff --git a/chromium/v8/src/builtins/builtins-array.cc b/chromium/v8/src/builtins/builtins-array.cc
index d3bbd980a55..6fe1bfc712f 100644
--- a/chromium/v8/src/builtins/builtins-array.cc
+++ b/chromium/v8/src/builtins/builtins-array.cc
@@ -173,7 +173,8 @@ V8_WARN_UNUSED_RESULT MaybeHandle<Object> SetLengthProperty(
Handle<JSArray> array = Handle<JSArray>::cast(receiver);
if (!JSArray::HasReadOnlyLength(array)) {
DCHECK_LE(length, kMaxUInt32);
- JSArray::SetLength(array, static_cast<uint32_t>(length));
+ MAYBE_RETURN_NULL(
+ JSArray::SetLength(array, static_cast<uint32_t>(length)));
return receiver;
}
}
@@ -207,16 +208,16 @@ V8_WARN_UNUSED_RESULT Object GenericArrayFill(Isolate* isolate,
return *receiver;
}
-V8_WARN_UNUSED_RESULT bool TryFastArrayFill(
+V8_WARN_UNUSED_RESULT Maybe<bool> TryFastArrayFill(
Isolate* isolate, BuiltinArguments* args, Handle<JSReceiver> receiver,
Handle<Object> value, double start_index, double end_index) {
// If indices are too large, use generic path since they are stored as
// properties, not in the element backing store.
- if (end_index > kMaxUInt32) return false;
- if (!receiver->IsJSObject()) return false;
+ if (end_index > kMaxUInt32) return Just(false);
+ if (!receiver->IsJSObject()) return Just(false);
if (!EnsureJSArrayWithWritableFastElements(isolate, receiver, args, 1, 1)) {
- return false;
+ return Just(false);
}
Handle<JSArray> array = Handle<JSArray>::cast(receiver);
@@ -240,14 +241,14 @@ V8_WARN_UNUSED_RESULT bool TryFastArrayFill(
CHECK(DoubleToUint32IfEqualToSelf(end_index, &end));
ElementsAccessor* accessor = array->GetElementsAccessor();
- accessor->Fill(array, value, start, end);
- return true;
+ RETURN_ON_EXCEPTION_VALUE(isolate, accessor->Fill(array, value, start, end),
+ Nothing<bool>());
+ return Just(true);
}
} // namespace
BUILTIN(ArrayPrototypeFill) {
HandleScope scope(isolate);
-
if (isolate->debug_execution_mode() == DebugInfo::kSideEffects) {
if (!isolate->debug()->PerformSideEffectCheckForObject(args.receiver())) {
return ReadOnlyRoots(isolate).exception();
@@ -292,10 +293,12 @@ BUILTIN(ArrayPrototypeFill) {
Handle<Object> value = args.atOrUndefined(isolate, 1);
- if (TryFastArrayFill(isolate, &args, receiver, value, start_index,
- end_index)) {
- return *receiver;
- }
+ bool success;
+ MAYBE_ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, success,
+ TryFastArrayFill(isolate, &args, receiver, value, start_index,
+ end_index));
+ if (success) return *receiver;
return GenericArrayFill(isolate, receiver, value, start_index, end_index);
}
@@ -385,7 +388,9 @@ BUILTIN(ArrayPush) {
}
ElementsAccessor* accessor = array->GetElementsAccessor();
- uint32_t new_length = accessor->Push(array, &args, to_add);
+ uint32_t new_length;
+ MAYBE_ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, new_length, accessor->Push(array, &args, to_add));
return *isolate->factory()->NewNumberFromUint((new_length));
}
@@ -468,7 +473,8 @@ BUILTIN(ArrayPop) {
Handle<Object> result;
if (IsJSArrayFastElementMovingAllowed(isolate, JSArray::cast(*receiver))) {
// Fast Elements Path
- result = array->GetElementsAccessor()->Pop(array);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result, array->GetElementsAccessor()->Pop(array));
} else {
// Use Slow Lookup otherwise
uint32_t new_length = len - 1;
@@ -483,7 +489,9 @@ BUILTIN(ArrayPop) {
isolate->factory()->length_string(),
Object::TypeOf(isolate, array), array));
}
- JSArray::SetLength(array, new_length);
+ bool set_len_ok;
+ MAYBE_ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, set_len_ok, JSArray::SetLength(array, new_length));
}
return *result;
@@ -595,7 +603,8 @@ BUILTIN(ArrayShift) {
if (CanUseFastArrayShift(isolate, receiver)) {
Handle<JSArray> array = Handle<JSArray>::cast(receiver);
- return *array->GetElementsAccessor()->Shift(array);
+ RETURN_RESULT_OR_FAILURE(isolate,
+ array->GetElementsAccessor()->Shift(array));
}
return GenericArrayShift(isolate, receiver, length);
@@ -623,7 +632,9 @@ BUILTIN(ArrayUnshift) {
DCHECK(!JSArray::HasReadOnlyLength(array));
ElementsAccessor* accessor = array->GetElementsAccessor();
- int new_length = accessor->Unshift(array, &args, to_add);
+ uint32_t new_length;
+ MAYBE_ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, new_length, accessor->Unshift(array, &args, to_add));
return Smi::FromInt(new_length);
}
@@ -742,7 +753,7 @@ class ArrayConcatVisitor {
array, fast_elements() ? HOLEY_ELEMENTS : DICTIONARY_ELEMENTS);
array->set_length(*length);
array->set_elements(*storage_fixed_array());
- array->synchronized_set_map(*map);
+ array->set_map(*map, kReleaseStore);
return array;
}
@@ -880,9 +891,11 @@ uint32_t EstimateElementCount(Isolate* isolate, Handle<JSArray> array) {
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) case TYPE##_ELEMENTS:
TYPED_ARRAYS(TYPED_ARRAY_CASE)
-#undef TYPED_ARRAY_CASE
+ RAB_GSAB_TYPED_ARRAYS(TYPED_ARRAY_CASE)
// External arrays are always dense.
return length;
+
+#undef TYPED_ARRAY_CASE
case NO_ELEMENTS:
return 0;
case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
@@ -956,9 +969,7 @@ void CollectElementIndices(Isolate* isolate, Handle<JSObject> object,
}
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) case TYPE##_ELEMENTS:
- TYPED_ARRAYS(TYPED_ARRAY_CASE)
-#undef TYPED_ARRAY_CASE
- {
+ TYPED_ARRAYS(TYPED_ARRAY_CASE) {
size_t length = Handle<JSTypedArray>::cast(object)->length();
if (range <= length) {
length = range;
@@ -974,6 +985,11 @@ void CollectElementIndices(Isolate* isolate, Handle<JSObject> object,
if (length == range) return; // All indices accounted for already.
break;
}
+ RAB_GSAB_TYPED_ARRAYS(TYPED_ARRAY_CASE)
+ // TODO(v8:11111): Support RAB / GSAB.
+ UNREACHABLE();
+
+#undef TYPED_ARRAY_CASE
case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
case SLOW_SLOPPY_ARGUMENTS_ELEMENTS: {
DisallowGarbageCollection no_gc;
@@ -1199,8 +1215,11 @@ bool IterateElements(Isolate* isolate, Handle<JSReceiver> receiver,
break;
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) case TYPE##_ELEMENTS:
TYPED_ARRAYS(TYPED_ARRAY_CASE)
-#undef TYPED_ARRAY_CASE
return IterateElementsSlow(isolate, receiver, length, visitor);
+ RAB_GSAB_TYPED_ARRAYS(TYPED_ARRAY_CASE)
+ // TODO(v8:11111): Support RAB / GSAB.
+ UNREACHABLE();
+#undef TYPED_ARRAY_CASE
case FAST_STRING_WRAPPER_ELEMENTS:
case SLOW_STRING_WRAPPER_ELEMENTS:
// |array| is guaranteed to be an array or typed array.
diff --git a/chromium/v8/src/builtins/builtins-arraybuffer.cc b/chromium/v8/src/builtins/builtins-arraybuffer.cc
index 0f5f9051861..2d07847d570 100644
--- a/chromium/v8/src/builtins/builtins-arraybuffer.cc
+++ b/chromium/v8/src/builtins/builtins-arraybuffer.cc
@@ -23,17 +23,43 @@ namespace internal {
name)); \
}
+#define CHECK_RESIZABLE(expected, name, method) \
+ if (name->is_resizable() != expected) { \
+ THROW_NEW_ERROR_RETURN_FAILURE( \
+ isolate, \
+ NewTypeError(MessageTemplate::kIncompatibleMethodReceiver, \
+ isolate->factory()->NewStringFromAsciiChecked(method), \
+ name)); \
+ }
+
// -----------------------------------------------------------------------------
// ES#sec-arraybuffer-objects
namespace {
+bool RoundUpToPageSize(size_t byte_length, size_t page_size,
+ size_t max_allowed_byte_length, size_t* pages) {
+ size_t bytes_wanted = RoundUp(byte_length, page_size);
+ if (bytes_wanted > max_allowed_byte_length) {
+ return false;
+ }
+ *pages = bytes_wanted / page_size;
+ return true;
+}
+
Object ConstructBuffer(Isolate* isolate, Handle<JSFunction> target,
Handle<JSReceiver> new_target, Handle<Object> length,
- InitializedFlag initialized) {
- SharedFlag shared = (*target != target->native_context().array_buffer_fun())
- ? SharedFlag::kShared
- : SharedFlag::kNotShared;
+ Handle<Object> max_length, InitializedFlag initialized) {
+ SharedFlag shared =
+ (*target != target->native_context().array_buffer_fun() &&
+ *target != target->native_context().resizable_array_buffer_fun())
+ ? SharedFlag::kShared
+ : SharedFlag::kNotShared;
+ ResizableFlag resizable =
+ (*target == target->native_context().resizable_array_buffer_fun() ||
+ *target == target->native_context().growable_shared_array_buffer_fun())
+ ? ResizableFlag::kResizable
+ : ResizableFlag::kNotResizable;
Handle<JSObject> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, result,
@@ -42,9 +68,10 @@ Object ConstructBuffer(Isolate* isolate, Handle<JSFunction> target,
// Ensure that all fields are initialized because BackingStore::Allocate is
// allowed to GC. Note that we cannot move the allocation of the ArrayBuffer
// after BackingStore::Allocate because of the spec.
- array_buffer->Setup(shared, nullptr);
+ array_buffer->Setup(shared, resizable, nullptr);
size_t byte_length;
+ size_t max_byte_length = 0;
if (!TryNumberToSize(*length, &byte_length) ||
byte_length > JSArrayBuffer::kMaxByteLength) {
// ToNumber failed.
@@ -52,8 +79,46 @@ Object ConstructBuffer(Isolate* isolate, Handle<JSFunction> target,
isolate, NewRangeError(MessageTemplate::kInvalidArrayBufferLength));
}
- auto backing_store =
- BackingStore::Allocate(isolate, byte_length, shared, initialized);
+ std::unique_ptr<BackingStore> backing_store;
+ if (resizable == ResizableFlag::kNotResizable) {
+ backing_store =
+ BackingStore::Allocate(isolate, byte_length, shared, initialized);
+ } else {
+ Handle<Object> number_max_length;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, number_max_length,
+ Object::ToInteger(isolate, max_length));
+
+ if (!TryNumberToSize(*number_max_length, &max_byte_length)) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate,
+ NewRangeError(MessageTemplate::kInvalidArrayBufferMaxLength));
+ }
+ if (byte_length > max_byte_length) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate,
+ NewRangeError(MessageTemplate::kInvalidArrayBufferMaxLength));
+ }
+
+ size_t page_size = AllocatePageSize();
+ size_t initial_pages;
+ if (!RoundUpToPageSize(byte_length, page_size,
+ JSArrayBuffer::kMaxByteLength, &initial_pages)) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewRangeError(MessageTemplate::kInvalidArrayBufferLength));
+ }
+
+ size_t max_pages;
+ if (!RoundUpToPageSize(max_byte_length, page_size,
+ JSArrayBuffer::kMaxByteLength, &max_pages)) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate,
+ NewRangeError(MessageTemplate::kInvalidArrayBufferMaxLength));
+ }
+ constexpr bool kIsWasmMemory = false;
+ backing_store = BackingStore::TryAllocateAndPartiallyCommitMemory(
+ isolate, byte_length, page_size, initial_pages, max_pages,
+ kIsWasmMemory, shared);
+ }
if (!backing_store) {
// Allocation of backing store failed.
THROW_NEW_ERROR_RETURN_FAILURE(
@@ -61,6 +126,7 @@ Object ConstructBuffer(Isolate* isolate, Handle<JSFunction> target,
}
array_buffer->Attach(std::move(backing_store));
+ array_buffer->set_max_byte_length(max_byte_length);
return *array_buffer;
}
@@ -71,7 +137,10 @@ BUILTIN(ArrayBufferConstructor) {
HandleScope scope(isolate);
Handle<JSFunction> target = args.target();
DCHECK(*target == target->native_context().array_buffer_fun() ||
- *target == target->native_context().shared_array_buffer_fun());
+ *target == target->native_context().shared_array_buffer_fun() ||
+ *target == target->native_context().resizable_array_buffer_fun() ||
+ *target ==
+ target->native_context().growable_shared_array_buffer_fun());
if (args.new_target()->IsUndefined(isolate)) { // [[Call]]
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kConstructorNotFunction,
@@ -87,10 +156,11 @@ BUILTIN(ArrayBufferConstructor) {
if (number_length->Number() < 0.0) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewRangeError(MessageTemplate::kInvalidArrayBufferLength));
- }
+ }
- return ConstructBuffer(isolate, target, new_target, number_length,
- InitializedFlag::kZeroInitialized);
+ Handle<Object> max_length = args.atOrUndefined(isolate, 2);
+ return ConstructBuffer(isolate, target, new_target, number_length, max_length,
+ InitializedFlag::kZeroInitialized);
}
// This is a helper to construct an ArrayBuffer with uninitialized memory.
@@ -101,7 +171,7 @@ BUILTIN(ArrayBufferConstructor_DoNotInitialize) {
Handle<JSFunction> target(isolate->native_context()->array_buffer_fun(),
isolate);
Handle<Object> length = args.atOrUndefined(isolate, 1);
- return ConstructBuffer(isolate, target, target, length,
+ return ConstructBuffer(isolate, target, target, length, Handle<Object>(),
InitializedFlag::kUninitialized);
}
@@ -119,6 +189,8 @@ static Object SliceHelper(BuiltinArguments args, Isolate* isolate,
// * [SAB] If IsSharedArrayBuffer(O) is false, throw a TypeError exception.
CHECK_SHARED(is_shared, array_buffer, kMethodName);
+ CHECK_RESIZABLE(false, array_buffer, kMethodName);
+
// * [AB] If IsDetachedBuffer(buffer) is true, throw a TypeError exception.
if (!is_shared && array_buffer->was_detached()) {
THROW_NEW_ERROR_RETURN_FAILURE(
@@ -280,5 +352,158 @@ BUILTIN(ArrayBufferPrototypeSlice) {
return SliceHelper(args, isolate, kMethodName, false);
}
+static Object ResizeHelper(BuiltinArguments args, Isolate* isolate,
+ const char* kMethodName, bool is_shared) {
+ HandleScope scope(isolate);
+
+  // 1. Let O be the this value.
+ // 2. Perform ? RequireInternalSlot(O, [[ArrayBufferMaxByteLength]]).
+ CHECK_RECEIVER(JSArrayBuffer, array_buffer, kMethodName);
+ CHECK_RESIZABLE(true, array_buffer, kMethodName);
+
+ // [RAB] 3. If IsSharedArrayBuffer(O) is true, throw a *TypeError* exception
+ // [GSAB] 3. If IsSharedArrayBuffer(O) is false, throw a *TypeError* exception
+ CHECK_SHARED(is_shared, array_buffer, kMethodName);
+
+  // Let newByteLength be ? ToIntegerOrInfinity(newLength).
+ Handle<Object> new_length = args.at(1);
+ Handle<Object> number_new_byte_length;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, number_new_byte_length,
+ Object::ToInteger(isolate, new_length));
+
+ // [RAB] If IsDetachedBuffer(O) is true, throw a TypeError exception.
+ if (!is_shared && array_buffer->was_detached()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kDetachedOperation,
+ isolate->factory()->NewStringFromAsciiChecked(
+ kMethodName)));
+ }
+
+ // [RAB] If newByteLength < 0 or newByteLength >
+ // O.[[ArrayBufferMaxByteLength]], throw a RangeError exception.
+
+ // [GSAB] If newByteLength < currentByteLength or newByteLength >
+ // O.[[ArrayBufferMaxByteLength]], throw a RangeError exception.
+ size_t new_byte_length;
+ if (!TryNumberToSize(*number_new_byte_length, &new_byte_length)) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewRangeError(MessageTemplate::kInvalidArrayBufferResizeLength,
+ isolate->factory()->NewStringFromAsciiChecked(
+ kMethodName)));
+ }
+
+ if (is_shared && new_byte_length < array_buffer->byte_length()) {
+ // GrowableSharedArrayBuffer is only allowed to grow.
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewRangeError(MessageTemplate::kInvalidArrayBufferResizeLength,
+ isolate->factory()->NewStringFromAsciiChecked(
+ kMethodName)));
+ }
+
+ if (new_byte_length > array_buffer->max_byte_length()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewRangeError(MessageTemplate::kInvalidArrayBufferResizeLength,
+ isolate->factory()->NewStringFromAsciiChecked(
+ kMethodName)));
+ }
+
+ size_t page_size = AllocatePageSize();
+ size_t new_committed_pages;
+ bool round_return_value =
+ RoundUpToPageSize(new_byte_length, page_size,
+ JSArrayBuffer::kMaxByteLength, &new_committed_pages);
+ CHECK(round_return_value);
+
+ // [RAB] Let hostHandled be ? HostResizeArrayBuffer(O, newByteLength).
+ // [GSAB] Let hostHandled be ? HostGrowArrayBuffer(O, newByteLength).
+ // If hostHandled is handled, return undefined.
+
+ // TODO(v8:11111): Wasm integration.
+
+ if (!is_shared) {
+ // [RAB] Let oldBlock be O.[[ArrayBufferData]].
+ // [RAB] Let newBlock be ? CreateByteDataBlock(newByteLength).
+ // [RAB] Let copyLength be min(newByteLength, O.[[ArrayBufferByteLength]]).
+ // [RAB] Perform CopyDataBlockBytes(newBlock, 0, oldBlock, 0, copyLength).
+ // [RAB] NOTE: Neither creation of the new Data Block nor copying from the
+ // old Data Block are observable. Implementations reserve the right to
+ // implement this method as in-place growth or shrinkage.
+ if (array_buffer->GetBackingStore()->ResizeInPlace(
+ isolate, new_byte_length, new_committed_pages * page_size) !=
+ BackingStore::ResizeOrGrowResult::kSuccess) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewRangeError(MessageTemplate::kOutOfMemory,
+ isolate->factory()->NewStringFromAsciiChecked(
+ kMethodName)));
+ }
+ // [RAB] Set O.[[ArrayBufferByteLength]] to newLength.
+ array_buffer->set_byte_length(new_byte_length);
+ } else {
+ // [GSAB] (Detailed description of the algorithm omitted.)
+ auto result = array_buffer->GetBackingStore()->GrowInPlace(
+ isolate, new_byte_length, new_committed_pages * page_size);
+ if (result == BackingStore::ResizeOrGrowResult::kFailure) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewRangeError(MessageTemplate::kOutOfMemory,
+ isolate->factory()->NewStringFromAsciiChecked(
+ kMethodName)));
+ }
+ if (result == BackingStore::ResizeOrGrowResult::kRace) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate,
+ NewRangeError(
+ MessageTemplate::kInvalidArrayBufferResizeLength,
+ isolate->factory()->NewStringFromAsciiChecked(kMethodName)));
+ }
+ // Invariant: byte_length for a GSAB is 0 (it needs to be read from the
+ // BackingStore).
+ CHECK_EQ(0, array_buffer->byte_length());
+ }
+ return ReadOnlyRoots(isolate).undefined_value();
+}
+
+// ES #sec-get-growablesharedarraybuffer.prototype.bytelength
+// get GrowableSharedArrayBuffer.prototype.byteLength
+BUILTIN(GrowableSharedArrayBufferPrototypeGetByteLength) {
+ const char* const kMethodName =
+ "get GrowableSharedArrayBuffer.prototype.byteLength";
+ HandleScope scope(isolate);
+
+ // 1. Let O be the this value.
+  // 2. Perform ? RequireInternalSlot(O, [[ArrayBufferMaxByteLength]]).
+ CHECK_RECEIVER(JSArrayBuffer, array_buffer, kMethodName);
+ CHECK_RESIZABLE(true, array_buffer, kMethodName);
+ // 3. If IsSharedArrayBuffer(O) is false, throw a TypeError exception.
+ CHECK_SHARED(true, array_buffer, kMethodName);
+
+ // 4. Let length be ArrayBufferByteLength(O, SeqCst).
+
+ // Invariant: byte_length for GSAB is 0 (it needs to be read from the
+ // BackingStore).
+ DCHECK_EQ(0, array_buffer->byte_length());
+
+ size_t byte_length =
+ array_buffer->GetBackingStore()->byte_length(std::memory_order_seq_cst);
+
+ // 5. Return length.
+ return *isolate->factory()->NewNumberFromSize(byte_length);
+}
+
+// ES #sec-resizablearraybuffer.prototype.resize
+// ResizableArrayBuffer.prototype.resize(new_size)
+BUILTIN(ResizableArrayBufferPrototypeResize) {
+ const char* const kMethodName = "ResizableArrayBuffer.prototype.resize";
+ constexpr bool kIsShared = false;
+ return ResizeHelper(args, isolate, kMethodName, kIsShared);
+}
+
+// ES #sec-growablesharedarraybuffer.prototype.grow
+// GrowableSharedArrayBuffer.prototype.grow(new_size)
+BUILTIN(GrowableSharedArrayBufferPrototypeGrow) {
+ const char* const kMethodName = "GrowableSharedArrayBuffer.prototype.grow";
+ constexpr bool kIsShared = true;
+ return ResizeHelper(args, isolate, kMethodName, kIsShared);
+}
+
} // namespace internal
} // namespace v8
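For context on the JS-visible semantics ResizeHelper enforces above (new length bounded by [[ArrayBufferMaxByteLength]], grow-only for the shared variant, detach check for the non-shared one), a minimal sketch, illustrative only and assuming the same flag-gated globals as in the earlier note:

  const rab = new ResizableArrayBuffer(8, 64);
  rab.resize(32);    // OK: within maxByteLength; ResizeInPlace may grow or shrink
  rab.resize(128);   // RangeError: exceeds [[ArrayBufferMaxByteLength]]

  const gsab = new GrowableSharedArrayBuffer(8, 64);
  gsab.grow(32);     // OK: GrowInPlace, growth only
  gsab.grow(16);     // RangeError: shrinking a growable shared buffer is not allowed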
diff --git a/chromium/v8/src/builtins/builtins-async-function-gen.cc b/chromium/v8/src/builtins/builtins-async-function-gen.cc
index 49b00caa048..1644997ed01 100644
--- a/chromium/v8/src/builtins/builtins-async-function-gen.cc
+++ b/chromium/v8/src/builtins/builtins-async-function-gen.cc
@@ -157,12 +157,14 @@ TF_BUILTIN(AsyncFunctionEnter, AsyncFunctionBuiltinsAssembler) {
StoreObjectFieldNoWriteBarrier(
async_function_object, JSAsyncFunctionObject::kPromiseOffset, promise);
+ RunContextPromiseHookInit(context, promise, UndefinedConstant());
+
// Fire promise hooks if enabled and push the Promise under construction
// in an async function on the catch prediction stack to handle exceptions
// thrown before the first await.
Label if_instrumentation(this, Label::kDeferred),
if_instrumentation_done(this);
- Branch(IsPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate(),
+ Branch(IsIsolatePromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate(),
&if_instrumentation, &if_instrumentation_done);
BIND(&if_instrumentation);
{
diff --git a/chromium/v8/src/builtins/builtins-async-gen.cc b/chromium/v8/src/builtins/builtins-async-gen.cc
index 9ee6037b2bd..629f1e94fa4 100644
--- a/chromium/v8/src/builtins/builtins-async-gen.cc
+++ b/chromium/v8/src/builtins/builtins-async-gen.cc
@@ -97,18 +97,11 @@ TNode<Object> AsyncBuiltinsAssembler::AwaitOld(
TVARIABLE(HeapObject, var_throwaway, UndefinedConstant());
- // Deal with PromiseHooks and debug support in the runtime. This
- // also allocates the throwaway promise, which is only needed in
- // case of PromiseHooks or debugging.
- Label if_debugging(this, Label::kDeferred), do_resolve_promise(this);
- Branch(IsPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate(),
- &if_debugging, &do_resolve_promise);
- BIND(&if_debugging);
- var_throwaway =
- CAST(CallRuntime(Runtime::kAwaitPromisesInitOld, context, value, promise,
- outer_promise, on_reject, is_predicted_as_caught));
- Goto(&do_resolve_promise);
- BIND(&do_resolve_promise);
+ RunContextPromiseHookInit(context, promise, outer_promise);
+
+ InitAwaitPromise(Runtime::kAwaitPromisesInitOld, context, value, promise,
+ outer_promise, on_reject, is_predicted_as_caught,
+ &var_throwaway);
// Perform ! Call(promiseCapability.[[Resolve]], undefined, « promise »).
CallBuiltin(Builtins::kResolvePromise, context, promise, value);
@@ -168,21 +161,46 @@ TNode<Object> AsyncBuiltinsAssembler::AwaitOptimized(
TVARIABLE(HeapObject, var_throwaway, UndefinedConstant());
+ InitAwaitPromise(Runtime::kAwaitPromisesInit, context, promise, promise,
+ outer_promise, on_reject, is_predicted_as_caught,
+ &var_throwaway);
+
+ return CallBuiltin(Builtins::kPerformPromiseThen, native_context, promise,
+ on_resolve, on_reject, var_throwaway.value());
+}
+
+void AsyncBuiltinsAssembler::InitAwaitPromise(
+ Runtime::FunctionId id, TNode<Context> context, TNode<Object> value,
+ TNode<Object> promise, TNode<Object> outer_promise,
+ TNode<HeapObject> on_reject, TNode<Oddball> is_predicted_as_caught,
+ TVariable<HeapObject>* var_throwaway) {
// Deal with PromiseHooks and debug support in the runtime. This
// also allocates the throwaway promise, which is only needed in
// case of PromiseHooks or debugging.
- Label if_debugging(this, Label::kDeferred), do_perform_promise_then(this);
- Branch(IsPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate(),
- &if_debugging, &do_perform_promise_then);
+ Label if_debugging(this, Label::kDeferred),
+ if_promise_hook(this, Label::kDeferred),
+ not_debugging(this),
+ do_nothing(this);
+ TNode<Uint32T> promiseHookFlags = PromiseHookFlags();
+ Branch(IsIsolatePromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate(
+ promiseHookFlags), &if_debugging, &not_debugging);
BIND(&if_debugging);
- var_throwaway =
- CAST(CallRuntime(Runtime::kAwaitPromisesInit, context, promise, promise,
+ *var_throwaway =
+ CAST(CallRuntime(id, context, value, promise,
outer_promise, on_reject, is_predicted_as_caught));
- Goto(&do_perform_promise_then);
- BIND(&do_perform_promise_then);
-
- return CallBuiltin(Builtins::kPerformPromiseThen, native_context, promise,
- on_resolve, on_reject, var_throwaway.value());
+ Goto(&do_nothing);
+ BIND(&not_debugging);
+
+ // This call to NewJSPromise is to keep behaviour parity with what happens
+ // in Runtime::kAwaitPromisesInit above if native hooks are set. It will
+ // create a throwaway promise that will trigger an init event and will get
+ // passed into Builtins::kPerformPromiseThen below.
+ Branch(IsContextPromiseHookEnabled(promiseHookFlags), &if_promise_hook,
+ &do_nothing);
+ BIND(&if_promise_hook);
+ *var_throwaway = NewJSPromise(context, promise);
+ Goto(&do_nothing);
+ BIND(&do_nothing);
}
TNode<Object> AsyncBuiltinsAssembler::Await(
diff --git a/chromium/v8/src/builtins/builtins-async-gen.h b/chromium/v8/src/builtins/builtins-async-gen.h
index 833e78d45d5..34b7a0ce1d6 100644
--- a/chromium/v8/src/builtins/builtins-async-gen.h
+++ b/chromium/v8/src/builtins/builtins-async-gen.h
@@ -62,6 +62,12 @@ class AsyncBuiltinsAssembler : public PromiseBuiltinsAssembler {
TNode<SharedFunctionInfo> on_resolve_sfi,
TNode<SharedFunctionInfo> on_reject_sfi,
TNode<Oddball> is_predicted_as_caught);
+
+ void InitAwaitPromise(
+ Runtime::FunctionId id, TNode<Context> context, TNode<Object> value,
+ TNode<Object> promise, TNode<Object> outer_promise,
+ TNode<HeapObject> on_reject, TNode<Oddball> is_predicted_as_caught,
+ TVariable<HeapObject>* var_throwaway);
};
} // namespace internal
diff --git a/chromium/v8/src/builtins/builtins-async-generator-gen.cc b/chromium/v8/src/builtins/builtins-async-generator-gen.cc
index 03df9e307c7..0e94fd20939 100644
--- a/chromium/v8/src/builtins/builtins-async-generator-gen.cc
+++ b/chromium/v8/src/builtins/builtins-async-generator-gen.cc
@@ -518,7 +518,7 @@ TF_BUILTIN(AsyncGeneratorResolve, AsyncGeneratorBuiltinsAssembler) {
// the "promiseResolve" hook would not be fired otherwise.
Label if_fast(this), if_slow(this, Label::kDeferred), return_promise(this);
GotoIfForceSlowPath(&if_slow);
- GotoIf(IsPromiseHookEnabled(), &if_slow);
+ GotoIf(IsIsolatePromiseHookEnabledOrHasAsyncEventDelegate(), &if_slow);
Branch(IsPromiseThenProtectorCellInvalid(), &if_slow, &if_fast);
BIND(&if_fast);
diff --git a/chromium/v8/src/builtins/builtins-call-gen.cc b/chromium/v8/src/builtins/builtins-call-gen.cc
index 664f57aadb2..89bf77d0b07 100644
--- a/chromium/v8/src/builtins/builtins-call-gen.cc
+++ b/chromium/v8/src/builtins/builtins-call-gen.cc
@@ -64,38 +64,45 @@ void Builtins::Generate_CallFunctionForwardVarargs(MacroAssembler* masm) {
masm->isolate()->builtins()->CallFunction());
}
+// TODO(cbruni): Try reusing code between builtin versions to avoid binary
+// overhead.
+TF_BUILTIN(Call_ReceiverIsNullOrUndefined_Baseline_Compact,
+ CallOrConstructBuiltinsAssembler) {
+ auto receiver = UndefinedConstant();
+ CallReceiver<Descriptor>(Builtins::kCall_ReceiverIsNullOrUndefined, receiver);
+}
+
TF_BUILTIN(Call_ReceiverIsNullOrUndefined_Baseline,
CallOrConstructBuiltinsAssembler) {
- auto target = Parameter<Object>(Descriptor::kFunction);
auto argc = UncheckedParameter<Int32T>(Descriptor::kActualArgumentsCount);
- auto context = LoadContextFromBaseline();
- auto feedback_vector = LoadFeedbackVectorFromBaseline();
auto slot = UncheckedParameter<UintPtrT>(Descriptor::kSlot);
- CollectCallFeedback(target, context, feedback_vector, slot);
- TailCallBuiltin(Builtins::kCall_ReceiverIsNullOrUndefined, context, target,
- argc);
+ auto receiver = UndefinedConstant();
+ CallReceiver<Descriptor>(Builtins::kCall_ReceiverIsNullOrUndefined, argc,
+ slot, receiver);
+}
+
+TF_BUILTIN(Call_ReceiverIsNotNullOrUndefined_Baseline_Compact,
+ CallOrConstructBuiltinsAssembler) {
+ CallReceiver<Descriptor>(Builtins::kCall_ReceiverIsNotNullOrUndefined);
}
TF_BUILTIN(Call_ReceiverIsNotNullOrUndefined_Baseline,
CallOrConstructBuiltinsAssembler) {
- auto target = Parameter<Object>(Descriptor::kFunction);
auto argc = UncheckedParameter<Int32T>(Descriptor::kActualArgumentsCount);
- auto context = LoadContextFromBaseline();
- auto feedback_vector = LoadFeedbackVectorFromBaseline();
auto slot = UncheckedParameter<UintPtrT>(Descriptor::kSlot);
- CollectCallFeedback(target, context, feedback_vector, slot);
- TailCallBuiltin(Builtins::kCall_ReceiverIsNotNullOrUndefined, context, target,
- argc);
+ CallReceiver<Descriptor>(Builtins::kCall_ReceiverIsNotNullOrUndefined, argc,
+ slot);
+}
+
+TF_BUILTIN(Call_ReceiverIsAny_Baseline_Compact,
+ CallOrConstructBuiltinsAssembler) {
+ CallReceiver<Descriptor>(Builtins::kCall_ReceiverIsAny);
}
TF_BUILTIN(Call_ReceiverIsAny_Baseline, CallOrConstructBuiltinsAssembler) {
- auto target = Parameter<Object>(Descriptor::kFunction);
auto argc = UncheckedParameter<Int32T>(Descriptor::kActualArgumentsCount);
- auto context = LoadContextFromBaseline();
- auto feedback_vector = LoadFeedbackVectorFromBaseline();
auto slot = UncheckedParameter<UintPtrT>(Descriptor::kSlot);
- CollectCallFeedback(target, context, feedback_vector, slot);
- TailCallBuiltin(Builtins::kCall_ReceiverIsAny, context, target, argc);
+ CallReceiver<Descriptor>(Builtins::kCall_ReceiverIsAny, argc, slot);
}
TF_BUILTIN(Call_ReceiverIsNullOrUndefined_WithFeedback,
@@ -105,7 +112,9 @@ TF_BUILTIN(Call_ReceiverIsNullOrUndefined_WithFeedback,
auto context = Parameter<Context>(Descriptor::kContext);
auto feedback_vector = Parameter<FeedbackVector>(Descriptor::kFeedbackVector);
auto slot = UncheckedParameter<UintPtrT>(Descriptor::kSlot);
- CollectCallFeedback(target, context, feedback_vector, slot);
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ CollectCallFeedback(
+ target, [=] { return receiver; }, context, feedback_vector, slot);
TailCallBuiltin(Builtins::kCall_ReceiverIsNullOrUndefined, context, target,
argc);
}
@@ -117,7 +126,9 @@ TF_BUILTIN(Call_ReceiverIsNotNullOrUndefined_WithFeedback,
auto context = Parameter<Context>(Descriptor::kContext);
auto feedback_vector = Parameter<FeedbackVector>(Descriptor::kFeedbackVector);
auto slot = UncheckedParameter<UintPtrT>(Descriptor::kSlot);
- CollectCallFeedback(target, context, feedback_vector, slot);
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ CollectCallFeedback(
+ target, [=] { return receiver; }, context, feedback_vector, slot);
TailCallBuiltin(Builtins::kCall_ReceiverIsNotNullOrUndefined, context, target,
argc);
}
@@ -128,7 +139,9 @@ TF_BUILTIN(Call_ReceiverIsAny_WithFeedback, CallOrConstructBuiltinsAssembler) {
auto context = Parameter<Context>(Descriptor::kContext);
auto feedback_vector = Parameter<FeedbackVector>(Descriptor::kFeedbackVector);
auto slot = UncheckedParameter<UintPtrT>(Descriptor::kSlot);
- CollectCallFeedback(target, context, feedback_vector, slot);
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ CollectCallFeedback(
+ target, [=] { return receiver; }, context, feedback_vector, slot);
TailCallBuiltin(Builtins::kCall_ReceiverIsAny, context, target, argc);
}
@@ -449,6 +462,43 @@ void CallOrConstructBuiltinsAssembler::CallOrConstructWithSpread(
}
}
+template <class Descriptor>
+void CallOrConstructBuiltinsAssembler::CallReceiver(
+ Builtins::Name id, base::Optional<TNode<Object>> receiver) {
+ static_assert(std::is_same<Descriptor,
+ CallTrampoline_Baseline_CompactDescriptor>::value,
+ "Incompatible Descriptor");
+ auto bitfield = UncheckedParameter<Word32T>(Descriptor::kBitField);
+ TNode<Int32T> argc =
+ Signed(DecodeWord32<
+ CallTrampoline_Baseline_CompactDescriptor::ArgumentCountField>(
+ bitfield));
+ TNode<UintPtrT> slot = ChangeUint32ToWord(
+ DecodeWord32<CallTrampoline_Baseline_CompactDescriptor::SlotField>(
+ bitfield));
+ CallReceiver<Descriptor>(id, argc, slot, receiver);
+}
+
+template <class Descriptor>
+void CallOrConstructBuiltinsAssembler::CallReceiver(
+ Builtins::Name id, TNode<Int32T> argc, TNode<UintPtrT> slot,
+ base::Optional<TNode<Object>> maybe_receiver) {
+ auto target = Parameter<Object>(Descriptor::kFunction);
+ auto context = LoadContextFromBaseline();
+ auto feedback_vector = LoadFeedbackVectorFromBaseline();
+ LazyNode<Object> receiver = [=] {
+ if (maybe_receiver) {
+ return *maybe_receiver;
+ } else {
+ CodeStubArguments args(this, argc);
+ return args.GetReceiver();
+ }
+ };
+
+ CollectCallFeedback(target, receiver, context, feedback_vector, slot);
+ TailCallBuiltin(id, context, target, argc);
+}
+
TF_BUILTIN(CallWithArrayLike, CallOrConstructBuiltinsAssembler) {
auto target = Parameter<Object>(Descriptor::kTarget);
base::Optional<TNode<Object>> new_target = base::nullopt;
@@ -464,7 +514,9 @@ TF_BUILTIN(CallWithArrayLike_WithFeedback, CallOrConstructBuiltinsAssembler) {
auto context = Parameter<Context>(Descriptor::kContext);
auto feedback_vector = Parameter<FeedbackVector>(Descriptor::kFeedbackVector);
auto slot = UncheckedParameter<UintPtrT>(Descriptor::kSlot);
- CollectCallFeedback(target, context, feedback_vector, slot);
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ CollectCallFeedback(
+ target, [=] { return receiver; }, context, feedback_vector, slot);
CallOrConstructWithArrayLike(target, new_target, arguments_list, context);
}
@@ -485,7 +537,10 @@ TF_BUILTIN(CallWithSpread_Baseline, CallOrConstructBuiltinsAssembler) {
auto context = LoadContextFromBaseline();
auto feedback_vector = LoadFeedbackVectorFromBaseline();
auto slot = UncheckedParameter<UintPtrT>(Descriptor::kSlot);
- CollectCallFeedback(target, context, feedback_vector, slot);
+ CodeStubArguments args(this, args_count);
+ CollectCallFeedback(
+ target, [=] { return args.GetReceiver(); }, context, feedback_vector,
+ slot);
CallOrConstructWithSpread(target, new_target, spread, args_count, context);
}
@@ -497,7 +552,9 @@ TF_BUILTIN(CallWithSpread_WithFeedback, CallOrConstructBuiltinsAssembler) {
auto context = Parameter<Context>(Descriptor::kContext);
auto feedback_vector = Parameter<FeedbackVector>(Descriptor::kFeedbackVector);
auto slot = UncheckedParameter<UintPtrT>(Descriptor::kSlot);
- CollectCallFeedback(target, context, feedback_vector, slot);
+ auto receiver = Parameter<Object>(Descriptor::kReceiver);
+ CollectCallFeedback(
+ target, [=] { return receiver; }, context, feedback_vector, slot);
CallOrConstructWithSpread(target, new_target, spread, args_count, context);
}
diff --git a/chromium/v8/src/builtins/builtins-call-gen.h b/chromium/v8/src/builtins/builtins-call-gen.h
index c938662d5e5..ff4d998ff3a 100644
--- a/chromium/v8/src/builtins/builtins-call-gen.h
+++ b/chromium/v8/src/builtins/builtins-call-gen.h
@@ -30,6 +30,13 @@ class CallOrConstructBuiltinsAssembler : public CodeStubAssembler {
TNode<Object> spread, TNode<Int32T> args_count,
TNode<Context> context);
+ template <class Descriptor>
+ void CallReceiver(Builtins::Name id,
+ base::Optional<TNode<Object>> = base::nullopt);
+ template <class Descriptor>
+ void CallReceiver(Builtins::Name id, TNode<Int32T> argc, TNode<UintPtrT> slot,
+ base::Optional<TNode<Object>> = base::nullopt);
+
enum class CallFunctionTemplateMode : uint8_t {
kCheckAccess,
kCheckCompatibleReceiver,
diff --git a/chromium/v8/src/builtins/builtins-debug-gen.cc b/chromium/v8/src/builtins/builtins-debug-gen.cc
deleted file mode 100644
index 9d47cf16006..00000000000
--- a/chromium/v8/src/builtins/builtins-debug-gen.cc
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/builtins/builtins-utils.h"
-#include "src/builtins/builtins.h"
-#include "src/debug/debug.h"
-#include "src/objects/objects-inl.h"
-
-namespace v8 {
-namespace internal {
-
-void Builtins::Generate_FrameDropperTrampoline(MacroAssembler* masm) {
- DebugCodegen::GenerateFrameDropperTrampoline(masm);
-}
-
-void Builtins::Generate_HandleDebuggerStatement(MacroAssembler* masm) {
- DebugCodegen::GenerateHandleDebuggerStatement(masm);
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/chromium/v8/src/builtins/builtins-definitions.h b/chromium/v8/src/builtins/builtins-definitions.h
index b0e608418eb..78255a30e93 100644
--- a/chromium/v8/src/builtins/builtins-definitions.h
+++ b/chromium/v8/src/builtins/builtins-definitions.h
@@ -50,8 +50,13 @@ namespace internal {
ASM(Call_ReceiverIsNullOrUndefined, CallTrampoline) \
ASM(Call_ReceiverIsNotNullOrUndefined, CallTrampoline) \
ASM(Call_ReceiverIsAny, CallTrampoline) \
+ TFC(Call_ReceiverIsNullOrUndefined_Baseline_Compact, \
+ CallTrampoline_Baseline_Compact) \
TFC(Call_ReceiverIsNullOrUndefined_Baseline, CallTrampoline_Baseline) \
+ TFC(Call_ReceiverIsNotNullOrUndefined_Baseline_Compact, \
+ CallTrampoline_Baseline_Compact) \
TFC(Call_ReceiverIsNotNullOrUndefined_Baseline, CallTrampoline_Baseline) \
+ TFC(Call_ReceiverIsAny_Baseline_Compact, CallTrampoline_Baseline_Compact) \
TFC(Call_ReceiverIsAny_Baseline, CallTrampoline_Baseline) \
TFC(Call_ReceiverIsNullOrUndefined_WithFeedback, \
CallTrampoline_WithFeedback) \
@@ -133,13 +138,13 @@ namespace internal {
InterpreterPushArgsThenConstruct) \
ASM(InterpreterPushArgsThenConstructWithFinalSpread, \
InterpreterPushArgsThenConstruct) \
- ASM(InterpreterEnterBytecodeAdvance, Dummy) \
- ASM(InterpreterEnterBytecodeDispatch, Dummy) \
+ ASM(InterpreterEnterAtBytecode, Dummy) \
+ ASM(InterpreterEnterAtNextBytecode, Dummy) \
ASM(InterpreterOnStackReplacement, ContextOnly) \
\
/* Baseline Compiler */ \
ASM(BaselineOutOfLinePrologue, BaselineOutOfLinePrologue) \
- ASM(BaselineOnStackReplacement, ContextOnly) \
+ ASM(BaselineOnStackReplacement, Void) \
ASM(BaselineLeaveFrame, BaselineLeaveFrame) \
ASM(BaselineEnterAtBytecode, Void) \
ASM(BaselineEnterAtNextBytecode, Void) \
@@ -200,8 +205,6 @@ namespace internal {
\
/* Debugger */ \
TFJ(DebugBreakTrampoline, kDontAdaptArgumentsSentinel) \
- ASM(FrameDropperTrampoline, FrameDropperTrampoline) \
- ASM(HandleDebuggerStatement, ContextOnly) \
\
/* Type conversions */ \
TFC(ToNumber, TypeConversion) \
@@ -770,6 +773,11 @@ namespace internal {
ASM(RegExpInterpreterTrampoline, CCall) \
ASM(RegExpExperimentalTrampoline, CCall) \
\
+ /* ResizableArrayBuffer & GrowableSharedArrayBuffer */ \
+ CPP(ResizableArrayBufferPrototypeResize) \
+ CPP(GrowableSharedArrayBufferPrototypeGrow) \
+ CPP(GrowableSharedArrayBufferPrototypeGetByteLength) \
+ \
/* Set */ \
TFJ(SetConstructor, kDontAdaptArgumentsSentinel) \
TFJ(SetPrototypeHas, 1, kReceiver, kKey) \
@@ -863,6 +871,7 @@ namespace internal {
IF_WASM(ASM, GenericJSToWasmWrapper, Dummy) \
IF_WASM(ASM, WasmCompileLazy, Dummy) \
IF_WASM(ASM, WasmDebugBreak, Dummy) \
+ IF_WASM(ASM, WasmOnStackReplace, Dummy) \
IF_WASM(TFC, WasmFloat32ToNumber, WasmFloat32ToNumber) \
IF_WASM(TFC, WasmFloat64ToNumber, WasmFloat64ToNumber) \
IF_WASM(TFC, WasmI32AtomicWait32, WasmI32AtomicWait32) \
@@ -983,6 +992,7 @@ namespace internal {
CPP(CollatorPrototypeCompare) \
/* ecma402 #sec-intl.collator.supportedlocalesof */ \
CPP(CollatorSupportedLocalesOf) \
+ /* ecma402 #sec-intl.collator.prototype.resolvedoptions */ \
CPP(CollatorPrototypeResolvedOptions) \
/* ecma402 #sup-date.prototype.tolocaledatestring */ \
CPP(DatePrototypeToLocaleDateString) \
@@ -1028,21 +1038,46 @@ namespace internal {
CPP(ListFormatSupportedLocalesOf) \
/* ecma402 #sec-intl-locale-constructor */ \
CPP(LocaleConstructor) \
+ /* ecma402 #sec-Intl.Locale.prototype.baseName */ \
CPP(LocalePrototypeBaseName) \
+ /* ecma402 #sec-Intl.Locale.prototype.calendar */ \
CPP(LocalePrototypeCalendar) \
+ /* ecma402 #sec-Intl.Locale.prototype.calendars */ \
+ CPP(LocalePrototypeCalendars) \
+ /* ecma402 #sec-Intl.Locale.prototype.caseFirst */ \
CPP(LocalePrototypeCaseFirst) \
+ /* ecma402 #sec-Intl.Locale.prototype.collation */ \
CPP(LocalePrototypeCollation) \
+ /* ecma402 #sec-Intl.Locale.prototype.collations */ \
+ CPP(LocalePrototypeCollations) \
+ /* ecma402 #sec-Intl.Locale.prototype.hourCycle */ \
CPP(LocalePrototypeHourCycle) \
+ /* ecma402 #sec-Intl.Locale.prototype.hourCycles */ \
+ CPP(LocalePrototypeHourCycles) \
+ /* ecma402 #sec-Intl.Locale.prototype.language */ \
CPP(LocalePrototypeLanguage) \
/* ecma402 #sec-Intl.Locale.prototype.maximize */ \
CPP(LocalePrototypeMaximize) \
/* ecma402 #sec-Intl.Locale.prototype.minimize */ \
CPP(LocalePrototypeMinimize) \
+ /* ecma402 #sec-Intl.Locale.prototype.numeric */ \
CPP(LocalePrototypeNumeric) \
+ /* ecma402 #sec-Intl.Locale.prototype.numberingSystem */ \
CPP(LocalePrototypeNumberingSystem) \
+ /* ecma402 #sec-Intl.Locale.prototype.numberingSystems */ \
+ CPP(LocalePrototypeNumberingSystems) \
+ /* ecma402 #sec-Intl.Locale.prototype.region */ \
CPP(LocalePrototypeRegion) \
+ /* ecma402 #sec-Intl.Locale.prototype.script */ \
CPP(LocalePrototypeScript) \
+ /* ecma402 #sec-Intl.Locale.prototype.textInfo */ \
+ CPP(LocalePrototypeTextInfo) \
+ /* ecma402 #sec-Intl.Locale.prototype.timezones */ \
+ CPP(LocalePrototypeTimeZones) \
+ /* ecma402 #sec-Intl.Locale.prototype.toString */ \
CPP(LocalePrototypeToString) \
+ /* ecma402 #sec-Intl.Locale.prototype.weekInfo */ \
+ CPP(LocalePrototypeWeekInfo) \
/* ecma402 #sec-intl.numberformat */ \
CPP(NumberFormatConstructor) \
/* ecma402 #sec-number-format-functions */ \
@@ -1057,6 +1092,7 @@ namespace internal {
CPP(NumberFormatSupportedLocalesOf) \
/* ecma402 #sec-intl.pluralrules */ \
CPP(PluralRulesConstructor) \
+ /* ecma402 #sec-intl.pluralrules.prototype.resolvedoptions */ \
CPP(PluralRulesPrototypeResolvedOptions) \
/* ecma402 #sec-intl.pluralrules.prototype.select */ \
CPP(PluralRulesPrototypeSelect) \
diff --git a/chromium/v8/src/builtins/builtins-error.cc b/chromium/v8/src/builtins/builtins-error.cc
index 840298eacbf..44dce9224a3 100644
--- a/chromium/v8/src/builtins/builtins-error.cc
+++ b/chromium/v8/src/builtins/builtins-error.cc
@@ -18,9 +18,12 @@ namespace internal {
// ES6 section 19.5.1.1 Error ( message )
BUILTIN(ErrorConstructor) {
HandleScope scope(isolate);
+ Handle<Object> options = FLAG_harmony_error_cause
+ ? args.atOrUndefined(isolate, 2)
+ : isolate->factory()->undefined_value();
RETURN_RESULT_OR_FAILURE(
isolate, ErrorUtils::Construct(isolate, args.target(), args.new_target(),
- args.atOrUndefined(isolate, 1)));
+ args.atOrUndefined(isolate, 1), options));
}
// static
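The ErrorConstructor change above threads an options argument (args slot 2) through to ErrorUtils::Construct when FLAG_harmony_error_cause is set, implementing the TC39 error-cause proposal. A minimal sketch of the JS-visible shape, illustrative only and assuming the corresponding harmony flag is enabled:

  try {
    JSON.parse('{');  // throws a SyntaxError
  } catch (e) {
    // The options bag's `cause` is consumed by ErrorUtils::Construct and is
    // expected to appear as the new error's .cause property.
    throw new Error('config could not be parsed', { cause: e });
  }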
diff --git a/chromium/v8/src/builtins/builtins-generator-gen.cc b/chromium/v8/src/builtins/builtins-generator-gen.cc
index 2e9d7e24e4f..b2d6e223e16 100644
--- a/chromium/v8/src/builtins/builtins-generator-gen.cc
+++ b/chromium/v8/src/builtins/builtins-generator-gen.cc
@@ -205,7 +205,7 @@ TF_BUILTIN(GeneratorPrototypeThrow, GeneratorBuiltinsAssembler) {
// TODO(cbruni): Merge with corresponding bytecode handler.
TF_BUILTIN(SuspendGeneratorBaseline, GeneratorBuiltinsAssembler) {
auto generator = Parameter<JSGeneratorObject>(Descriptor::kGeneratorObject);
- auto context = Parameter<Context>(Descriptor::kContext);
+ auto context = LoadContextFromBaseline();
StoreJSGeneratorObjectContext(generator, context);
auto suspend_id = SmiTag(UncheckedParameter<IntPtrT>(Descriptor::kSuspendId));
StoreJSGeneratorObjectContinuation(generator, suspend_id);
diff --git a/chromium/v8/src/builtins/builtins-handler-gen.cc b/chromium/v8/src/builtins/builtins-handler-gen.cc
index 3cbd626b8e4..19a31b81a7c 100644
--- a/chromium/v8/src/builtins/builtins-handler-gen.cc
+++ b/chromium/v8/src/builtins/builtins-handler-gen.cc
@@ -183,28 +183,39 @@ TF_BUILTIN(ElementsTransitionAndStore_NoTransitionHandleCOW,
// All elements kinds handled by EmitElementStore. Specifically, this includes
// fast elements and fixed typed array elements.
-#define ELEMENTS_KINDS(V) \
- V(PACKED_SMI_ELEMENTS) \
- V(HOLEY_SMI_ELEMENTS) \
- V(PACKED_ELEMENTS) \
- V(PACKED_NONEXTENSIBLE_ELEMENTS) \
- V(PACKED_SEALED_ELEMENTS) \
- V(HOLEY_ELEMENTS) \
- V(HOLEY_NONEXTENSIBLE_ELEMENTS) \
- V(HOLEY_SEALED_ELEMENTS) \
- V(PACKED_DOUBLE_ELEMENTS) \
- V(HOLEY_DOUBLE_ELEMENTS) \
- V(UINT8_ELEMENTS) \
- V(INT8_ELEMENTS) \
- V(UINT16_ELEMENTS) \
- V(INT16_ELEMENTS) \
- V(UINT32_ELEMENTS) \
- V(INT32_ELEMENTS) \
- V(FLOAT32_ELEMENTS) \
- V(FLOAT64_ELEMENTS) \
- V(UINT8_CLAMPED_ELEMENTS) \
- V(BIGUINT64_ELEMENTS) \
- V(BIGINT64_ELEMENTS)
+#define ELEMENTS_KINDS(V) \
+ V(PACKED_SMI_ELEMENTS) \
+ V(HOLEY_SMI_ELEMENTS) \
+ V(PACKED_ELEMENTS) \
+ V(PACKED_NONEXTENSIBLE_ELEMENTS) \
+ V(PACKED_SEALED_ELEMENTS) \
+ V(HOLEY_ELEMENTS) \
+ V(HOLEY_NONEXTENSIBLE_ELEMENTS) \
+ V(HOLEY_SEALED_ELEMENTS) \
+ V(PACKED_DOUBLE_ELEMENTS) \
+ V(HOLEY_DOUBLE_ELEMENTS) \
+ V(UINT8_ELEMENTS) \
+ V(INT8_ELEMENTS) \
+ V(UINT16_ELEMENTS) \
+ V(INT16_ELEMENTS) \
+ V(UINT32_ELEMENTS) \
+ V(INT32_ELEMENTS) \
+ V(FLOAT32_ELEMENTS) \
+ V(FLOAT64_ELEMENTS) \
+ V(UINT8_CLAMPED_ELEMENTS) \
+ V(BIGUINT64_ELEMENTS) \
+ V(BIGINT64_ELEMENTS) \
+ V(RAB_GSAB_UINT8_ELEMENTS) \
+ V(RAB_GSAB_INT8_ELEMENTS) \
+ V(RAB_GSAB_UINT16_ELEMENTS) \
+ V(RAB_GSAB_INT16_ELEMENTS) \
+ V(RAB_GSAB_UINT32_ELEMENTS) \
+ V(RAB_GSAB_INT32_ELEMENTS) \
+ V(RAB_GSAB_FLOAT32_ELEMENTS) \
+ V(RAB_GSAB_FLOAT64_ELEMENTS) \
+ V(RAB_GSAB_UINT8_CLAMPED_ELEMENTS) \
+ V(RAB_GSAB_BIGUINT64_ELEMENTS) \
+ V(RAB_GSAB_BIGINT64_ELEMENTS)
void HandlerBuiltinsAssembler::DispatchByElementsKind(
TNode<Int32T> elements_kind, const ElementsKindSwitchCase& case_function,
diff --git a/chromium/v8/src/builtins/builtins-ic-gen.cc b/chromium/v8/src/builtins/builtins-ic-gen.cc
index 81bf6379ece..e172b5a129b 100644
--- a/chromium/v8/src/builtins/builtins-ic-gen.cc
+++ b/chromium/v8/src/builtins/builtins-ic-gen.cc
@@ -10,70 +10,221 @@
namespace v8 {
namespace internal {
-#define IC_BUILTIN(Name) \
- void Builtins::Generate_##Name(compiler::CodeAssemblerState* state) { \
- AccessorAssembler assembler(state); \
- assembler.Generate##Name(); \
- }
+void Builtins::Generate_LoadIC(compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateLoadIC();
+}
+void Builtins::Generate_LoadIC_Megamorphic(
+ compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateLoadIC_Megamorphic();
+}
+void Builtins::Generate_LoadIC_Noninlined(compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateLoadIC_Noninlined();
+}
+void Builtins::Generate_LoadIC_NoFeedback(compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateLoadIC_NoFeedback();
+}
+void Builtins::Generate_LoadICTrampoline(compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateLoadICTrampoline();
+}
+void Builtins::Generate_LoadICBaseline(compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateLoadICBaseline();
+}
+void Builtins::Generate_LoadICTrampoline_Megamorphic(
+ compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateLoadICTrampoline_Megamorphic();
+}
+void Builtins::Generate_LoadSuperIC(compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateLoadSuperIC();
+}
+void Builtins::Generate_LoadSuperICBaseline(
+ compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateLoadSuperICBaseline();
+}
+void Builtins::Generate_KeyedLoadIC(compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateKeyedLoadIC();
+}
+void Builtins::Generate_KeyedLoadIC_Megamorphic(
+ compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateKeyedLoadIC_Megamorphic();
+}
+void Builtins::Generate_KeyedLoadIC_PolymorphicName(
+ compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateKeyedLoadIC_PolymorphicName();
+}
+void Builtins::Generate_KeyedLoadICTrampoline(
+ compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateKeyedLoadICTrampoline();
+}
+void Builtins::Generate_KeyedLoadICBaseline(
+ compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateKeyedLoadICBaseline();
+}
+void Builtins::Generate_KeyedLoadICTrampoline_Megamorphic(
+ compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateKeyedLoadICTrampoline_Megamorphic();
+}
+void Builtins::Generate_LoadGlobalIC_NoFeedback(
+ compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateLoadGlobalIC_NoFeedback();
+}
+void Builtins::Generate_StoreGlobalIC(compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateStoreGlobalIC();
+}
+void Builtins::Generate_StoreGlobalICTrampoline(
+ compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateStoreGlobalICTrampoline();
+}
+void Builtins::Generate_StoreGlobalICBaseline(
+ compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateStoreGlobalICBaseline();
+}
+void Builtins::Generate_StoreIC(compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateStoreIC();
+}
+void Builtins::Generate_StoreICTrampoline(compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateStoreICTrampoline();
+}
+void Builtins::Generate_StoreICBaseline(compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateStoreICBaseline();
+}
+void Builtins::Generate_KeyedStoreIC(compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateKeyedStoreIC();
+}
+void Builtins::Generate_KeyedStoreICTrampoline(
+ compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateKeyedStoreICTrampoline();
+}
+void Builtins::Generate_KeyedStoreICBaseline(
+ compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateKeyedStoreICBaseline();
+}
+void Builtins::Generate_StoreInArrayLiteralIC(
+ compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateStoreInArrayLiteralIC();
+}
+void Builtins::Generate_StoreInArrayLiteralICBaseline(
+ compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateStoreInArrayLiteralICBaseline();
+}
+void Builtins::Generate_CloneObjectIC(compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateCloneObjectIC();
+}
+void Builtins::Generate_CloneObjectICBaseline(
+ compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateCloneObjectICBaseline();
+}
+void Builtins::Generate_CloneObjectIC_Slow(
+ compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateCloneObjectIC_Slow();
+}
+void Builtins::Generate_KeyedHasIC(compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateKeyedHasIC();
+}
+void Builtins::Generate_KeyedHasICBaseline(
+ compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateKeyedHasICBaseline();
+}
+void Builtins::Generate_KeyedHasIC_Megamorphic(
+ compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateKeyedHasIC_Megamorphic();
+}
+void Builtins::Generate_KeyedHasIC_PolymorphicName(
+ compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateKeyedHasIC_PolymorphicName();
+}
+
+void Builtins::Generate_LoadGlobalIC(compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateLoadGlobalIC(TypeofMode::kNotInside);
+}
-#define IC_BUILTIN_PARAM(BuiltinName, GeneratorName, parameter) \
- void Builtins::Generate_##BuiltinName(compiler::CodeAssemblerState* state) { \
- AccessorAssembler assembler(state); \
- assembler.Generate##GeneratorName(parameter); \
- }
+void Builtins::Generate_LoadGlobalICInsideTypeof(
+ compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateLoadGlobalIC(TypeofMode::kInside);
+}
-IC_BUILTIN(LoadIC)
-IC_BUILTIN(LoadIC_Megamorphic)
-IC_BUILTIN(LoadIC_Noninlined)
-IC_BUILTIN(LoadIC_NoFeedback)
-IC_BUILTIN(LoadICTrampoline)
-IC_BUILTIN(LoadICBaseline)
-IC_BUILTIN(LoadICTrampoline_Megamorphic)
-IC_BUILTIN(LoadSuperIC)
-IC_BUILTIN(LoadSuperICBaseline)
-IC_BUILTIN(KeyedLoadIC)
-IC_BUILTIN(KeyedLoadIC_Megamorphic)
-IC_BUILTIN(KeyedLoadIC_PolymorphicName)
-IC_BUILTIN(KeyedLoadICTrampoline)
-IC_BUILTIN(KeyedLoadICBaseline)
-IC_BUILTIN(KeyedLoadICTrampoline_Megamorphic)
-IC_BUILTIN(LoadGlobalIC_NoFeedback)
-IC_BUILTIN(StoreGlobalIC)
-IC_BUILTIN(StoreGlobalICTrampoline)
-IC_BUILTIN(StoreGlobalICBaseline)
-IC_BUILTIN(StoreIC)
-IC_BUILTIN(StoreICTrampoline)
-IC_BUILTIN(StoreICBaseline)
-IC_BUILTIN(KeyedStoreIC)
-IC_BUILTIN(KeyedStoreICTrampoline)
-IC_BUILTIN(KeyedStoreICBaseline)
-IC_BUILTIN(StoreInArrayLiteralIC)
-IC_BUILTIN(StoreInArrayLiteralICBaseline)
-IC_BUILTIN(CloneObjectIC)
-IC_BUILTIN(CloneObjectICBaseline)
-IC_BUILTIN(CloneObjectIC_Slow)
-IC_BUILTIN(KeyedHasIC)
-IC_BUILTIN(KeyedHasICBaseline)
-IC_BUILTIN(KeyedHasIC_Megamorphic)
-IC_BUILTIN(KeyedHasIC_PolymorphicName)
+void Builtins::Generate_LoadGlobalICTrampoline(
+ compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateLoadGlobalICTrampoline(TypeofMode::kNotInside);
+}
-IC_BUILTIN_PARAM(LoadGlobalIC, LoadGlobalIC, NOT_INSIDE_TYPEOF)
-IC_BUILTIN_PARAM(LoadGlobalICInsideTypeof, LoadGlobalIC, INSIDE_TYPEOF)
-IC_BUILTIN_PARAM(LoadGlobalICTrampoline, LoadGlobalICTrampoline,
- NOT_INSIDE_TYPEOF)
-IC_BUILTIN_PARAM(LoadGlobalICInsideTypeofTrampoline, LoadGlobalICTrampoline,
- INSIDE_TYPEOF)
-IC_BUILTIN_PARAM(LoadGlobalICBaseline, LoadGlobalICBaseline, NOT_INSIDE_TYPEOF)
-IC_BUILTIN_PARAM(LoadGlobalICInsideTypeofBaseline, LoadGlobalICBaseline,
- INSIDE_TYPEOF)
-IC_BUILTIN_PARAM(LookupGlobalICBaseline, LookupGlobalICBaseline,
- NOT_INSIDE_TYPEOF)
-IC_BUILTIN_PARAM(LookupGlobalICInsideTypeofBaseline, LookupGlobalICBaseline,
- INSIDE_TYPEOF)
-IC_BUILTIN_PARAM(LookupContextBaseline, LookupContextBaseline,
- NOT_INSIDE_TYPEOF)
-IC_BUILTIN_PARAM(LookupContextInsideTypeofBaseline, LookupContextBaseline,
- INSIDE_TYPEOF)
+void Builtins::Generate_LoadGlobalICInsideTypeofTrampoline(
+ compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateLoadGlobalICTrampoline(TypeofMode::kInside);
+}
+
+void Builtins::Generate_LoadGlobalICBaseline(
+ compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateLoadGlobalICBaseline(TypeofMode::kNotInside);
+}
+
+void Builtins::Generate_LoadGlobalICInsideTypeofBaseline(
+ compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateLoadGlobalICBaseline(TypeofMode::kInside);
+}
+
+void Builtins::Generate_LookupGlobalICBaseline(
+ compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateLookupGlobalICBaseline(TypeofMode::kNotInside);
+}
+
+void Builtins::Generate_LookupGlobalICInsideTypeofBaseline(
+ compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateLookupGlobalICBaseline(TypeofMode::kInside);
+}
+
+void Builtins::Generate_LookupContextBaseline(
+ compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateLookupContextBaseline(TypeofMode::kNotInside);
+}
+
+void Builtins::Generate_LookupContextInsideTypeofBaseline(
+ compiler::CodeAssemblerState* state) {
+ AccessorAssembler assembler(state);
+ assembler.GenerateLookupContextBaseline(TypeofMode::kInside);
+}
TF_BUILTIN(DynamicCheckMaps, CodeStubAssembler) {
auto map = Parameter<Map>(Descriptor::kMap);
diff --git a/chromium/v8/src/builtins/builtins-internal-gen.cc b/chromium/v8/src/builtins/builtins-internal-gen.cc
index 0c4131dba96..274709b46a0 100644
--- a/chromium/v8/src/builtins/builtins-internal-gen.cc
+++ b/chromium/v8/src/builtins/builtins-internal-gen.cc
@@ -7,6 +7,7 @@
#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
#include "src/codegen/code-stub-assembler.h"
+#include "src/codegen/interface-descriptors-inl.h"
#include "src/codegen/macro-assembler.h"
#include "src/execution/frame-constants.h"
#include "src/heap/memory-chunk.h"
@@ -172,11 +173,11 @@ class RecordWriteCodeStubAssembler : public CodeStubAssembler {
}
TNode<BoolT> ShouldSkipFPRegs(TNode<Smi> mode) {
- return TaggedEqual(mode, SmiConstant(kDontSaveFPRegs));
+ return TaggedEqual(mode, SmiConstant(SaveFPRegsMode::kIgnore));
}
TNode<BoolT> ShouldEmitRememberSet(TNode<Smi> remembered_set) {
- return TaggedEqual(remembered_set, SmiConstant(EMIT_REMEMBERED_SET));
+ return TaggedEqual(remembered_set, SmiConstant(RememberedSetAction::kEmit));
}
template <typename Ret, typename Arg0, typename Arg1>
@@ -188,7 +189,7 @@ class RecordWriteCodeStubAssembler : public CodeStubAssembler {
BIND(&dont_save_fp);
{
CallCFunctionWithCallerSavedRegisters(
- function, MachineTypeOf<Ret>::value, kDontSaveFPRegs,
+ function, MachineTypeOf<Ret>::value, SaveFPRegsMode::kIgnore,
std::make_pair(MachineTypeOf<Arg0>::value, arg0),
std::make_pair(MachineTypeOf<Arg1>::value, arg1));
Goto(next);
@@ -197,7 +198,7 @@ class RecordWriteCodeStubAssembler : public CodeStubAssembler {
BIND(&save_fp);
{
CallCFunctionWithCallerSavedRegisters(
- function, MachineTypeOf<Ret>::value, kSaveFPRegs,
+ function, MachineTypeOf<Ret>::value, SaveFPRegsMode::kSave,
std::make_pair(MachineTypeOf<Arg0>::value, arg0),
std::make_pair(MachineTypeOf<Arg1>::value, arg1));
Goto(next);
@@ -213,7 +214,7 @@ class RecordWriteCodeStubAssembler : public CodeStubAssembler {
BIND(&dont_save_fp);
{
CallCFunctionWithCallerSavedRegisters(
- function, MachineTypeOf<Ret>::value, kDontSaveFPRegs,
+ function, MachineTypeOf<Ret>::value, SaveFPRegsMode::kIgnore,
std::make_pair(MachineTypeOf<Arg0>::value, arg0),
std::make_pair(MachineTypeOf<Arg1>::value, arg1),
std::make_pair(MachineTypeOf<Arg2>::value, arg2));
@@ -223,7 +224,7 @@ class RecordWriteCodeStubAssembler : public CodeStubAssembler {
BIND(&save_fp);
{
CallCFunctionWithCallerSavedRegisters(
- function, MachineTypeOf<Ret>::value, kSaveFPRegs,
+ function, MachineTypeOf<Ret>::value, SaveFPRegsMode::kSave,
std::make_pair(MachineTypeOf<Arg0>::value, arg0),
std::make_pair(MachineTypeOf<Arg1>::value, arg1),
std::make_pair(MachineTypeOf<Arg2>::value, arg2));
@@ -821,8 +822,9 @@ TF_BUILTIN(AdaptorWithBuiltinExitFrame, CodeStubAssembler) {
Int32Constant(BuiltinExitFrameConstants::kNumExtraArgsWithReceiver));
const bool builtin_exit_frame = true;
- TNode<Code> code = HeapConstant(CodeFactory::CEntry(
- isolate(), 1, kDontSaveFPRegs, kArgvOnStack, builtin_exit_frame));
+ TNode<Code> code =
+ HeapConstant(CodeFactory::CEntry(isolate(), 1, SaveFPRegsMode::kIgnore,
+ ArgvMode::kStack, builtin_exit_frame));
// Unconditionally push argc, target and new target as extra stack arguments.
// They will be used by stack frame iterators when constructing stack trace.
@@ -891,54 +893,54 @@ TF_BUILTIN(AbortCSAAssert, CodeStubAssembler) {
void Builtins::Generate_CEntry_Return1_DontSaveFPRegs_ArgvOnStack_NoBuiltinExit(
MacroAssembler* masm) {
- Generate_CEntry(masm, 1, kDontSaveFPRegs, kArgvOnStack, false);
+ Generate_CEntry(masm, 1, SaveFPRegsMode::kIgnore, ArgvMode::kStack, false);
}
void Builtins::Generate_CEntry_Return1_DontSaveFPRegs_ArgvOnStack_BuiltinExit(
MacroAssembler* masm) {
- Generate_CEntry(masm, 1, kDontSaveFPRegs, kArgvOnStack, true);
+ Generate_CEntry(masm, 1, SaveFPRegsMode::kIgnore, ArgvMode::kStack, true);
}
void Builtins::
Generate_CEntry_Return1_DontSaveFPRegs_ArgvInRegister_NoBuiltinExit(
MacroAssembler* masm) {
- Generate_CEntry(masm, 1, kDontSaveFPRegs, kArgvInRegister, false);
+ Generate_CEntry(masm, 1, SaveFPRegsMode::kIgnore, ArgvMode::kRegister, false);
}
void Builtins::Generate_CEntry_Return1_SaveFPRegs_ArgvOnStack_NoBuiltinExit(
MacroAssembler* masm) {
- Generate_CEntry(masm, 1, kSaveFPRegs, kArgvOnStack, false);
+ Generate_CEntry(masm, 1, SaveFPRegsMode::kSave, ArgvMode::kStack, false);
}
void Builtins::Generate_CEntry_Return1_SaveFPRegs_ArgvOnStack_BuiltinExit(
MacroAssembler* masm) {
- Generate_CEntry(masm, 1, kSaveFPRegs, kArgvOnStack, true);
+ Generate_CEntry(masm, 1, SaveFPRegsMode::kSave, ArgvMode::kStack, true);
}
void Builtins::Generate_CEntry_Return2_DontSaveFPRegs_ArgvOnStack_NoBuiltinExit(
MacroAssembler* masm) {
- Generate_CEntry(masm, 2, kDontSaveFPRegs, kArgvOnStack, false);
+ Generate_CEntry(masm, 2, SaveFPRegsMode::kIgnore, ArgvMode::kStack, false);
}
void Builtins::Generate_CEntry_Return2_DontSaveFPRegs_ArgvOnStack_BuiltinExit(
MacroAssembler* masm) {
- Generate_CEntry(masm, 2, kDontSaveFPRegs, kArgvOnStack, true);
+ Generate_CEntry(masm, 2, SaveFPRegsMode::kIgnore, ArgvMode::kStack, true);
}
void Builtins::
Generate_CEntry_Return2_DontSaveFPRegs_ArgvInRegister_NoBuiltinExit(
MacroAssembler* masm) {
- Generate_CEntry(masm, 2, kDontSaveFPRegs, kArgvInRegister, false);
+ Generate_CEntry(masm, 2, SaveFPRegsMode::kIgnore, ArgvMode::kRegister, false);
}
void Builtins::Generate_CEntry_Return2_SaveFPRegs_ArgvOnStack_NoBuiltinExit(
MacroAssembler* masm) {
- Generate_CEntry(masm, 2, kSaveFPRegs, kArgvOnStack, false);
+ Generate_CEntry(masm, 2, SaveFPRegsMode::kSave, ArgvMode::kStack, false);
}
void Builtins::Generate_CEntry_Return2_SaveFPRegs_ArgvOnStack_BuiltinExit(
MacroAssembler* masm) {
- Generate_CEntry(masm, 2, kSaveFPRegs, kArgvOnStack, true);
+ Generate_CEntry(masm, 2, SaveFPRegsMode::kSave, ArgvMode::kStack, true);
}
#if !defined(V8_TARGET_ARCH_ARM) && !defined(V8_TARGET_ARCH_MIPS)
@@ -956,7 +958,7 @@ void Builtins::Generate_MemMove(MacroAssembler* masm) {
// TODO(v8:11421): Remove #if once baseline compiler is ported to other
// architectures.
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
- V8_TARGET_ARCH_ARM
+ V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_RISCV64
void Builtins::Generate_BaselineLeaveFrame(MacroAssembler* masm) {
EmitReturnBaseline(masm);
}
diff --git a/chromium/v8/src/builtins/builtins-intl.cc b/chromium/v8/src/builtins/builtins-intl.cc
index fe32a484a3e..6febc81c3a5 100644
--- a/chromium/v8/src/builtins/builtins-intl.cc
+++ b/chromium/v8/src/builtins/builtins-intl.cc
@@ -668,6 +668,49 @@ BUILTIN(LocalePrototypeMinimize) {
RETURN_RESULT_OR_FAILURE(isolate, JSLocale::Minimize(isolate, locale));
}
+BUILTIN(LocalePrototypeCalendars) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSLocale, locale, "Intl.Locale.prototype.calendars");
+ RETURN_RESULT_OR_FAILURE(isolate, JSLocale::Calendars(isolate, locale));
+}
+
+BUILTIN(LocalePrototypeCollations) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSLocale, locale, "Intl.Locale.prototype.collations");
+ RETURN_RESULT_OR_FAILURE(isolate, JSLocale::Collations(isolate, locale));
+}
+
+BUILTIN(LocalePrototypeHourCycles) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSLocale, locale, "Intl.Locale.prototype.hourCycles");
+ RETURN_RESULT_OR_FAILURE(isolate, JSLocale::HourCycles(isolate, locale));
+}
+
+BUILTIN(LocalePrototypeNumberingSystems) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSLocale, locale, "Intl.Locale.prototype.numberingSystems");
+ RETURN_RESULT_OR_FAILURE(isolate,
+ JSLocale::NumberingSystems(isolate, locale));
+}
+
+BUILTIN(LocalePrototypeTextInfo) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSLocale, locale, "Intl.Locale.prototype.textInfo");
+ RETURN_RESULT_OR_FAILURE(isolate, JSLocale::TextInfo(isolate, locale));
+}
+
+BUILTIN(LocalePrototypeTimeZones) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSLocale, locale, "Intl.Locale.prototype.timeZones");
+ RETURN_RESULT_OR_FAILURE(isolate, JSLocale::TimeZones(isolate, locale));
+}
+
+BUILTIN(LocalePrototypeWeekInfo) {
+ HandleScope scope(isolate);
+ CHECK_RECEIVER(JSLocale, locale, "Intl.Locale.prototype.weekInfo");
+ RETURN_RESULT_OR_FAILURE(isolate, JSLocale::WeekInfo(isolate, locale));
+}
+
BUILTIN(RelativeTimeFormatSupportedLocalesOf) {
HandleScope scope(isolate);
Handle<Object> locales = args.atOrUndefined(isolate, 1);
diff --git a/chromium/v8/src/builtins/builtins-lazy-gen.cc b/chromium/v8/src/builtins/builtins-lazy-gen.cc
index 8af0bef95d2..4749ee094bc 100644
--- a/chromium/v8/src/builtins/builtins-lazy-gen.cc
+++ b/chromium/v8/src/builtins/builtins-lazy-gen.cc
@@ -154,20 +154,7 @@ void LazyBuiltinsAssembler::CompileLazy(TNode<JSFunction> function) {
GotoIf(InstanceTypeEqual(sfi_data_type.value(), BASELINE_DATA_TYPE),
&baseline);
- // Finally, check for presence of an NCI cached Code object - if an entry
- // possibly exists, call into runtime to query the cache.
- TNode<Uint8T> flags2 =
- LoadObjectField<Uint8T>(shared, SharedFunctionInfo::kFlags2Offset);
- TNode<BoolT> may_have_cached_code =
- IsSetWord32<SharedFunctionInfo::MayHaveCachedCodeBit>(flags2);
- code = Select<Code>(
- may_have_cached_code,
- [=]() {
- return CAST(CallRuntime(Runtime::kTryInstallNCICode,
- Parameter<Context>(Descriptor::kContext),
- function));
- },
- [=]() { return sfi_code; });
+ code = sfi_code;
Goto(&tailcall_code);
BIND(&baseline);
diff --git a/chromium/v8/src/builtins/builtins-microtask-queue-gen.cc b/chromium/v8/src/builtins/builtins-microtask-queue-gen.cc
index 9f16186d13b..281e9234dc7 100644
--- a/chromium/v8/src/builtins/builtins-microtask-queue-gen.cc
+++ b/chromium/v8/src/builtins/builtins-microtask-queue-gen.cc
@@ -46,8 +46,11 @@ class MicrotaskQueueBuiltinsAssembler : public CodeStubAssembler {
void EnterMicrotaskContext(TNode<Context> native_context);
void RewindEnteredContext(TNode<IntPtrT> saved_entered_context_count);
+ void RunAllPromiseHooks(PromiseHookType type, TNode<Context> context,
+ TNode<HeapObject> promise_or_capability);
void RunPromiseHook(Runtime::FunctionId id, TNode<Context> context,
- TNode<HeapObject> promise_or_capability);
+ TNode<HeapObject> promise_or_capability,
+ TNode<Uint32T> promiseHookFlags);
};
TNode<RawPtrT> MicrotaskQueueBuiltinsAssembler::GetMicrotaskQueue(
@@ -199,7 +202,7 @@ void MicrotaskQueueBuiltinsAssembler::RunSingleMicrotask(
const TNode<Object> thenable = LoadObjectField(
microtask, PromiseResolveThenableJobTask::kThenableOffset);
- RunPromiseHook(Runtime::kPromiseHookBefore, microtask_context,
+ RunAllPromiseHooks(PromiseHookType::kBefore, microtask_context,
CAST(promise_to_resolve));
{
@@ -208,7 +211,7 @@ void MicrotaskQueueBuiltinsAssembler::RunSingleMicrotask(
promise_to_resolve, thenable, then);
}
- RunPromiseHook(Runtime::kPromiseHookAfter, microtask_context,
+ RunAllPromiseHooks(PromiseHookType::kAfter, microtask_context,
CAST(promise_to_resolve));
RewindEnteredContext(saved_entered_context_count);
@@ -243,8 +246,8 @@ void MicrotaskQueueBuiltinsAssembler::RunSingleMicrotask(
BIND(&preserved_data_done);
// Run the promise before/debug hook if enabled.
- RunPromiseHook(Runtime::kPromiseHookBefore, microtask_context,
- promise_or_capability);
+ RunAllPromiseHooks(PromiseHookType::kBefore, microtask_context,
+ promise_or_capability);
{
ScopedExceptionHandler handler(this, &if_exception, &var_exception);
@@ -253,8 +256,8 @@ void MicrotaskQueueBuiltinsAssembler::RunSingleMicrotask(
}
// Run the promise after/debug hook if enabled.
- RunPromiseHook(Runtime::kPromiseHookAfter, microtask_context,
- promise_or_capability);
+ RunAllPromiseHooks(PromiseHookType::kAfter, microtask_context,
+ promise_or_capability);
Label preserved_data_reset_done(this);
GotoIf(IsUndefined(preserved_embedder_data), &preserved_data_reset_done);
@@ -296,8 +299,8 @@ void MicrotaskQueueBuiltinsAssembler::RunSingleMicrotask(
BIND(&preserved_data_done);
// Run the promise before/debug hook if enabled.
- RunPromiseHook(Runtime::kPromiseHookBefore, microtask_context,
- promise_or_capability);
+ RunAllPromiseHooks(PromiseHookType::kBefore, microtask_context,
+ promise_or_capability);
{
ScopedExceptionHandler handler(this, &if_exception, &var_exception);
@@ -306,8 +309,8 @@ void MicrotaskQueueBuiltinsAssembler::RunSingleMicrotask(
}
// Run the promise after/debug hook if enabled.
- RunPromiseHook(Runtime::kPromiseHookAfter, microtask_context,
- promise_or_capability);
+ RunAllPromiseHooks(PromiseHookType::kAfter, microtask_context,
+ promise_or_capability);
Label preserved_data_reset_done(this);
GotoIf(IsUndefined(preserved_embedder_data), &preserved_data_reset_done);
@@ -465,12 +468,42 @@ void MicrotaskQueueBuiltinsAssembler::RewindEnteredContext(
saved_entered_context_count);
}
+void MicrotaskQueueBuiltinsAssembler::RunAllPromiseHooks(
+ PromiseHookType type, TNode<Context> context,
+ TNode<HeapObject> promise_or_capability) {
+ Label hook(this, Label::kDeferred), done_hook(this);
+ TNode<Uint32T> promiseHookFlags = PromiseHookFlags();
+ Branch(NeedsAnyPromiseHooks(promiseHookFlags), &hook, &done_hook);
+ BIND(&hook);
+ {
+ switch (type) {
+ case PromiseHookType::kBefore:
+ RunContextPromiseHookBefore(context, promise_or_capability,
+ promiseHookFlags);
+ RunPromiseHook(Runtime::kPromiseHookBefore, context,
+ promise_or_capability, promiseHookFlags);
+ break;
+ case PromiseHookType::kAfter:
+ RunContextPromiseHookAfter(context, promise_or_capability,
+ promiseHookFlags);
+ RunPromiseHook(Runtime::kPromiseHookAfter, context,
+ promise_or_capability, promiseHookFlags);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ Goto(&done_hook);
+ }
+ BIND(&done_hook);
+}
+
void MicrotaskQueueBuiltinsAssembler::RunPromiseHook(
Runtime::FunctionId id, TNode<Context> context,
- TNode<HeapObject> promise_or_capability) {
+ TNode<HeapObject> promise_or_capability,
+ TNode<Uint32T> promiseHookFlags) {
Label hook(this, Label::kDeferred), done_hook(this);
- Branch(IsPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate(), &hook,
- &done_hook);
+ Branch(IsIsolatePromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate(
+ promiseHookFlags), &hook, &done_hook);
BIND(&hook);
{
// Get to the underlying JSPromise instance.
diff --git a/chromium/v8/src/builtins/builtins-regexp-gen.cc b/chromium/v8/src/builtins/builtins-regexp-gen.cc
index 23648efb98b..e59d2a00ac3 100644
--- a/chromium/v8/src/builtins/builtins-regexp-gen.cc
+++ b/chromium/v8/src/builtins/builtins-regexp-gen.cc
@@ -1014,6 +1014,12 @@ TF_BUILTIN(RegExpExecInternal, RegExpBuiltinsAssembler) {
TNode<String> RegExpBuiltinsAssembler::FlagsGetter(TNode<Context> context,
TNode<Object> regexp,
bool is_fastpath) {
+ TVARIABLE(String, result);
+ Label runtime(this, Label::kDeferred), done(this, &result);
+ if (is_fastpath) {
+ GotoIfForceSlowPath(&runtime);
+ }
+
Isolate* isolate = this->isolate();
const TNode<IntPtrT> int_one = IntPtrConstant(1);
@@ -1110,7 +1116,7 @@ TNode<String> RegExpBuiltinsAssembler::FlagsGetter(TNode<Context> context,
// corresponding char for each set flag.
{
- const TNode<String> result = AllocateSeqOneByteString(var_length.value());
+ const TNode<String> string = AllocateSeqOneByteString(var_length.value());
TVARIABLE(IntPtrT, var_offset,
IntPtrConstant(SeqOneByteString::kHeaderSize - kHeapObjectTag));
@@ -1120,7 +1126,7 @@ TNode<String> RegExpBuiltinsAssembler::FlagsGetter(TNode<Context> context,
Label next(this); \
GotoIfNot(IsSetWord(var_flags.value(), FLAG), &next); \
const TNode<Int32T> value = Int32Constant(CHAR); \
- StoreNoWriteBarrier(MachineRepresentation::kWord8, result, \
+ StoreNoWriteBarrier(MachineRepresentation::kWord8, string, \
var_offset.value(), value); \
var_offset = IntPtrAdd(var_offset.value(), int_one); \
Goto(&next); \
@@ -1137,7 +1143,26 @@ TNode<String> RegExpBuiltinsAssembler::FlagsGetter(TNode<Context> context,
CASE_FOR_FLAG(JSRegExp::kSticky, 'y');
#undef CASE_FOR_FLAG
- return result;
+ if (is_fastpath) {
+#ifdef V8_ENABLE_FORCE_SLOW_PATH
+ result = string;
+ Goto(&done);
+
+ BIND(&runtime);
+ {
+ result =
+ CAST(CallRuntime(Runtime::kRegExpStringFromFlags, context, regexp));
+ Goto(&done);
+ }
+
+ BIND(&done);
+ return result.value();
+#else
+ return string;
+#endif
+ } else {
+ return string;
+ }
}
}
diff --git a/chromium/v8/src/builtins/builtins-trace.cc b/chromium/v8/src/builtins/builtins-trace.cc
index cf85ce9948b..24baf59522b 100644
--- a/chromium/v8/src/builtins/builtins-trace.cc
+++ b/chromium/v8/src/builtins/builtins-trace.cc
@@ -9,6 +9,7 @@
#include "src/json/json-stringifier.h"
#include "src/logging/counters.h"
#include "src/objects/objects-inl.h"
+#include "src/tracing/traced-value.h"
#if defined(V8_USE_PERFETTO)
#include "protos/perfetto/trace/track_event/debug_annotation.pbzero.h"
diff --git a/chromium/v8/src/builtins/builtins-typed-array-gen.cc b/chromium/v8/src/builtins/builtins-typed-array-gen.cc
index 65b1ab2f2b9..d333a61e395 100644
--- a/chromium/v8/src/builtins/builtins-typed-array-gen.cc
+++ b/chromium/v8/src/builtins/builtins-typed-array-gen.cc
@@ -123,13 +123,26 @@ TF_BUILTIN(TypedArrayPrototypeByteLength, TypedArrayBuiltinsAssembler) {
// Check if the {receiver} is actually a JSTypedArray.
ThrowIfNotInstanceType(context, receiver, JS_TYPED_ARRAY_TYPE, kMethodName);
- // Default to zero if the {receiver}s buffer was detached.
+ TNode<JSTypedArray> receiver_array = CAST(receiver);
TNode<JSArrayBuffer> receiver_buffer =
- LoadJSArrayBufferViewBuffer(CAST(receiver));
- TNode<UintPtrT> byte_length = Select<UintPtrT>(
- IsDetachedBuffer(receiver_buffer), [=] { return UintPtrConstant(0); },
- [=] { return LoadJSArrayBufferViewByteLength(CAST(receiver)); });
- Return(ChangeUintPtrToTagged(byte_length));
+ LoadJSArrayBufferViewBuffer(receiver_array);
+
+ Label variable_length(this), normal(this);
+ Branch(IsVariableLengthTypedArray(receiver_array), &variable_length, &normal);
+ BIND(&variable_length);
+ {
+ Return(ChangeUintPtrToTagged(LoadVariableLengthJSTypedArrayByteLength(
+ context, receiver_array, receiver_buffer)));
+ }
+
+ BIND(&normal);
+ {
+ // Default to zero if the {receiver}s buffer was detached.
+ TNode<UintPtrT> byte_length = Select<UintPtrT>(
+ IsDetachedBuffer(receiver_buffer), [=] { return UintPtrConstant(0); },
+ [=] { return LoadJSArrayBufferViewByteLength(receiver_array); });
+ Return(ChangeUintPtrToTagged(byte_length));
+ }
}
// ES6 #sec-get-%typedarray%.prototype.byteoffset
@@ -159,13 +172,29 @@ TF_BUILTIN(TypedArrayPrototypeLength, TypedArrayBuiltinsAssembler) {
// Check if the {receiver} is actually a JSTypedArray.
ThrowIfNotInstanceType(context, receiver, JS_TYPED_ARRAY_TYPE, kMethodName);
- // Default to zero if the {receiver}s buffer was detached.
+ TNode<JSTypedArray> receiver_array = CAST(receiver);
TNode<JSArrayBuffer> receiver_buffer =
- LoadJSArrayBufferViewBuffer(CAST(receiver));
- TNode<UintPtrT> length = Select<UintPtrT>(
- IsDetachedBuffer(receiver_buffer), [=] { return UintPtrConstant(0); },
- [=] { return LoadJSTypedArrayLength(CAST(receiver)); });
- Return(ChangeUintPtrToTagged(length));
+ LoadJSArrayBufferViewBuffer(receiver_array);
+
+ Label variable_length(this), normal(this);
+ Branch(IsVariableLengthTypedArray(receiver_array), &variable_length, &normal);
+ BIND(&variable_length);
+ {
+ Label miss(this);
+ Return(ChangeUintPtrToTagged(LoadVariableLengthJSTypedArrayLength(
+ receiver_array, receiver_buffer, &miss)));
+ BIND(&miss);
+ Return(ChangeUintPtrToTagged(UintPtrConstant(0)));
+ }
+
+ BIND(&normal);
+ {
+ // Default to zero if the {receiver}s buffer was detached.
+ TNode<UintPtrT> length = Select<UintPtrT>(
+ IsDetachedBuffer(receiver_buffer), [=] { return UintPtrConstant(0); },
+ [=] { return LoadJSTypedArrayLength(receiver_array); });
+ Return(ChangeUintPtrToTagged(length));
+ }
}
TNode<BoolT> TypedArrayBuiltinsAssembler::IsUint8ElementsKind(
@@ -322,17 +351,18 @@ void TypedArrayBuiltinsAssembler::DispatchTypedArrayByElementsKind(
int32_t elements_kinds[] = {
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) TYPE##_ELEMENTS,
- TYPED_ARRAYS(TYPED_ARRAY_CASE)
+ TYPED_ARRAYS(TYPED_ARRAY_CASE) RAB_GSAB_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
};
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) Label if_##type##array(this);
TYPED_ARRAYS(TYPED_ARRAY_CASE)
+ RAB_GSAB_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
Label* elements_kind_labels[] = {
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) &if_##type##array,
- TYPED_ARRAYS(TYPED_ARRAY_CASE)
+ TYPED_ARRAYS(TYPED_ARRAY_CASE) RAB_GSAB_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
};
STATIC_ASSERT(arraysize(elements_kinds) == arraysize(elements_kind_labels));
@@ -350,6 +380,15 @@ void TypedArrayBuiltinsAssembler::DispatchTypedArrayByElementsKind(
TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \
+ BIND(&if_##type##array); \
+ { \
+ case_function(TYPE##_ELEMENTS, sizeof(ctype), 0); \
+ Goto(&next); \
+ }
+ RAB_GSAB_TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+
BIND(&if_unknown_type);
Unreachable();
@@ -374,7 +413,7 @@ void TypedArrayBuiltinsAssembler::SetJSTypedArrayOnHeapDataPtr(
IntPtrSub(full_base, Signed(ChangeUint32ToWord(compressed_base)));
// Add JSTypedArray::ExternalPointerCompensationForOnHeapArray() to offset.
DCHECK_EQ(
- isolate()->isolate_root(),
+ isolate()->cage_base(),
JSTypedArray::ExternalPointerCompensationForOnHeapArray(isolate()));
// See JSTypedArray::SetOnHeapDataPtr() for details.
offset = Unsigned(IntPtrAdd(offset, ptr_compr_cage_base));
diff --git a/chromium/v8/src/builtins/builtins-typed-array.cc b/chromium/v8/src/builtins/builtins-typed-array.cc
index fdadc7a554c..bb936e6e463 100644
--- a/chromium/v8/src/builtins/builtins-typed-array.cc
+++ b/chromium/v8/src/builtins/builtins-typed-array.cc
@@ -154,7 +154,8 @@ BUILTIN(TypedArrayPrototypeFill) {
DCHECK_LE(end, len);
DCHECK_LE(count, len);
- return ElementsAccessor::ForKind(kind)->Fill(array, obj_value, start, end);
+ RETURN_RESULT_OR_FAILURE(isolate, ElementsAccessor::ForKind(kind)->Fill(
+ array, obj_value, start, end));
}
BUILTIN(TypedArrayPrototypeIncludes) {
diff --git a/chromium/v8/src/builtins/builtins-utils.h b/chromium/v8/src/builtins/builtins-utils.h
index e5f420a20de..b9146ab6253 100644
--- a/chromium/v8/src/builtins/builtins-utils.h
+++ b/chromium/v8/src/builtins/builtins-utils.h
@@ -85,8 +85,7 @@ class BuiltinArguments : public JavaScriptArguments {
V8_NOINLINE static Address Builtin_Impl_Stats_##name( \
int args_length, Address* args_object, Isolate* isolate) { \
BuiltinArguments args(args_length, args_object); \
- RuntimeCallTimerScope timer(isolate, \
- RuntimeCallCounterId::kBuiltin_##name); \
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kBuiltin_##name); \
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.runtime"), \
"V8.Builtin_" #name); \
return CONVERT_OBJECT(Builtin_Impl_##name(args, isolate)); \
diff --git a/chromium/v8/src/builtins/builtins-wasm-gen.cc b/chromium/v8/src/builtins/builtins-wasm-gen.cc
index 0704d8681ba..eb9311d0c62 100644
--- a/chromium/v8/src/builtins/builtins-wasm-gen.cc
+++ b/chromium/v8/src/builtins/builtins-wasm-gen.cc
@@ -9,7 +9,6 @@
#include "src/codegen/interface-descriptors.h"
#include "src/objects/objects-inl.h"
#include "src/wasm/wasm-objects.h"
-#include "src/wasm/wasm-opcodes.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/builtins/cast.tq b/chromium/v8/src/builtins/cast.tq
index b490055a19e..d7d2eb6aa6d 100644
--- a/chromium/v8/src/builtins/cast.tq
+++ b/chromium/v8/src/builtins/cast.tq
@@ -329,6 +329,24 @@ Cast<Number|TheHole>(o: Object): Number|TheHole labels CastError {
}
}
+Cast<Context|Zero|Undefined>(o: Object): Context|Zero|Undefined
+ labels CastError {
+ typeswitch (o) {
+ case (o: Context): {
+ return o;
+ }
+ case (o: Zero): {
+ return o;
+ }
+ case (o: Undefined): {
+ return o;
+ }
+ case (Object): {
+ goto CastError;
+ }
+ }
+}
+
macro Cast<A : type extends HeapObject>(o: HeapObject): A
labels CastError;
@@ -386,6 +404,12 @@ Cast<Undefined|Callable>(o: HeapObject): Undefined|Callable
return HeapObjectToCallable(o) otherwise CastError;
}
+Cast<Undefined|JSFunction>(o: HeapObject): Undefined|JSFunction
+ labels CastError {
+ if (o == Undefined) return Undefined;
+ return Cast<JSFunction>(o) otherwise CastError;
+}
+
macro Cast<T : type extends Symbol>(o: Symbol): T labels CastError;
Cast<PublicSymbol>(s: Symbol): PublicSymbol labels CastError {
if (s.flags.is_private) goto CastError;
diff --git a/chromium/v8/src/builtins/constructor.tq b/chromium/v8/src/builtins/constructor.tq
index add6db03052..d929c7f485f 100644
--- a/chromium/v8/src/builtins/constructor.tq
+++ b/chromium/v8/src/builtins/constructor.tq
@@ -15,6 +15,8 @@ extern runtime CreateObjectLiteral(
namespace constructor {
+extern builtin FastNewClosure(
+ Context, SharedFunctionInfo, FeedbackCell): JSFunction;
extern builtin FastNewObject(Context, JSFunction, JSReceiver): JSObject;
extern enum AllocationSiteMode {
@@ -42,6 +44,15 @@ extern macro ConstructorBuiltinsAssembler::CreateShallowObjectLiteral(
extern macro ConstructorBuiltinsAssembler::CreateEmptyObjectLiteral(Context):
JSObject;
+extern macro LoadContextFromBaseline(): Context;
+
+builtin FastNewClosureBaseline(
+ sharedFunctionInfo: SharedFunctionInfo,
+ feedbackCell: FeedbackCell): JSFunction {
+ const context = LoadContextFromBaseline();
+ tail FastNewClosure(context, sharedFunctionInfo, feedbackCell);
+}
+
builtin FastNewFunctionContextEval(implicit context: Context)(
scopeInfo: ScopeInfo, slots: uint32): Context {
return FastNewFunctionContext(scopeInfo, slots, context, kEvalScope);
diff --git a/chromium/v8/src/builtins/conversion.tq b/chromium/v8/src/builtins/conversion.tq
index 5a2dccd068c..636f49a024d 100644
--- a/chromium/v8/src/builtins/conversion.tq
+++ b/chromium/v8/src/builtins/conversion.tq
@@ -45,11 +45,30 @@ builtin NumberToString(implicit context: Context)(input: Number): String {
}
// ES6 section 7.1.2 ToBoolean ( argument )
-builtin ToBoolean(implicit context: Context)(input: JSAny): Boolean {
+builtin ToBoolean(input: JSAny): Boolean {
BranchIfToBooleanIsTrue(input) otherwise return TrueConstant(),
return FalseConstant();
}
+struct ToBooleanForBaselineJumpResult {
+ value: JSAny;
+ is_to_boolean: Smi;
+}
+// ToBoolean for baseline code jumps, which
+// a) returns the original value as the first return value, to avoid needing
+// to save it in the caller, and
+// b) returns the true/false value as a Smi, to make the baseline-side
+// comparison cheaper.
+builtin ToBooleanForBaselineJump(input: JSAny): ToBooleanForBaselineJumpResult {
+ try {
+ BranchIfToBooleanIsTrue(input) otherwise IsTrue, IsFalse;
+ } label IsTrue {
+ return ToBooleanForBaselineJumpResult{value: input, is_to_boolean: 1};
+ } label IsFalse {
+ return ToBooleanForBaselineJumpResult{value: input, is_to_boolean: 0};
+ }
+}
+
transitioning builtin ToLength(implicit context: Context)(input: JSAny):
Number {
// We might need to loop once for ToNumber conversion.
diff --git a/chromium/v8/src/builtins/ia32/builtins-ia32.cc b/chromium/v8/src/builtins/ia32/builtins-ia32.cc
index 44b71bed915..4993de4816f 100644
--- a/chromium/v8/src/builtins/ia32/builtins-ia32.cc
+++ b/chromium/v8/src/builtins/ia32/builtins-ia32.cc
@@ -8,6 +8,7 @@
#include "src/base/bits-iterator.h"
#include "src/base/iterator.h"
#include "src/codegen/code-factory.h"
+#include "src/codegen/interface-descriptors-inl.h"
// For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop.
#include "src/codegen/macro-assembler-inl.h"
#include "src/codegen/register-configuration.h"
@@ -116,7 +117,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// edx: new target
// Reload context from the frame.
__ mov(esi, Operand(ebp, ConstructFrameConstants::kContextOffset));
- __ InvokeFunction(edi, edx, eax, CALL_FUNCTION);
+ __ InvokeFunction(edi, edx, eax, InvokeType::kCall);
// Restore context from the frame.
__ mov(esi, Operand(ebp, ConstructFrameConstants::kContextOffset));
@@ -245,7 +246,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
  // Restore and call the constructor function.
__ mov(edi, Operand(ebp, ConstructFrameConstants::kConstructorOffset));
- __ InvokeFunction(edi, edx, eax, CALL_FUNCTION);
+ __ InvokeFunction(edi, edx, eax, InvokeType::kCall);
// ----------- S t a t e -------------
// -- eax: constructor result
@@ -597,7 +598,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Store input value into generator object.
__ mov(FieldOperand(edx, JSGeneratorObject::kInputOrDebugPosOffset), eax);
__ RecordWriteField(edx, JSGeneratorObject::kInputOrDebugPosOffset, eax, ecx,
- kDontSaveFPRegs);
+ SaveFPRegsMode::kIgnore);
// Load suspended function and context.
__ mov(edi, FieldOperand(edx, JSGeneratorObject::kFunctionOffset));
@@ -645,15 +646,12 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
FieldOperand(edx, JSGeneratorObject::kParametersAndRegistersOffset));
{
Label done_loop, loop;
- __ mov(edi, ecx);
-
__ bind(&loop);
- __ dec(edi);
+ __ dec(ecx);
__ j(less, &done_loop);
__ Push(
- FieldOperand(ebx, edi, times_tagged_size, FixedArray::kHeaderSize));
+ FieldOperand(ebx, ecx, times_tagged_size, FixedArray::kHeaderSize));
__ jmp(&loop);
-
__ bind(&done_loop);
}
@@ -740,7 +738,8 @@ static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm,
__ mov(FieldOperand(closure, JSFunction::kCodeOffset), optimized_code);
__ mov(scratch1, optimized_code); // Write barrier clobbers scratch1 below.
__ RecordWriteField(closure, JSFunction::kCodeOffset, scratch1, scratch2,
- kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ SaveFPRegsMode::kIgnore, RememberedSetAction::kOmit,
+ SmiCheck::kOmit);
}
static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
@@ -1458,7 +1457,7 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
// and edi are used as scratch registers.
Generate_InterpreterPushZeroAndArgsAndReturnAddress(
masm, eax, ecx, edx, edi,
- InterpreterPushArgsThenConstructDescriptor::kStackArgumentsCount,
+ InterpreterPushArgsThenConstructDescriptor::GetStackParameterCount(),
&stack_overflow);
// Call the appropriate constructor. eax and ecx already contain intended
@@ -1591,7 +1590,7 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
__ jmp(kJavaScriptCallCodeStartRegister);
}
-void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
+void Builtins::Generate_InterpreterEnterAtNextBytecode(MacroAssembler* masm) {
// Get bytecode array and bytecode offset from the stack frame.
__ mov(kInterpreterBytecodeArrayRegister,
Operand(ebp, InterpreterFrameConstants::kBytecodeArrayFromFp));
@@ -1636,7 +1635,7 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
__ Abort(AbortReason::kInvalidBytecodeAdvance);
}
-void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+void Builtins::Generate_InterpreterEnterAtBytecode(MacroAssembler* masm) {
Generate_InterpreterEnterBytecode(masm);
}
// static
@@ -1666,7 +1665,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
__ mov(feedback_vector,
FieldOperand(closure, JSFunction::kFeedbackCellOffset));
__ mov(feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
- if (__ emit_debug_code()) {
+ if (FLAG_debug_code) {
__ CmpObjectType(feedback_vector, FEEDBACK_VECTOR_TYPE, scratch);
__ Assert(equal, AbortReason::kExpectedFeedbackVector);
}
@@ -1939,7 +1938,7 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// arguments to the receiver.
__ bind(&no_arguments);
{
- __ Set(eax, 0);
+ __ Move(eax, 0);
__ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
}
}
@@ -2108,6 +2107,7 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
}
// static
+// TODO(v8:11615): Observe Code::kMaxArguments in CallOrConstructVarargs
void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
Handle<Code> code) {
// ----------- S t a t e -------------
@@ -2133,7 +2133,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
__ pop(kArgumentsList);
__ PushReturnAddressFrom(edx);
- if (masm->emit_debug_code()) {
+ if (FLAG_debug_code) {
// Allow kArgumentsList to be a FixedArray, or a FixedDoubleArray if
// kArgumentsLength == 0.
Label ok, fail;
@@ -2294,7 +2294,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ AllocateStackSpace(scratch);
// Include return address and receiver.
__ add(eax, Immediate(2));
- __ Set(current, 0);
+ __ Move(current, 0);
__ jmp(&check);
// Loop.
__ bind(&copy);
@@ -2443,7 +2443,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ movzx_w(
ecx, FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
- __ InvokeFunctionCode(edi, no_reg, ecx, eax, JUMP_FUNCTION);
+ __ InvokeFunctionCode(edi, no_reg, ecx, eax, InvokeType::kJump);
// The function is a "classConstructor", need to raise an exception.
__ bind(&class_constructor);
{
@@ -2788,6 +2788,8 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
}
void Builtins::Generate_BaselineOnStackReplacement(MacroAssembler* masm) {
+ __ mov(kContextRegister,
+ MemOperand(ebp, BaselineFrameConstants::kContextOffset));
return OnStackReplacement(masm, false);
}
@@ -2896,6 +2898,11 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
// TODO(v8:10701): Implement for this platform.
__ Trap();
}
+
+void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) {
+ // Only needed on x64.
+ __ Trap();
+}
#endif // V8_ENABLE_WEBASSEMBLY
void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
@@ -2908,7 +2915,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// esi: current context (C callee-saved)
// edi: JS function of the caller (C callee-saved)
//
- // If argv_mode == kArgvInRegister:
+ // If argv_mode == ArgvMode::kRegister:
// ecx: pointer to the first argument
STATIC_ASSERT(eax == kRuntimeCallArgCountRegister);
@@ -2928,8 +2935,8 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
int arg_stack_space = 3;
// Enter the exit frame that transitions from JavaScript to C++.
- if (argv_mode == kArgvInRegister) {
- DCHECK(save_doubles == kDontSaveFPRegs);
+ if (argv_mode == ArgvMode::kRegister) {
+ DCHECK(save_doubles == SaveFPRegsMode::kIgnore);
DCHECK(!builtin_exit_frame);
__ EnterApiExitFrame(arg_stack_space, edi);
@@ -2938,7 +2945,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
__ mov(edi, eax);
} else {
__ EnterExitFrame(
- arg_stack_space, save_doubles == kSaveFPRegs,
+ arg_stack_space, save_doubles == SaveFPRegsMode::kSave,
builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT);
}
@@ -2985,7 +2992,8 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
}
// Exit the JavaScript to C++ exit frame.
- __ LeaveExitFrame(save_doubles == kSaveFPRegs, argv_mode == kArgvOnStack);
+ __ LeaveExitFrame(save_doubles == SaveFPRegsMode::kSave,
+ argv_mode == ArgvMode::kStack);
__ ret(0);
// Handling of exception.
@@ -3148,7 +3156,7 @@ Operand ApiParameterOperand(int index) {
// stores the pointer to the reserved slot into esi.
void PrepareCallApiFunction(MacroAssembler* masm, int argc, Register scratch) {
__ EnterApiExitFrame(argc, scratch);
- if (__ emit_debug_code()) {
+ if (FLAG_debug_code) {
__ mov(esi, Immediate(bit_cast<int32_t>(kZapValue)));
}
}
@@ -3961,9 +3969,16 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
__ movsd(Operand(esi, dst_offset), xmm0);
}
+ if (FLAG_debug_code) {
+ const int kTopMask = 0x3800;
+ __ push(eax);
+ __ fwait();
+ __ fnstsw_ax();
+ __ test(eax, Immediate(kTopMask));
+ __ Assert(zero, AbortReason::kFpuTopIsNotZeroInDeoptimizer);
+ __ pop(eax);
+ }
// Clear FPU all exceptions.
- // TODO(ulan): Find out why the TOP register is not zero here in some cases,
- // and check that the generated code never deoptimizes with unbalanced stack.
__ fnclex();
// Mark the stack as not iterable for the CPU profiler which won't be able to
diff --git a/chromium/v8/src/builtins/ic-callable.tq b/chromium/v8/src/builtins/ic-callable.tq
index 85525c4c683..dd29e8bf5e2 100644
--- a/chromium/v8/src/builtins/ic-callable.tq
+++ b/chromium/v8/src/builtins/ic-callable.tq
@@ -6,6 +6,10 @@ namespace ic {
namespace callable {
extern macro IncrementCallCount(FeedbackVector, uintptr): void;
+const kCallFeedbackContentFieldMask: constexpr int32
+ generates 'FeedbackNexus::CallFeedbackContentField::kMask';
+const kCallFeedbackContentFieldShift: constexpr uint32
+ generates 'FeedbackNexus::CallFeedbackContentField::kShift';
macro IsMonomorphic(feedback: MaybeObject, target: JSAny): bool {
return IsWeakReferenceToObject(feedback, target);
@@ -50,8 +54,42 @@ macro TransitionToMegamorphic(implicit context: Context)(
ReportFeedbackUpdate(feedbackVector, slotId, 'Call:TransitionMegamorphic');
}
+macro TaggedEqualPrototypeApplyFunction(implicit context: Context)(
+ target: JSAny): bool {
+ return TaggedEqual(target, GetPrototypeApplyFunction());
+}
+
+macro FeedbackValueIsReceiver(implicit context: Context)(
+ feedbackVector: FeedbackVector, slotId: uintptr): bool {
+ const callCount: intptr = SmiUntag(Cast<Smi>(LoadFeedbackVectorSlot(
+ feedbackVector, slotId, kTaggedSize)) otherwise return false);
+ return (callCount & IntPtrConstant(kCallFeedbackContentFieldMask)) !=
+ IntPtrConstant(0);
+}
+
+macro SetCallFeedbackContent(implicit context: Context)(
+ feedbackVector: FeedbackVector, slotId: uintptr,
+ callFeedbackContent: constexpr CallFeedbackContent): void {
+  // Load the call count field from the feedback vector.
+ const callCount: intptr = SmiUntag(Cast<Smi>(LoadFeedbackVectorSlot(
+ feedbackVector, slotId, kTaggedSize)) otherwise return );
+ // The second lowest bits of the call count are used to state whether the
+ // feedback collected is a target or a receiver. Change that bit based on the
+ // callFeedbackContent input.
+ const callFeedbackContentFieldMask: intptr =
+ ~IntPtrConstant(kCallFeedbackContentFieldMask);
+ const newCount: intptr = (callCount & callFeedbackContentFieldMask) |
+ Convert<intptr>(Signed(
+ %RawConstexprCast<constexpr uint32>(callFeedbackContent)
+ << kCallFeedbackContentFieldShift));
+ StoreFeedbackVectorSlot(
+ feedbackVector, slotId, SmiTag(newCount), SKIP_WRITE_BARRIER,
+ kTaggedSize);
+ ReportFeedbackUpdate(feedbackVector, slotId, 'Call:SetCallFeedbackContent');
+}
+
macro CollectCallFeedback(
- maybeTarget: JSAny, context: Context,
+ maybeTarget: JSAny, maybeReceiver: Lazy<JSAny>, context: Context,
maybeFeedbackVector: Undefined|FeedbackVector, slotId: uintptr): void {
// TODO(v8:9891): Remove this assert once all callers are ported to Torque.
// This assert ensures correctness of maybeFeedbackVector's type which can
@@ -72,7 +110,24 @@ macro CollectCallFeedback(
// If cleared, we have a new chance to become monomorphic.
const feedbackValue: HeapObject =
- MaybeObjectToStrong(feedback) otherwise TryInitializeAsMonomorphic;
+ MaybeObjectToStrong(feedback) otherwise TryReinitializeAsMonomorphic;
+
+ if (FeedbackValueIsReceiver(feedbackVector, slotId) &&
+ TaggedEqualPrototypeApplyFunction(maybeTarget)) {
+ // If the Receiver is recorded and the target is
+ // Function.prototype.apply, check whether we can stay monomorphic based
+ // on the receiver.
+ if (IsMonomorphic(feedback, RunLazy(maybeReceiver))) {
+ return;
+ } else {
+ // If not, reinitialize the feedback with target.
+ SetCallFeedbackContent(
+ feedbackVector, slotId, CallFeedbackContent::kTarget);
+ TryInitializeAsMonomorphic(maybeTarget, feedbackVector, slotId)
+ otherwise TransitionToMegamorphic;
+ return;
+ }
+ }
// Try transitioning to a feedback cell.
// Check if {target}s feedback cell matches the {feedbackValue}.
@@ -92,8 +147,20 @@ macro CollectCallFeedback(
StoreWeakReferenceInFeedbackVector(feedbackVector, slotId, feedbackCell);
ReportFeedbackUpdate(feedbackVector, slotId, 'Call:FeedbackVectorCell');
+ } label TryReinitializeAsMonomorphic {
+ SetCallFeedbackContent(
+ feedbackVector, slotId, CallFeedbackContent::kTarget);
+ goto TryInitializeAsMonomorphic;
} label TryInitializeAsMonomorphic {
- TryInitializeAsMonomorphic(maybeTarget, feedbackVector, slotId)
+ let recordedFunction = maybeTarget;
+ if (TaggedEqualPrototypeApplyFunction(maybeTarget)) {
+ recordedFunction = RunLazy(maybeReceiver);
+ SetCallFeedbackContent(
+ feedbackVector, slotId, CallFeedbackContent::kReceiver);
+ } else {
+ assert(!FeedbackValueIsReceiver(feedbackVector, slotId));
+ }
+ TryInitializeAsMonomorphic(recordedFunction, feedbackVector, slotId)
otherwise TransitionToMegamorphic;
} label TransitionToMegamorphic {
TransitionToMegamorphic(feedbackVector, slotId);
diff --git a/chromium/v8/src/builtins/ic.tq b/chromium/v8/src/builtins/ic.tq
index 49d4e78fa55..a9e92cf63ec 100644
--- a/chromium/v8/src/builtins/ic.tq
+++ b/chromium/v8/src/builtins/ic.tq
@@ -8,10 +8,10 @@ namespace ic {
@export
macro CollectCallFeedback(
- maybeTarget: JSAny, context: Context,
+ maybeTarget: JSAny, maybeReceiver: Lazy<JSAny>, context: Context,
maybeFeedbackVector: Undefined|FeedbackVector, slotId: uintptr): void {
callable::CollectCallFeedback(
- maybeTarget, context, maybeFeedbackVector, slotId);
+ maybeTarget, maybeReceiver, context, maybeFeedbackVector, slotId);
}
@export
@@ -51,10 +51,15 @@ macro IsUninitialized(feedback: MaybeObject): bool {
}
extern macro LoadFeedbackVectorSlot(FeedbackVector, uintptr): MaybeObject;
+extern macro LoadFeedbackVectorSlot(
+ FeedbackVector, uintptr, constexpr int32): MaybeObject;
extern operator '[]' macro LoadFeedbackVectorSlot(
FeedbackVector, intptr): MaybeObject;
extern macro StoreFeedbackVectorSlot(
FeedbackVector, uintptr, MaybeObject): void;
+extern macro StoreFeedbackVectorSlot(
+ FeedbackVector, uintptr, MaybeObject, constexpr WriteBarrierMode,
+ constexpr int32): void;
extern macro StoreWeakReferenceInFeedbackVector(
FeedbackVector, uintptr, HeapObject): MaybeObject;
extern macro ReportFeedbackUpdate(FeedbackVector, uintptr, constexpr string);
diff --git a/chromium/v8/src/builtins/iterator.tq b/chromium/v8/src/builtins/iterator.tq
index 05993ea6d77..150e3d2cb57 100644
--- a/chromium/v8/src/builtins/iterator.tq
+++ b/chromium/v8/src/builtins/iterator.tq
@@ -78,8 +78,8 @@ extern macro LoadContextFromBaseline(): Context;
extern macro LoadFeedbackVectorFromBaseline(): FeedbackVector;
transitioning builtin GetIteratorBaseline(
- context: Context, receiver: JSAny, loadSlot: TaggedIndex,
- callSlot: TaggedIndex): JSAny {
+ receiver: JSAny, loadSlot: TaggedIndex, callSlot: TaggedIndex): JSAny {
+ const context: Context = LoadContextFromBaseline();
const feedback: FeedbackVector = LoadFeedbackVectorFromBaseline();
const iteratorMethod: JSAny =
LoadIC(context, receiver, IteratorSymbolConstant(), loadSlot, feedback);
@@ -97,12 +97,18 @@ transitioning builtin CreateAsyncFromSyncIteratorBaseline(syncIterator: JSAny):
return CreateAsyncFromSyncIterator(context, syncIterator);
}
+macro GetLazyReceiver(receiver: JSAny): JSAny {
+ return receiver;
+}
+
transitioning builtin CallIteratorWithFeedback(
context: Context, receiver: JSAny, iteratorMethod: JSAny, callSlot: Smi,
feedback: Undefined|FeedbackVector): JSAny {
// TODO(v8:10047): Use TaggedIndex here once TurboFan supports it.
const callSlotUnTagged: uintptr = Unsigned(SmiUntag(callSlot));
- ic::CollectCallFeedback(iteratorMethod, context, feedback, callSlotUnTagged);
+ ic::CollectCallFeedback(
+ iteratorMethod, %MakeLazy<JSAny, JSAny>('GetLazyReceiver', receiver),
+ context, feedback, callSlotUnTagged);
const iteratorCallable: Callable = Cast<Callable>(iteratorMethod)
otherwise ThrowCalledNonCallable(iteratorMethod);
return Call(context, iteratorCallable, receiver);
diff --git a/chromium/v8/src/builtins/mips/builtins-mips.cc b/chromium/v8/src/builtins/mips/builtins-mips.cc
index 1d8e80bdf87..6ff2ed4b5c3 100644
--- a/chromium/v8/src/builtins/mips/builtins-mips.cc
+++ b/chromium/v8/src/builtins/mips/builtins-mips.cc
@@ -6,6 +6,7 @@
#include "src/api/api-arguments.h"
#include "src/codegen/code-factory.h"
+#include "src/codegen/interface-descriptors-inl.h"
#include "src/debug/debug.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/execution/frame-constants.h"
@@ -102,7 +103,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// a0: number of arguments (untagged)
// a1: constructor function
// a3: new target
- __ InvokeFunctionWithNewTarget(a1, a3, a0, CALL_FUNCTION);
+ __ InvokeFunctionWithNewTarget(a1, a3, a0, InvokeType::kCall);
// Restore context from the frame.
__ lw(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
@@ -226,7 +227,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ Push(s0);
// Call the function.
- __ InvokeFunctionWithNewTarget(a1, a3, a0, CALL_FUNCTION);
+ __ InvokeFunctionWithNewTarget(a1, a3, a0, InvokeType::kCall);
// ----------- S t a t e -------------
// -- v0: constructor result
@@ -637,7 +638,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Store input value into generator object.
__ sw(v0, FieldMemOperand(a1, JSGeneratorObject::kInputOrDebugPosOffset));
__ RecordWriteField(a1, JSGeneratorObject::kInputOrDebugPosOffset, v0, a3,
- kRAHasNotBeenSaved, kDontSaveFPRegs);
+ kRAHasNotBeenSaved, SaveFPRegsMode::kIgnore);
// Load suspended function and context.
__ lw(t0, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
@@ -761,8 +762,8 @@ static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm,
__ sw(optimized_code, FieldMemOperand(closure, JSFunction::kCodeOffset));
__ mov(scratch1, optimized_code); // Write barrier clobbers scratch1 below.
__ RecordWriteField(closure, JSFunction::kCodeOffset, scratch1, scratch2,
- kRAHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
+ kRAHasNotBeenSaved, SaveFPRegsMode::kIgnore,
+ RememberedSetAction::kOmit, SmiCheck::kOmit);
}
static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
@@ -1406,7 +1407,7 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
__ Jump(kJavaScriptCallCodeStartRegister);
}
-void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
+void Builtins::Generate_InterpreterEnterAtNextBytecode(MacroAssembler* masm) {
// Advance the current bytecode offset stored within the given interpreter
// stack frame. This simulates what all bytecode handlers do upon completion
// of the underlying operation.
@@ -1453,7 +1454,7 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
__ Abort(AbortReason::kInvalidBytecodeAdvance);
}
-void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+void Builtins::Generate_InterpreterEnterAtBytecode(MacroAssembler* masm) {
Generate_InterpreterEnterBytecode(masm);
}
@@ -1756,7 +1757,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// -- t0 : len (number of elements to push from args)
// -- a3 : new.target (for [[Construct]])
// -----------------------------------
- if (masm->emit_debug_code()) {
+ if (FLAG_debug_code) {
// Allow a2 to be a FixedArray, or a FixedDoubleArray if t0 == 0.
Label ok, fail;
__ AssertNotSmi(a2);
@@ -2005,7 +2006,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ lhu(a2,
FieldMemOperand(a2, SharedFunctionInfo::kFormalParameterCountOffset));
- __ InvokeFunctionCode(a1, no_reg, a2, a0, JUMP_FUNCTION);
+ __ InvokeFunctionCode(a1, no_reg, a2, a0, InvokeType::kJump);
// The function is a "classConstructor", need to raise an exception.
__ bind(&class_constructor);
@@ -2364,6 +2365,12 @@ void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) {
void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
__ Trap();
}
+
+void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) {
+ // Only needed on x64.
+ __ Trap();
+}
+
#endif // V8_ENABLE_WEBASSEMBLY
void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
@@ -2376,10 +2383,10 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// sp: stack pointer (restored as callee's sp after C call)
// cp: current context (C callee-saved)
//
- // If argv_mode == kArgvInRegister:
+ // If argv_mode == ArgvMode::kRegister:
// a2: pointer to the first argument
- if (argv_mode == kArgvInRegister) {
+ if (argv_mode == ArgvMode::kRegister) {
// Move argv into the correct register.
__ mov(s1, a2);
} else {
@@ -2391,7 +2398,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// Enter the exit frame that transitions from JavaScript to C++.
FrameScope scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(
- save_doubles == kSaveFPRegs, 0,
+ save_doubles == SaveFPRegsMode::kSave, 0,
builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT);
// s0: number of arguments including receiver (C callee-saved)
@@ -2440,12 +2447,12 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// v0:v1: result
// sp: stack pointer
// fp: frame pointer
- Register argc = argv_mode == kArgvInRegister
+ Register argc = argv_mode == ArgvMode::kRegister
// We don't want to pop arguments so set argc to no_reg.
? no_reg
// s0: still holds argc (callee-saved).
: s0;
- __ LeaveExitFrame(save_doubles == kSaveFPRegs, argc, EMIT_RETURN);
+ __ LeaveExitFrame(save_doubles == SaveFPRegsMode::kSave, argc, EMIT_RETURN);
// Handling of exception.
__ bind(&exception_returned);
@@ -2698,7 +2705,7 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
// No more valid handles (the result handle was the last one). Restore
// previous handle scope.
__ sw(s0, MemOperand(s5, kNextOffset));
- if (__ emit_debug_code()) {
+ if (FLAG_debug_code) {
__ lw(a1, MemOperand(s5, kLevelOffset));
__ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall, a1,
Operand(s2));
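The mips port above (and the mips64, ppc and riscv64 ports below) follows the same mechanical migration: the loose constants CALL_FUNCTION, JUMP_FUNCTION, kDontSaveFPRegs, kSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK and kArgvInRegister become scoped enumerators (InvokeType::kCall, SaveFPRegsMode::kIgnore, RememberedSetAction::kOmit, SmiCheck::kOmit, ArgvMode::kRegister), and masm->emit_debug_code() is replaced by the FLAG_debug_code check. Below is a small sketch of why the enum class form is preferred; the surrounding function is made up for illustration.

#include <cstdio>

enum class InvokeType { kCall, kJump };
enum class SaveFPRegsMode { kIgnore, kSave };

// With unscoped enums, CALL_FUNCTION and kDontSaveFPRegs were both plain ints
// and could be swapped silently. Scoped enums make such a swap a compile
// error and keep the enumerator names out of the global namespace.
void InvokeFunction(InvokeType type, SaveFPRegsMode fp_mode) {
  std::printf("invoke=%s, save fp=%s\n",
              type == InvokeType::kCall ? "call" : "jump",
              fp_mode == SaveFPRegsMode::kSave ? "yes" : "no");
}

int main() {
  InvokeFunction(InvokeType::kCall, SaveFPRegsMode::kIgnore);
  // InvokeFunction(SaveFPRegsMode::kSave, InvokeType::kJump);  // won't compile
}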
diff --git a/chromium/v8/src/builtins/mips64/builtins-mips64.cc b/chromium/v8/src/builtins/mips64/builtins-mips64.cc
index c029188f146..9d0156e9278 100644
--- a/chromium/v8/src/builtins/mips64/builtins-mips64.cc
+++ b/chromium/v8/src/builtins/mips64/builtins-mips64.cc
@@ -6,6 +6,7 @@
#include "src/api/api-arguments.h"
#include "src/codegen/code-factory.h"
+#include "src/codegen/interface-descriptors-inl.h"
#include "src/debug/debug.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/execution/frame-constants.h"
@@ -102,7 +103,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// a0: number of arguments (untagged)
// a1: constructor function
// a3: new target
- __ InvokeFunctionWithNewTarget(a1, a3, a0, CALL_FUNCTION);
+ __ InvokeFunctionWithNewTarget(a1, a3, a0, InvokeType::kCall);
// Restore context from the frame.
__ Ld(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
@@ -227,7 +228,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ Push(a6);
// Call the function.
- __ InvokeFunctionWithNewTarget(a1, a3, a0, CALL_FUNCTION);
+ __ InvokeFunctionWithNewTarget(a1, a3, a0, InvokeType::kCall);
// ----------- S t a t e -------------
// -- v0: constructor result
@@ -324,7 +325,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Store input value into generator object.
__ Sd(v0, FieldMemOperand(a1, JSGeneratorObject::kInputOrDebugPosOffset));
__ RecordWriteField(a1, JSGeneratorObject::kInputOrDebugPosOffset, v0, a3,
- kRAHasNotBeenSaved, kDontSaveFPRegs);
+ kRAHasNotBeenSaved, SaveFPRegsMode::kIgnore);
// Load suspended function and context.
__ Ld(a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
@@ -777,8 +778,8 @@ static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm,
__ Sd(optimized_code, FieldMemOperand(closure, JSFunction::kCodeOffset));
__ mov(scratch1, optimized_code); // Write barrier clobbers scratch1 below.
__ RecordWriteField(closure, JSFunction::kCodeOffset, scratch1, scratch2,
- kRAHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
+ kRAHasNotBeenSaved, SaveFPRegsMode::kIgnore,
+ RememberedSetAction::kOmit, SmiCheck::kOmit);
}
static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
@@ -1424,7 +1425,7 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
__ Jump(kJavaScriptCallCodeStartRegister);
}
-void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
+void Builtins::Generate_InterpreterEnterAtNextBytecode(MacroAssembler* masm) {
// Advance the current bytecode offset stored within the given interpreter
// stack frame. This simulates what all bytecode handlers do upon completion
// of the underlying operation.
@@ -1471,7 +1472,7 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
__ Abort(AbortReason::kInvalidBytecodeAdvance);
}
-void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+void Builtins::Generate_InterpreterEnterAtBytecode(MacroAssembler* masm) {
Generate_InterpreterEnterBytecode(masm);
}
@@ -1815,7 +1816,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// -- a4 : len (number of elements to push from args)
// -- a3 : new.target (for [[Construct]])
// -----------------------------------
- if (masm->emit_debug_code()) {
+ if (FLAG_debug_code) {
// Allow a2 to be a FixedArray, or a FixedDoubleArray if a4 == 0.
Label ok, fail;
__ AssertNotSmi(a2);
@@ -2073,7 +2074,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ Lhu(a2,
FieldMemOperand(a2, SharedFunctionInfo::kFormalParameterCountOffset));
- __ InvokeFunctionCode(a1, no_reg, a2, a0, JUMP_FUNCTION);
+ __ InvokeFunctionCode(a1, no_reg, a2, a0, InvokeType::kJump);
// The function is a "classConstructor", need to raise an exception.
__ bind(&class_constructor);
@@ -2395,7 +2396,11 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
__ Lbu(a1, MemOperand(a1));
__ Branch(&push_doubles, le, a1, Operand(zero_reg));
// Save vector registers.
- __ MultiPushMSA(fp_regs);
+ {
+ CpuFeatureScope msa_scope(
+ masm, MIPS_SIMD, CpuFeatureScope::CheckPolicy::kDontCheckSupported);
+ __ MultiPushMSA(fp_regs);
+ }
__ Branch(&simd_pushed);
__ bind(&push_doubles);
__ MultiPushFPU(fp_regs);
@@ -2419,7 +2424,11 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
__ Lbu(a1, MemOperand(a1));
__ Branch(&pop_doubles, le, a1, Operand(zero_reg));
// Pop vector registers.
- __ MultiPopMSA(fp_regs);
+ {
+ CpuFeatureScope msa_scope(
+ masm, MIPS_SIMD, CpuFeatureScope::CheckPolicy::kDontCheckSupported);
+ __ MultiPopMSA(fp_regs);
+ }
__ Branch(&simd_popped);
__ bind(&pop_doubles);
__ Daddu(sp, sp, base::bits::CountPopulation(fp_regs) * kDoubleSize);
@@ -2456,6 +2465,12 @@ void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) {
void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
__ Trap();
}
+
+void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) {
+ // Only needed on x64.
+ __ Trap();
+}
+
#endif // V8_ENABLE_WEBASSEMBLY
void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
@@ -2468,10 +2483,10 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// sp: stack pointer (restored as callee's sp after C call)
// cp: current context (C callee-saved)
//
- // If argv_mode == kArgvInRegister:
+ // If argv_mode == ArgvMode::kRegister:
// a2: pointer to the first argument
- if (argv_mode == kArgvInRegister) {
+ if (argv_mode == ArgvMode::kRegister) {
// Move argv into the correct register.
__ mov(s1, a2);
} else {
@@ -2483,7 +2498,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// Enter the exit frame that transitions from JavaScript to C++.
FrameScope scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(
- save_doubles == kSaveFPRegs, 0,
+ save_doubles == SaveFPRegsMode::kSave, 0,
builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT);
// s0: number of arguments including receiver (C callee-saved)
@@ -2532,12 +2547,12 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// v0:v1: result
// sp: stack pointer
// fp: frame pointer
- Register argc = argv_mode == kArgvInRegister
+ Register argc = argv_mode == ArgvMode::kRegister
// We don't want to pop arguments so set argc to no_reg.
? no_reg
// s0: still holds argc (callee-saved).
: s0;
- __ LeaveExitFrame(save_doubles == kSaveFPRegs, argc, EMIT_RETURN);
+ __ LeaveExitFrame(save_doubles == SaveFPRegsMode::kSave, argc, EMIT_RETURN);
// Handling of exception.
__ bind(&exception_returned);
@@ -2794,7 +2809,7 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
// No more valid handles (the result handle was the last one). Restore
// previous handle scope.
__ Sd(s0, MemOperand(s5, kNextOffset));
- if (__ emit_debug_code()) {
+ if (FLAG_debug_code) {
__ Lw(a1, MemOperand(s5, kLevelOffset));
__ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall, a1,
Operand(s2));
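Beyond the shared enum migration, the mips64 WasmCompileLazy hunks wrap MultiPushMSA/MultiPopMSA in a CpuFeatureScope with CheckPolicy::kDontCheckSupported, since the code has already branched on the runtime MSA check a few instructions earlier. A rough standalone analogue of that RAII scope is sketched below; the real class lives in the assembler, and this version only models its shape.

#include <cassert>
#include <cstdio>

struct Assembler {
  bool msa_allowed = false;
};

class CpuFeatureScope {
 public:
  enum class CheckPolicy { kCheckSupported, kDontCheckSupported };
  CpuFeatureScope(Assembler* masm, bool supported,
                  CheckPolicy policy = CheckPolicy::kCheckSupported)
      : masm_(masm) {
    // kDontCheckSupported skips the assert because the caller has already
    // guarded the region with its own runtime support check.
    if (policy == CheckPolicy::kCheckSupported) assert(supported);
    masm_->msa_allowed = true;
  }
  ~CpuFeatureScope() { masm_->msa_allowed = false; }

 private:
  Assembler* masm_;
};

int main() {
  Assembler masm;
  {
    CpuFeatureScope scope(&masm, /*supported=*/false,
                          CpuFeatureScope::CheckPolicy::kDontCheckSupported);
    std::printf("MSA instructions allowed inside scope: %d\n",
                masm.msa_allowed);
  }
  std::printf("MSA instructions allowed after scope: %d\n", masm.msa_allowed);
}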
diff --git a/chromium/v8/src/builtins/ppc/builtins-ppc.cc b/chromium/v8/src/builtins/ppc/builtins-ppc.cc
index bc467c9ff9f..35d817d3a26 100644
--- a/chromium/v8/src/builtins/ppc/builtins-ppc.cc
+++ b/chromium/v8/src/builtins/ppc/builtins-ppc.cc
@@ -6,6 +6,7 @@
#include "src/api/api-arguments.h"
#include "src/codegen/code-factory.h"
+#include "src/codegen/interface-descriptors-inl.h"
// For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop.
#include "src/codegen/macro-assembler-inl.h"
#include "src/codegen/register-configuration.h"
@@ -113,13 +114,13 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// r6: new target
{
ConstantPoolUnavailableScope constant_pool_unavailable(masm);
- __ InvokeFunctionWithNewTarget(r4, r6, r3, CALL_FUNCTION);
+ __ InvokeFunctionWithNewTarget(r4, r6, r3, InvokeType::kCall);
}
// Restore context from the frame.
- __ LoadP(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
+ __ LoadU64(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
// Restore smi-tagged arguments count from the frame.
- __ LoadP(scratch, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
+ __ LoadU64(scratch, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
// Leave construct frame.
}
@@ -229,8 +230,8 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// -----------------------------------
// Restore constructor function and argument count.
- __ LoadP(r4, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
- __ LoadP(r3, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
+ __ LoadU64(r4, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
+ __ LoadU64(r3, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
__ SmiUntag(r3);
Label stack_overflow;
@@ -245,7 +246,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Call the function.
{
ConstantPoolUnavailableScope constant_pool_unavailable(masm);
- __ InvokeFunctionWithNewTarget(r4, r6, r3, CALL_FUNCTION);
+ __ InvokeFunctionWithNewTarget(r4, r6, r3, InvokeType::kCall);
}
// ----------- S t a t e -------------
@@ -275,12 +276,12 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Throw away the result of the constructor invocation and use the
// on-stack receiver as the result.
__ bind(&use_receiver);
- __ LoadP(r3, MemOperand(sp));
+ __ LoadU64(r3, MemOperand(sp));
__ JumpIfRoot(r3, RootIndex::kTheHoleValue, &do_throw);
__ bind(&leave_and_return);
// Restore smi-tagged arguments count from the frame.
- __ LoadP(r4, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
+ __ LoadU64(r4, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
// Leave construct frame.
__ LeaveFrame(StackFrame::CONSTRUCT);
@@ -305,13 +306,13 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ bind(&do_throw);
// Restore the context from the frame.
- __ LoadP(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
+ __ LoadU64(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
__ CallRuntime(Runtime::kThrowConstructorReturnedNonObject);
__ bkpt(0);
__ bind(&stack_overflow);
// Restore the context from the frame.
- __ LoadP(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
+ __ LoadU64(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
__ CallRuntime(Runtime::kThrowStackOverflow);
// Unreachable code.
__ bkpt(0);
@@ -347,7 +348,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ StoreTaggedField(
r3, FieldMemOperand(r4, JSGeneratorObject::kInputOrDebugPosOffset), r0);
__ RecordWriteField(r4, JSGeneratorObject::kInputOrDebugPosOffset, r3, r6,
- kLRHasNotBeenSaved, kDontSaveFPRegs);
+ kLRHasNotBeenSaved, SaveFPRegsMode::kIgnore);
// Load suspended function and context.
__ LoadTaggedPointerField(
@@ -373,7 +374,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
ExternalReference::debug_suspended_generator_address(masm->isolate());
__ Move(scratch, debug_suspended_generator);
- __ LoadP(scratch, MemOperand(scratch));
+ __ LoadU64(scratch, MemOperand(scratch));
__ cmp(scratch, r4);
__ beq(&prepare_step_in_suspended_generator);
__ bind(&stepping_prepared);
@@ -402,19 +403,16 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
FieldMemOperand(r4, JSGeneratorObject::kParametersAndRegistersOffset));
{
Label done_loop, loop;
- __ mr(r9, r6);
-
__ bind(&loop);
- __ subi(r9, r9, Operand(1));
- __ cmpi(r9, Operand::Zero());
+ __ subi(r6, r6, Operand(1));
+ __ cmpi(r6, Operand::Zero());
__ blt(&done_loop);
- __ ShiftLeftImm(r10, r9, Operand(kTaggedSizeLog2));
+ __ ShiftLeftImm(r10, r6, Operand(kTaggedSizeLog2));
__ add(scratch, r5, r10);
__ LoadAnyTaggedField(scratch,
FieldMemOperand(scratch, FixedArray::kHeaderSize));
__ Push(scratch);
__ b(&loop);
-
__ bind(&done_loop);
// Push receiver.
@@ -554,7 +552,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
// Save copies of the top frame descriptor on the stack.
__ Move(r3, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
masm->isolate()));
- __ LoadP(r0, MemOperand(r3));
+ __ LoadU64(r0, MemOperand(r3));
__ push(r0);
// Clear c_entry_fp, now we've pushed its previous value to the stack.
@@ -574,7 +572,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
ExternalReference::Create(IsolateAddressId::kJSEntrySPAddress,
masm->isolate());
__ Move(r3, js_entry_sp);
- __ LoadP(scratch, MemOperand(r3));
+ __ LoadU64(scratch, MemOperand(r3));
__ cmpi(scratch, Operand::Zero());
__ bne(&non_outermost_js);
__ StoreP(fp, MemOperand(r3));
@@ -663,7 +661,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
__ MultiPop(kCalleeSaved);
// Return
- __ LoadP(r0, MemOperand(sp, kStackFrameLRSlot * kSystemPointerSize));
+ __ LoadU64(r0, MemOperand(sp, kStackFrameLRSlot * kSystemPointerSize));
__ mtlr(r0);
__ blr();
}
@@ -703,7 +701,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
ExternalReference context_address = ExternalReference::Create(
IsolateAddressId::kContextAddress, masm->isolate());
__ Move(cp, context_address);
- __ LoadP(cp, MemOperand(cp));
+ __ LoadU64(cp, MemOperand(cp));
// Push the function.
__ Push(r5);
@@ -734,7 +732,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ mtctr(r7);
__ bind(&loop);
__ LoadPU(r9, MemOperand(r8, -kSystemPointerSize)); // read next parameter
- __ LoadP(r0, MemOperand(r9)); // dereference handle
+ __ LoadU64(r0, MemOperand(r9)); // dereference handle
__ push(r0); // push parameter
__ bdnz(&loop);
__ bind(&done);
@@ -800,23 +798,23 @@ static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm,
FieldMemOperand(closure, JSFunction::kCodeOffset), r0);
__ mr(scratch1, optimized_code); // Write barrier clobbers scratch1 below.
__ RecordWriteField(closure, JSFunction::kCodeOffset, scratch1, scratch2,
- kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
+ kLRHasNotBeenSaved, SaveFPRegsMode::kIgnore,
+ RememberedSetAction::kOmit, SmiCheck::kOmit);
}
static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
Register scratch2) {
Register params_size = scratch1;
// Get the size of the formal parameters + receiver (in bytes).
- __ LoadP(params_size,
- MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ __ LoadU64(params_size,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
__ lwz(params_size,
FieldMemOperand(params_size, BytecodeArray::kParameterSizeOffset));
Register actual_params_size = scratch2;
// Compute the size of the actual parameters + receiver (in bytes).
- __ LoadP(actual_params_size,
- MemOperand(fp, StandardFrameConstants::kArgCOffset));
+ __ LoadU64(actual_params_size,
+ MemOperand(fp, StandardFrameConstants::kArgCOffset));
__ ShiftLeftImm(actual_params_size, actual_params_size,
Operand(kSystemPointerSizeLog2));
__ addi(actual_params_size, actual_params_size, Operand(kSystemPointerSize));
@@ -869,7 +867,7 @@ static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
__ LoadTaggedPointerField(
scratch,
FieldMemOperand(optimized_code_entry, Code::kCodeDataContainerOffset));
- __ LoadWordArith(
+ __ LoadS32(
scratch,
FieldMemOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset));
__ TestBit(scratch, Code::kMarkedForDeoptimizationBit, r0);
@@ -1082,9 +1080,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
Register optimization_state = r7;
// Read off the optimization state in the feedback vector.
- __ LoadWord(optimization_state,
- FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset),
- r0);
+ __ LoadU32(optimization_state,
+ FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset),
+ r0);
// Check if the optimized code slot is not empty or has an optimization marker.
Label has_optimized_code_or_marker;
@@ -1097,7 +1095,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ bind(&not_optimized);
// Increment invocation count for the function.
- __ LoadWord(
+ __ LoadU32(
r8,
FieldMemOperand(feedback_vector, FeedbackVector::kInvocationCountOffset),
r0);
@@ -1165,10 +1163,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// If the bytecode array has a valid incoming new target or generator object
// register, initialize it with incoming value which was passed in r6.
Label no_incoming_new_target_or_generator_register;
- __ LoadWordArith(
- r8, FieldMemOperand(
- kInterpreterBytecodeArrayRegister,
- BytecodeArray::kIncomingNewTargetOrGeneratorRegisterOffset));
+ __ LoadS32(r8,
+ FieldMemOperand(
+ kInterpreterBytecodeArrayRegister,
+ BytecodeArray::kIncomingNewTargetOrGeneratorRegisterOffset));
__ cmpi(r8, Operand::Zero());
__ beq(&no_incoming_new_target_or_generator_register);
__ ShiftLeftImm(r8, r8, Operand(kSystemPointerSizeLog2));
@@ -1205,10 +1203,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// or the interpreter tail calling a builtin and then a dispatch.
// Get bytecode array and bytecode offset from the stack frame.
- __ LoadP(kInterpreterBytecodeArrayRegister,
- MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
- __ LoadP(kInterpreterBytecodeOffsetRegister,
- MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+ __ LoadU64(kInterpreterBytecodeArrayRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ __ LoadU64(kInterpreterBytecodeOffsetRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
// Either return, or advance to the next bytecode and dispatch.
@@ -1238,8 +1236,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// After the call, restore the bytecode array, bytecode offset and accumulator
// registers again. Also, restore the bytecode offset in the stack to its
// previous value.
- __ LoadP(kInterpreterBytecodeArrayRegister,
- MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ __ LoadU64(kInterpreterBytecodeArrayRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
__ mov(kInterpreterBytecodeOffsetRegister,
Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
__ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
@@ -1313,7 +1311,7 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
// Pass the spread in the register r3.
// r2 already points to the penultimate argument, the spread
// lies in the next interpreter register.
- __ LoadP(r5, MemOperand(r5, -kSystemPointerSize));
+ __ LoadU64(r5, MemOperand(r5, -kSystemPointerSize));
}
// Call the target.
@@ -1364,7 +1362,7 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
// r4 already points to the penultimate argument, the spread
// lies in the next interpreter register.
__ subi(r7, r7, Operand(kSystemPointerSize));
- __ LoadP(r5, MemOperand(r7));
+ __ LoadU64(r5, MemOperand(r7));
} else {
__ AssertUndefinedOrAllocationSite(r5, r8);
}
@@ -1406,7 +1404,7 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
// custom copy of the interpreter entry trampoline for profiling. If so,
// get the custom trampoline, otherwise grab the entry address of the global
// trampoline.
- __ LoadP(r5, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
+ __ LoadU64(r5, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
__ LoadTaggedPointerField(
r5, FieldMemOperand(r5, JSFunction::kSharedFunctionInfoOffset));
__ LoadTaggedPointerField(
@@ -1425,7 +1423,7 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
__ Move(r5, ExternalReference::
address_of_interpreter_entry_trampoline_instruction_start(
masm->isolate()));
- __ LoadP(r5, MemOperand(r5));
+ __ LoadU64(r5, MemOperand(r5));
__ bind(&trampoline_loaded);
__ addi(r0, r5, Operand(interpreter_entry_return_pc_offset.value()));
@@ -1437,8 +1435,8 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
// Get the bytecode array pointer from the frame.
- __ LoadP(kInterpreterBytecodeArrayRegister,
- MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ __ LoadU64(kInterpreterBytecodeArrayRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
if (FLAG_debug_code) {
// Check function data field is actually a BytecodeArray object.
@@ -1453,8 +1451,8 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
}
// Get the target bytecode offset from the frame.
- __ LoadP(kInterpreterBytecodeOffsetRegister,
- MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+ __ LoadU64(kInterpreterBytecodeOffsetRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
if (FLAG_debug_code) {
@@ -1478,12 +1476,12 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
__ Jump(kJavaScriptCallCodeStartRegister);
}
-void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
+void Builtins::Generate_InterpreterEnterAtNextBytecode(MacroAssembler* masm) {
// Get bytecode array and bytecode offset from the stack frame.
- __ LoadP(kInterpreterBytecodeArrayRegister,
- MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
- __ LoadP(kInterpreterBytecodeOffsetRegister,
- MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+ __ LoadU64(kInterpreterBytecodeArrayRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ __ LoadU64(kInterpreterBytecodeOffsetRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
Label enter_bytecode, function_entry_bytecode;
@@ -1524,7 +1522,7 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
__ Abort(AbortReason::kInvalidBytecodeAdvance);
}
-void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+void Builtins::Generate_InterpreterEnterAtBytecode(MacroAssembler* masm) {
Generate_InterpreterEnterBytecode(masm);
}
@@ -1567,7 +1565,7 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
__ subi(r3, r3,
Operand(BuiltinContinuationFrameConstants::kFixedSlotCount));
}
- __ LoadP(
+ __ LoadU64(
fp,
MemOperand(sp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
// Load builtin index (stored as a Smi) and use it to get the builtin start
@@ -1609,7 +1607,7 @@ void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
}
DCHECK_EQ(kInterpreterAccumulatorRegister.code(), r3.code());
- __ LoadP(r3, MemOperand(sp, 0 * kSystemPointerSize));
+ __ LoadU64(r3, MemOperand(sp, 0 * kSystemPointerSize));
__ addi(sp, sp, Operand(1 * kSystemPointerSize));
__ Ret();
}
@@ -1677,13 +1675,13 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ mr(r5, r8);
Label done;
- __ LoadP(r4, MemOperand(sp)); // receiver
+ __ LoadU64(r4, MemOperand(sp)); // receiver
__ cmpi(r3, Operand(1));
__ blt(&done);
- __ LoadP(r8, MemOperand(sp, kSystemPointerSize)); // thisArg
+ __ LoadU64(r8, MemOperand(sp, kSystemPointerSize)); // thisArg
__ cmpi(r3, Operand(2));
__ blt(&done);
- __ LoadP(r5, MemOperand(sp, 2 * kSystemPointerSize)); // argArray
+ __ LoadU64(r5, MemOperand(sp, 2 * kSystemPointerSize)); // argArray
__ bind(&done);
__ ShiftLeftImm(ip, r3, Operand(kSystemPointerSizeLog2));
@@ -1762,13 +1760,13 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
Label done;
__ cmpi(r3, Operand(1));
__ blt(&done);
- __ LoadP(r4, MemOperand(sp, kSystemPointerSize)); // thisArg
+ __ LoadU64(r4, MemOperand(sp, kSystemPointerSize)); // thisArg
__ cmpi(r3, Operand(2));
__ blt(&done);
- __ LoadP(r8, MemOperand(sp, 2 * kSystemPointerSize)); // argArray
+ __ LoadU64(r8, MemOperand(sp, 2 * kSystemPointerSize)); // argArray
__ cmpi(r3, Operand(3));
__ blt(&done);
- __ LoadP(r5, MemOperand(sp, 3 * kSystemPointerSize)); // argArray
+ __ LoadU64(r5, MemOperand(sp, 3 * kSystemPointerSize)); // argArray
__ bind(&done);
__ ShiftLeftImm(ip, r3, Operand(kSystemPointerSizeLog2));
@@ -1812,14 +1810,14 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ mr(r7, r4);
__ cmpi(r3, Operand(1));
__ blt(&done);
- __ LoadP(r4, MemOperand(sp, kSystemPointerSize)); // thisArg
+ __ LoadU64(r4, MemOperand(sp, kSystemPointerSize)); // thisArg
__ mr(r6, r4);
__ cmpi(r3, Operand(2));
__ blt(&done);
- __ LoadP(r5, MemOperand(sp, 2 * kSystemPointerSize)); // argArray
+ __ LoadU64(r5, MemOperand(sp, 2 * kSystemPointerSize)); // argArray
__ cmpi(r3, Operand(3));
__ blt(&done);
- __ LoadP(r6, MemOperand(sp, 3 * kSystemPointerSize)); // argArray
+ __ LoadU64(r6, MemOperand(sp, 3 * kSystemPointerSize)); // argArray
__ bind(&done);
__ ShiftLeftImm(r0, r3, Operand(kSystemPointerSizeLog2));
__ add(sp, sp, r0);
@@ -1847,6 +1845,7 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
}
// static
+// TODO(v8:11615): Observe Code::kMaxArguments in CallOrConstructVarargs
void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
Handle<Code> code) {
// ----------- S t a t e -------------
@@ -1859,7 +1858,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
Register scratch = ip;
- if (masm->emit_debug_code()) {
+ if (FLAG_debug_code) {
// Allow r5 to be a FixedArray, or a FixedDoubleArray if r7 == 0.
Label ok, fail;
__ AssertNotSmi(r5);
@@ -1963,7 +1962,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
}
Label stack_done, stack_overflow;
- __ LoadP(r8, MemOperand(fp, StandardFrameConstants::kArgCOffset));
+ __ LoadU64(r8, MemOperand(fp, StandardFrameConstants::kArgCOffset));
__ sub(r8, r8, r5, LeaveOE, SetRC);
__ ble(&stack_done, cr0);
{
@@ -2125,7 +2124,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ LoadHalfWord(
r5, FieldMemOperand(r5, SharedFunctionInfo::kFormalParameterCountOffset));
- __ InvokeFunctionCode(r4, no_reg, r5, r3, JUMP_FUNCTION);
+ __ InvokeFunctionCode(r4, no_reg, r5, r3, InvokeType::kJump);
// The function is a "classConstructor", need to raise an exception.
__ bind(&class_constructor);
@@ -2511,6 +2510,11 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
// TODO(v8:10701): Implement for this platform.
__ Trap();
}
+
+void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) {
+ // Only needed on x64.
+ __ Trap();
+}
#endif // V8_ENABLE_WEBASSEMBLY
void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
@@ -2523,12 +2527,12 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// sp: stack pointer (restored as callee's sp after C call)
// cp: current context (C callee-saved)
//
- // If argv_mode == kArgvInRegister:
+ // If argv_mode == ArgvMode::kRegister:
// r5: pointer to the first argument
__ mr(r15, r4);
- if (argv_mode == kArgvInRegister) {
+ if (argv_mode == ArgvMode::kRegister) {
// Move argv into the correct register.
__ mr(r4, r5);
} else {
@@ -2552,7 +2556,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
}
__ EnterExitFrame(
- save_doubles, arg_stack_space,
+ save_doubles == SaveFPRegsMode::kSave, arg_stack_space,
builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT);
// Store a copy of argc in callee-saved registers for later.
@@ -2584,8 +2588,8 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// If return value is on the stack, pop it to registers.
if (needs_return_buffer) {
- __ LoadP(r4, MemOperand(r3, kSystemPointerSize));
- __ LoadP(r3, MemOperand(r3));
+ __ LoadU64(r4, MemOperand(r3, kSystemPointerSize));
+ __ LoadU64(r3, MemOperand(r3));
}
// Check result for exception sentinel.
@@ -2601,7 +2605,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
IsolateAddressId::kPendingExceptionAddress, masm->isolate());
__ Move(r6, pending_exception_address);
- __ LoadP(r6, MemOperand(r6));
+ __ LoadU64(r6, MemOperand(r6));
__ CompareRoot(r6, RootIndex::kTheHoleValue);
// Cannot use check here as it attempts to generate call into runtime.
__ beq(&okay);
@@ -2613,12 +2617,12 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// r3:r4: result
// sp: stack pointer
// fp: frame pointer
- Register argc = argv_mode == kArgvInRegister
+ Register argc = argv_mode == ArgvMode::kRegister
// We don't want to pop arguments so set argc to no_reg.
? no_reg
// r14: still holds argc (callee-saved).
: r14;
- __ LeaveExitFrame(save_doubles, argc);
+ __ LeaveExitFrame(save_doubles == SaveFPRegsMode::kSave, argc);
__ blr();
// Handling of exception.
@@ -2653,11 +2657,11 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// Retrieve the handler context, SP and FP.
__ Move(cp, pending_handler_context_address);
- __ LoadP(cp, MemOperand(cp));
+ __ LoadU64(cp, MemOperand(cp));
__ Move(sp, pending_handler_sp_address);
- __ LoadP(sp, MemOperand(sp));
+ __ LoadU64(sp, MemOperand(sp));
__ Move(fp, pending_handler_fp_address);
- __ LoadP(fp, MemOperand(fp));
+ __ LoadU64(fp, MemOperand(fp));
// If the handler is a JS frame, restore the context to the frame. Note that
// the context will be set to (cp == 0) for non-JS frames.
@@ -2685,10 +2689,10 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// Compute the handler entry address and jump to it.
ConstantPoolUnavailableScope constant_pool_unavailable(masm);
__ Move(ip, pending_handler_entrypoint_address);
- __ LoadP(ip, MemOperand(ip));
+ __ LoadU64(ip, MemOperand(ip));
if (FLAG_enable_embedded_constant_pool) {
__ Move(kConstantPoolRegister, pending_handler_constant_pool_address);
- __ LoadP(kConstantPoolRegister, MemOperand(kConstantPoolRegister));
+ __ LoadU64(kConstantPoolRegister, MemOperand(kConstantPoolRegister));
}
__ Jump(ip);
}
@@ -2873,8 +2877,8 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
// r15 - next_address->kLimitOffset
// r16 - next_address->kLevelOffset
__ Move(r17, next_address);
- __ LoadP(r14, MemOperand(r17, kNextOffset));
- __ LoadP(r15, MemOperand(r17, kLimitOffset));
+ __ LoadU64(r14, MemOperand(r17, kNextOffset));
+ __ LoadU64(r15, MemOperand(r17, kLimitOffset));
__ lwz(r16, MemOperand(r17, kLevelOffset));
__ addi(r16, r16, Operand(1));
__ stw(r16, MemOperand(r17, kLevelOffset));
@@ -2887,19 +2891,19 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
Label return_value_loaded;
// load value from ReturnValue
- __ LoadP(r3, return_value_operand);
+ __ LoadU64(r3, return_value_operand);
__ bind(&return_value_loaded);
// No more valid handles (the result handle was the last one). Restore
// previous handle scope.
__ StoreP(r14, MemOperand(r17, kNextOffset));
- if (__ emit_debug_code()) {
+ if (FLAG_debug_code) {
__ lwz(r4, MemOperand(r17, kLevelOffset));
__ cmp(r4, r16);
__ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall);
}
__ subi(r16, r16, Operand(1));
__ stw(r16, MemOperand(r17, kLevelOffset));
- __ LoadP(r0, MemOperand(r17, kLimitOffset));
+ __ LoadU64(r0, MemOperand(r17, kLimitOffset));
__ cmp(r15, r0);
__ bne(&delete_allocated_handles);
@@ -2907,7 +2911,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ bind(&leave_exit_frame);
// LeaveExitFrame expects unwind space to be in a register.
if (stack_space_operand != nullptr) {
- __ LoadP(r14, *stack_space_operand);
+ __ LoadU64(r14, *stack_space_operand);
} else {
__ mov(r14, Operand(stack_space));
}
@@ -2916,7 +2920,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
// Check if the function scheduled an exception.
__ LoadRoot(r14, RootIndex::kTheHoleValue);
__ Move(r15, ExternalReference::scheduled_exception_address(isolate));
- __ LoadP(r15, MemOperand(r15));
+ __ LoadU64(r15, MemOperand(r15));
__ cmp(r14, r15);
__ bne(&promote_scheduled_exception);
@@ -3151,8 +3155,8 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
__ LoadTaggedPointerField(
scratch, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset));
- __ LoadP(api_function_address,
- FieldMemOperand(scratch, Foreign::kForeignAddressOffset));
+ __ LoadU64(api_function_address,
+ FieldMemOperand(scratch, Foreign::kForeignAddressOffset));
// +3 is to skip prolog, return address and name handle.
MemOperand return_value_operand(
@@ -3174,13 +3178,14 @@ void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
if (ABI_USES_FUNCTION_DESCRIPTORS) {
// AIX/PPC64BE Linux use a function descriptor;
- __ LoadP(ToRegister(ABI_TOC_REGISTER),
- MemOperand(temp2, kSystemPointerSize));
- __ LoadP(temp2, MemOperand(temp2, 0)); // Instruction address
+ __ LoadU64(ToRegister(ABI_TOC_REGISTER),
+ MemOperand(temp2, kSystemPointerSize));
+ __ LoadU64(temp2, MemOperand(temp2, 0)); // Instruction address
}
__ Call(temp2); // Call the C++ function.
- __ LoadP(r0, MemOperand(sp, kStackFrameExtraParamSlot * kSystemPointerSize));
+ __ LoadU64(r0,
+ MemOperand(sp, kStackFrameExtraParamSlot * kSystemPointerSize));
__ mtlr(r0);
__ blr();
}
@@ -3230,9 +3235,6 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
const int kSavedRegistersAreaSize =
(kNumberOfRegisters * kSystemPointerSize) + kDoubleRegsSize;
- // Get the bailout id is passed as r29 by the caller.
- __ mr(r5, r29);
-
__ mov(r5, Operand(Deoptimizer::kFixedExitSizeMarker));
// Get the address of the location in the code object (r6) (return
// address for lazy deoptimization) and compute the fp-to-sp delta in
@@ -3246,9 +3248,10 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
__ PrepareCallCFunction(6, r8);
__ li(r3, Operand::Zero());
Label context_check;
- __ LoadP(r4, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
+ __ LoadU64(r4,
+ MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
__ JumpIfSmi(r4, &context_check);
- __ LoadP(r3, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
+ __ LoadU64(r3, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
__ bind(&context_check);
__ li(r4, Operand(static_cast<int>(deopt_kind)));
// r5: bailout id already loaded.
@@ -3263,14 +3266,14 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
// Preserve "deoptimizer" object in register r3 and get the input
// frame descriptor pointer to r4 (deoptimizer->input_);
- __ LoadP(r4, MemOperand(r3, Deoptimizer::input_offset()));
+ __ LoadU64(r4, MemOperand(r3, Deoptimizer::input_offset()));
// Copy core registers into FrameDescription::registers_[kNumRegisters].
DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters);
for (int i = 0; i < kNumberOfRegisters; i++) {
int offset =
(i * kSystemPointerSize) + FrameDescription::registers_offset();
- __ LoadP(r5, MemOperand(sp, i * kSystemPointerSize));
+ __ LoadU64(r5, MemOperand(sp, i * kSystemPointerSize));
__ StoreP(r5, MemOperand(r4, offset));
}
@@ -3302,7 +3305,7 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
// Compute a pointer to the unwinding limit in register r5; that is
// the first stack slot not part of the input frame.
- __ LoadP(r5, MemOperand(r4, FrameDescription::frame_size_offset()));
+ __ LoadU64(r5, MemOperand(r4, FrameDescription::frame_size_offset()));
__ add(r5, r5, sp);
// Unwind the stack down to - but not including - the unwinding
@@ -3331,28 +3334,29 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
}
__ pop(r3); // Restore deoptimizer object (class Deoptimizer).
- __ LoadP(sp, MemOperand(r3, Deoptimizer::caller_frame_top_offset()));
+ __ LoadU64(sp, MemOperand(r3, Deoptimizer::caller_frame_top_offset()));
// Replace the current (input) frame with the output frames.
Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header;
// Outer loop state: r7 = current "FrameDescription** output_",
// r4 = one past the last FrameDescription**.
__ lwz(r4, MemOperand(r3, Deoptimizer::output_count_offset()));
- __ LoadP(r7, MemOperand(r3, Deoptimizer::output_offset())); // r7 is output_.
+ __ LoadU64(r7,
+ MemOperand(r3, Deoptimizer::output_offset())); // r7 is output_.
__ ShiftLeftImm(r4, r4, Operand(kSystemPointerSizeLog2));
__ add(r4, r7, r4);
__ b(&outer_loop_header);
__ bind(&outer_push_loop);
// Inner loop state: r5 = current FrameDescription*, r6 = loop index.
- __ LoadP(r5, MemOperand(r7, 0)); // output_[ix]
- __ LoadP(r6, MemOperand(r5, FrameDescription::frame_size_offset()));
+ __ LoadU64(r5, MemOperand(r7, 0)); // output_[ix]
+ __ LoadU64(r6, MemOperand(r5, FrameDescription::frame_size_offset()));
__ b(&inner_loop_header);
__ bind(&inner_push_loop);
__ addi(r6, r6, Operand(-sizeof(intptr_t)));
__ add(r9, r5, r6);
- __ LoadP(r9, MemOperand(r9, FrameDescription::frame_content_offset()));
+ __ LoadU64(r9, MemOperand(r9, FrameDescription::frame_content_offset()));
__ push(r9);
__ bind(&inner_loop_header);
@@ -3364,7 +3368,7 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
__ cmp(r7, r4);
__ blt(&outer_push_loop);
- __ LoadP(r4, MemOperand(r3, Deoptimizer::input_offset()));
+ __ LoadU64(r4, MemOperand(r3, Deoptimizer::input_offset()));
for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
int code = config->GetAllocatableDoubleCode(i);
const DoubleRegister dreg = DoubleRegister::from_code(code);
@@ -3373,9 +3377,9 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
}
// Push pc, and continuation from the last output frame.
- __ LoadP(r9, MemOperand(r5, FrameDescription::pc_offset()));
+ __ LoadU64(r9, MemOperand(r5, FrameDescription::pc_offset()));
__ push(r9);
- __ LoadP(r9, MemOperand(r5, FrameDescription::continuation_offset()));
+ __ LoadU64(r9, MemOperand(r5, FrameDescription::continuation_offset()));
__ push(r9);
// Restore the registers from the last output frame.
@@ -3388,7 +3392,7 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
int offset =
(i * kSystemPointerSize) + FrameDescription::registers_offset();
if ((restored_regs & (1 << i)) != 0) {
- __ LoadP(ToRegister(i), MemOperand(scratch, offset));
+ __ LoadU64(ToRegister(i), MemOperand(scratch, offset));
}
}
}
@@ -3465,11 +3469,12 @@ void Builtins::Generate_DynamicCheckMapsTrampoline(MacroAssembler* masm) {
descriptor.GetRegisterParameter(DynamicCheckMapsDescriptor::kSlot);
Register handler_arg =
descriptor.GetRegisterParameter(DynamicCheckMapsDescriptor::kHandler);
- __ LoadP(handler_arg, MemOperand(fp, CommonFrameConstants::kCallerPCOffset));
- __ LoadP(
+ __ LoadU64(handler_arg,
+ MemOperand(fp, CommonFrameConstants::kCallerPCOffset));
+ __ LoadU64(
slot_arg,
MemOperand(handler_arg, Deoptimizer::kEagerWithResumeImmedArgs1PcOffset));
- __ LoadP(
+ __ LoadU64(
handler_arg,
MemOperand(handler_arg, Deoptimizer::kEagerWithResumeImmedArgs2PcOffset));
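The ppc hunks are dominated by a macro-assembler renaming: LoadP becomes LoadU64, LoadWordArith becomes LoadS32, and LoadWord becomes LoadU32, so the access width and signedness are spelled out in the mnemonic instead of the older pointer-sized/word wording. The sketch below models only that naming convention with plain memory reads; it is not the actual codegen.

#include <cstdint>
#include <cstdio>
#include <cstring>

int64_t LoadS32(const void* mem) {   // 32-bit load, sign-extended
  int32_t v;
  std::memcpy(&v, mem, sizeof(v));
  return v;
}

uint64_t LoadU32(const void* mem) {  // 32-bit load, zero-extended
  uint32_t v;
  std::memcpy(&v, mem, sizeof(v));
  return v;
}

uint64_t LoadU64(const void* mem) {  // full 64-bit load
  uint64_t v;
  std::memcpy(&v, mem, sizeof(v));
  return v;
}

int main() {
  int32_t field = -1;
  std::printf("LoadS32 -> %lld\n", (long long)LoadS32(&field));
  std::printf("LoadU32 -> %llu\n", (unsigned long long)LoadU32(&field));
}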
diff --git a/chromium/v8/src/builtins/promise-abstract-operations.tq b/chromium/v8/src/builtins/promise-abstract-operations.tq
index b7a1b571e64..0e435afad9b 100644
--- a/chromium/v8/src/builtins/promise-abstract-operations.tq
+++ b/chromium/v8/src/builtins/promise-abstract-operations.tq
@@ -196,6 +196,8 @@ FulfillPromise(implicit context: Context)(
// Assert: The value of promise.[[PromiseState]] is "pending".
assert(promise.Status() == PromiseState::kPending);
+ RunContextPromiseHookResolve(promise);
+
// 2. Let reactions be promise.[[PromiseFulfillReactions]].
const reactions =
UnsafeCast<(Zero | PromiseReaction)>(promise.reactions_or_result);
@@ -214,17 +216,24 @@ FulfillPromise(implicit context: Context)(
}
extern macro PromiseBuiltinsAssembler::
- IsPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate(): bool;
+ IsIsolatePromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate(): bool;
+
+extern macro PromiseBuiltinsAssembler::
+ IsIsolatePromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate(uint32):
+ bool;
// https://tc39.es/ecma262/#sec-rejectpromise
transitioning builtin
RejectPromise(implicit context: Context)(
promise: JSPromise, reason: JSAny, debugEvent: Boolean): JSAny {
+ const promiseHookFlags = PromiseHookFlags();
+
// If promise hook is enabled or the debugger is active, let
// the runtime handle this operation, which greatly reduces
// the complexity here and also avoids a couple of back and
// forth between JavaScript and C++ land.
- if (IsPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate() ||
+ if (IsIsolatePromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate(
+ promiseHookFlags) ||
!promise.HasHandler()) {
// 7. If promise.[[PromiseIsHandled]] is false, perform
// HostPromiseRejectionTracker(promise, "reject").
@@ -233,6 +242,8 @@ RejectPromise(implicit context: Context)(
return runtime::RejectPromise(promise, reason, debugEvent);
}
+ RunContextPromiseHookResolve(promise, promiseHookFlags);
+
// 2. Let reactions be promise.[[PromiseRejectReactions]].
const reactions =
UnsafeCast<(Zero | PromiseReaction)>(promise.reactions_or_result);
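FulfillPromise and RejectPromise now read PromiseHookFlags() once and separate the isolate-wide hook/debugger test from the per-context hook: the former still routes to the runtime, while the latter is handled in Torque via RunContextPromiseHookResolve. The following sketch models that single-flags-word dispatch; the bit assignments are invented for illustration.

#include <cstdint>
#include <cstdio>

// Illustrative bit layout; the real flag values live in the isolate.
constexpr uint32_t kIsolateHookOrDebugBit = 1u << 0;
constexpr uint32_t kContextHookBit = 1u << 1;

bool IsIsolatePromiseHookEnabledOrDebugIsActive(uint32_t flags) {
  return (flags & kIsolateHookOrDebugBit) != 0;
}

bool IsContextPromiseHookEnabled(uint32_t flags) {
  return (flags & kContextHookBit) != 0;
}

void RejectPromise(uint32_t promise_hook_flags, bool has_handler) {
  if (IsIsolatePromiseHookEnabledOrDebugIsActive(promise_hook_flags) ||
      !has_handler) {
    std::printf("defer to the C++ runtime\n");
    return;
  }
  if (IsContextPromiseHookEnabled(promise_hook_flags)) {
    std::printf("run the per-context resolve hook\n");
  }
  std::printf("finish rejection on the fast path\n");
}

int main() { RejectPromise(kContextHookBit, /*has_handler=*/true); }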
diff --git a/chromium/v8/src/builtins/promise-all.tq b/chromium/v8/src/builtins/promise-all.tq
index 41dee8b9e76..5ab64a167d3 100644
--- a/chromium/v8/src/builtins/promise-all.tq
+++ b/chromium/v8/src/builtins/promise-all.tq
@@ -231,8 +231,7 @@ Reject(Object) {
// the PromiseReaction (aka we can pass undefined to
// PerformPromiseThen), since this is only necessary for DevTools and
// PromiseHooks.
- if (promiseResolveFunction != Undefined ||
- IsPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate() ||
+ if (promiseResolveFunction != Undefined || NeedsAnyPromiseHooks() ||
IsPromiseSpeciesProtectorCellInvalid() || Is<Smi>(nextValue) ||
!IsPromiseThenLookupChainIntact(
nativeContext, UnsafeCast<HeapObject>(nextValue).map)) {
diff --git a/chromium/v8/src/builtins/promise-constructor.tq b/chromium/v8/src/builtins/promise-constructor.tq
index 3c5a5e560d4..b5f7292a77c 100644
--- a/chromium/v8/src/builtins/promise-constructor.tq
+++ b/chromium/v8/src/builtins/promise-constructor.tq
@@ -40,7 +40,8 @@ extern macro ConstructorBuiltinsAssembler::FastNewObject(
Context, JSFunction, JSReceiver): JSObject;
extern macro
-PromiseBuiltinsAssembler::IsPromiseHookEnabledOrHasAsyncEventDelegate(): bool;
+PromiseBuiltinsAssembler::IsIsolatePromiseHookEnabledOrHasAsyncEventDelegate(
+ uint32): bool;
// https://tc39.es/ecma262/#sec-promise-executor
transitioning javascript builtin
@@ -73,9 +74,7 @@ PromiseConstructor(
result = UnsafeCast<JSPromise>(
FastNewObject(context, promiseFun, UnsafeCast<JSReceiver>(newTarget)));
PromiseInit(result);
- if (IsPromiseHookEnabledOrHasAsyncEventDelegate()) {
- runtime::PromiseHookInit(result, Undefined);
- }
+ RunAnyPromiseHookInit(result, Undefined);
}
const isDebugActive = IsDebugActive();
diff --git a/chromium/v8/src/builtins/promise-jobs.tq b/chromium/v8/src/builtins/promise-jobs.tq
index 80e98f373b9..77d2e7cf9c4 100644
--- a/chromium/v8/src/builtins/promise-jobs.tq
+++ b/chromium/v8/src/builtins/promise-jobs.tq
@@ -7,6 +7,7 @@
// https://tc39.es/ecma262/#sec-promise-jobs
namespace promise {
extern macro IsJSPromiseMap(Map): bool;
+extern macro NeedsAnyPromiseHooks(): bool;
// https://tc39.es/ecma262/#sec-promiseresolvethenablejob
transitioning builtin
@@ -25,7 +26,7 @@ PromiseResolveThenableJob(implicit context: Context)(
const promiseThen = *NativeContextSlot(ContextSlot::PROMISE_THEN_INDEX);
const thenableMap = thenable.map;
if (TaggedEqual(then, promiseThen) && IsJSPromiseMap(thenableMap) &&
- !IsPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate() &&
+ !NeedsAnyPromiseHooks() &&
IsPromiseSpeciesLookupChainIntact(nativeContext, thenableMap)) {
// We know that the {thenable} is a JSPromise, which doesn't require
// any special treatment and that {then} corresponds to the initial
diff --git a/chromium/v8/src/builtins/promise-misc.tq b/chromium/v8/src/builtins/promise-misc.tq
index 67e5e38687d..0eae717b3fc 100644
--- a/chromium/v8/src/builtins/promise-misc.tq
+++ b/chromium/v8/src/builtins/promise-misc.tq
@@ -8,6 +8,9 @@
namespace runtime {
extern transitioning runtime
AllowDynamicFunction(implicit context: Context)(JSAny): JSAny;
+
+extern transitioning runtime
+ReportMessageFromMicrotask(implicit context: Context)(JSAny): JSAny;
}
// Unsafe functions that should be used very carefully.
@@ -17,6 +20,12 @@ extern macro PromiseBuiltinsAssembler::ZeroOutEmbedderOffsets(JSPromise): void;
extern macro PromiseBuiltinsAssembler::AllocateJSPromise(Context): HeapObject;
}
+extern macro
+PromiseBuiltinsAssembler::IsContextPromiseHookEnabled(uint32): bool;
+
+extern macro
+PromiseBuiltinsAssembler::PromiseHookFlags(): uint32;
+
namespace promise {
extern macro IsFunctionWithPrototypeSlotMap(Map): bool;
@@ -90,6 +99,109 @@ macro NewPromiseRejectReactionJobTask(implicit context: Context)(
};
}
+@export
+transitioning macro RunContextPromiseHookInit(implicit context: Context)(
+ promise: JSPromise, parent: Object) {
+ const maybeHook = *NativeContextSlot(
+ ContextSlot::PROMISE_HOOK_INIT_FUNCTION_INDEX);
+ const hook = Cast<Callable>(maybeHook) otherwise return;
+ const parentObject = Is<JSPromise>(parent) ? Cast<JSPromise>(parent)
+ otherwise unreachable: Undefined;
+
+ try {
+ Call(context, hook, Undefined, promise, parentObject);
+ } catch (e) {
+ runtime::ReportMessageFromMicrotask(e);
+ }
+}
+
+@export
+transitioning macro RunContextPromiseHookResolve(implicit context: Context)(
+ promise: JSPromise) {
+ RunContextPromiseHook(
+ ContextSlot::PROMISE_HOOK_RESOLVE_FUNCTION_INDEX, promise,
+ PromiseHookFlags());
+}
+
+@export
+transitioning macro RunContextPromiseHookResolve(implicit context: Context)(
+ promise: JSPromise, flags: uint32) {
+ RunContextPromiseHook(
+ ContextSlot::PROMISE_HOOK_RESOLVE_FUNCTION_INDEX, promise, flags);
+}
+
+@export
+transitioning macro RunContextPromiseHookBefore(implicit context: Context)(
+ promiseOrCapability: JSPromise|PromiseCapability|Undefined) {
+ RunContextPromiseHook(
+ ContextSlot::PROMISE_HOOK_BEFORE_FUNCTION_INDEX, promiseOrCapability,
+ PromiseHookFlags());
+}
+
+@export
+transitioning macro RunContextPromiseHookBefore(implicit context: Context)(
+ promiseOrCapability: JSPromise|PromiseCapability|Undefined, flags: uint32) {
+ RunContextPromiseHook(
+ ContextSlot::PROMISE_HOOK_BEFORE_FUNCTION_INDEX, promiseOrCapability,
+ flags);
+}
+
+@export
+transitioning macro RunContextPromiseHookAfter(implicit context: Context)(
+ promiseOrCapability: JSPromise|PromiseCapability|Undefined) {
+ RunContextPromiseHook(
+ ContextSlot::PROMISE_HOOK_AFTER_FUNCTION_INDEX, promiseOrCapability,
+ PromiseHookFlags());
+}
+
+@export
+transitioning macro RunContextPromiseHookAfter(implicit context: Context)(
+ promiseOrCapability: JSPromise|PromiseCapability|Undefined, flags: uint32) {
+ RunContextPromiseHook(
+ ContextSlot::PROMISE_HOOK_AFTER_FUNCTION_INDEX, promiseOrCapability,
+ flags);
+}
+
+transitioning macro RunContextPromiseHook(implicit context: Context)(
+ slot: Slot<NativeContext, Undefined|Callable>,
+ promiseOrCapability: JSPromise|PromiseCapability|Undefined, flags: uint32) {
+ if (!IsContextPromiseHookEnabled(flags)) return;
+ const maybeHook = *NativeContextSlot(slot);
+ const hook = Cast<Callable>(maybeHook) otherwise return;
+
+ let promise: JSPromise;
+ typeswitch (promiseOrCapability) {
+ case (jspromise: JSPromise): {
+ promise = jspromise;
+ }
+ case (capability: PromiseCapability): {
+ promise = Cast<JSPromise>(capability.promise) otherwise return;
+ }
+ case (Undefined): {
+ return;
+ }
+ }
+
+ try {
+ Call(context, hook, Undefined, promise);
+ } catch (e) {
+ runtime::ReportMessageFromMicrotask(e);
+ }
+}
+
+transitioning macro RunAnyPromiseHookInit(implicit context: Context)(
+ promise: JSPromise, parent: Object) {
+ const promiseHookFlags = PromiseHookFlags();
+ // Fast return if no hooks are set.
+ if (promiseHookFlags == 0) return;
+ if (IsContextPromiseHookEnabled(promiseHookFlags)) {
+ RunContextPromiseHookInit(promise, parent);
+ }
+ if (IsIsolatePromiseHookEnabledOrHasAsyncEventDelegate(promiseHookFlags)) {
+ runtime::PromiseHookInit(promise, parent);
+ }
+}
+
// These allocate and initialize a promise with pending state and
// undefined fields.
//
@@ -100,9 +212,7 @@ transitioning macro NewJSPromise(implicit context: Context)(parent: Object):
JSPromise {
const instance = InnerNewJSPromise();
PromiseInit(instance);
- if (IsPromiseHookEnabledOrHasAsyncEventDelegate()) {
- runtime::PromiseHookInit(instance, parent);
- }
+ RunAnyPromiseHookInit(instance, parent);
return instance;
}
@@ -124,10 +234,7 @@ transitioning macro NewJSPromise(implicit context: Context)(
instance.reactions_or_result = result;
instance.SetStatus(status);
promise_internal::ZeroOutEmbedderOffsets(instance);
-
- if (IsPromiseHookEnabledOrHasAsyncEventDelegate()) {
- runtime::PromiseHookInit(instance, Undefined);
- }
+ RunAnyPromiseHookInit(instance, Undefined);
return instance;
}
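RunContextPromiseHook in promise-misc.tq follows one pattern at every hook site: bail out if the context-hook bit is clear, load the callable from the native context slot, unwrap a PromiseCapability to its promise, call the hook, and report rather than propagate any exception; RunAnyPromiseHookInit additionally fast-returns when the flags word is zero. A compact C++ model of that flow is below; the slot representation, promise type and reporting call are placeholders.

#include <cstdint>
#include <cstdio>
#include <exception>
#include <functional>
#include <optional>

struct JSPromise { int id = 0; };

constexpr uint32_t kContextHookBit = 1u << 1;

using Hook = std::function<void(const JSPromise&)>;

void ReportMessageFromMicrotask(const std::exception& e) {
  std::printf("hook threw: %s\n", e.what());  // reported, not rethrown
}

void RunContextPromiseHook(uint32_t flags, const std::optional<Hook>& slot,
                           const JSPromise& promise) {
  if ((flags & kContextHookBit) == 0) return;  // context hooks disabled
  if (!slot) return;                           // slot holds no callable
  try {
    (*slot)(promise);
  } catch (const std::exception& e) {
    ReportMessageFromMicrotask(e);
  }
}

int main() {
  JSPromise p{7};
  std::optional<Hook> resolve_hook =
      [](const JSPromise& promise) { std::printf("resolve %d\n", promise.id); };
  RunContextPromiseHook(kContextHookBit, resolve_hook, p);
  RunContextPromiseHook(0, resolve_hook, p);  // flags clear: hook skipped
}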
diff --git a/chromium/v8/src/builtins/promise-resolve.tq b/chromium/v8/src/builtins/promise-resolve.tq
index e933dfbae0a..fa3d19411fc 100644
--- a/chromium/v8/src/builtins/promise-resolve.tq
+++ b/chromium/v8/src/builtins/promise-resolve.tq
@@ -30,7 +30,8 @@ transitioning builtin
PromiseResolve(implicit context: Context)(
constructor: JSReceiver, value: JSAny): JSAny {
const nativeContext = LoadNativeContext(context);
- const promiseFun = *NativeContextSlot(ContextSlot::PROMISE_FUNCTION_INDEX);
+ const promiseFun = *NativeContextSlot(
+ nativeContext, ContextSlot::PROMISE_FUNCTION_INDEX);
try {
// Check if {value} is a JSPromise.
const value = Cast<JSPromise>(value) otherwise NeedToAllocate;
@@ -40,7 +41,8 @@ PromiseResolve(implicit context: Context)(
// intact, as that guards the lookup path for "constructor" on
// JSPromise instances which have the (initial) Promise.prototype.
const promisePrototype =
- *NativeContextSlot(ContextSlot::PROMISE_PROTOTYPE_INDEX);
+ *NativeContextSlot(
+ nativeContext, ContextSlot::PROMISE_PROTOTYPE_INDEX);
// Check that Torque load elimination works.
static_assert(nativeContext == LoadNativeContext(context));
if (value.map.prototype != promisePrototype) {
@@ -97,7 +99,7 @@ ResolvePromise(implicit context: Context)(
// We also let the runtime handle it if promise == resolution.
// We can use pointer comparison here, since the {promise} is guaranteed
// to be a JSPromise inside this function and thus is reference comparable.
- if (IsPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate() ||
+ if (IsIsolatePromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate() ||
TaggedEqual(promise, resolution))
deferred {
return runtime::ResolvePromise(promise, resolution);
@@ -139,7 +141,8 @@ ResolvePromise(implicit context: Context)(
assert(IsJSReceiverMap(resolutionMap));
assert(!IsPromiseThenProtectorCellInvalid());
if (resolutionMap ==
- *NativeContextSlot(ContextSlot::ITERATOR_RESULT_MAP_INDEX)) {
+ *NativeContextSlot(
+ nativeContext, ContextSlot::ITERATOR_RESULT_MAP_INDEX)) {
return FulfillPromise(promise, resolution);
} else {
goto Slow;
@@ -147,10 +150,11 @@ ResolvePromise(implicit context: Context)(
}
const promisePrototype =
- *NativeContextSlot(ContextSlot::PROMISE_PROTOTYPE_INDEX);
+ *NativeContextSlot(
+ nativeContext, ContextSlot::PROMISE_PROTOTYPE_INDEX);
if (resolutionMap.prototype == promisePrototype) {
// The {resolution} is a native Promise in this case.
- then = *NativeContextSlot(ContextSlot::PROMISE_THEN_INDEX);
+ then = *NativeContextSlot(nativeContext, ContextSlot::PROMISE_THEN_INDEX);
// Check that Torque load elimination works.
static_assert(nativeContext == LoadNativeContext(context));
goto Enqueue;
diff --git a/chromium/v8/src/builtins/riscv64/builtins-riscv64.cc b/chromium/v8/src/builtins/riscv64/builtins-riscv64.cc
index 04907f5268a..afd9a1fca1c 100644
--- a/chromium/v8/src/builtins/riscv64/builtins-riscv64.cc
+++ b/chromium/v8/src/builtins/riscv64/builtins-riscv64.cc
@@ -6,6 +6,7 @@
#include "src/api/api-arguments.h"
#include "src/codegen/code-factory.h"
+#include "src/codegen/interface-descriptors-inl.h"
#include "src/debug/debug.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/execution/frame-constants.h"
@@ -100,7 +101,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// a0: number of arguments (untagged)
// a1: constructor function
// a3: new target
- __ InvokeFunctionWithNewTarget(a1, a3, a0, CALL_FUNCTION);
+ __ InvokeFunctionWithNewTarget(a1, a3, a0, InvokeType::kCall);
// Restore context from the frame.
__ Ld(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
@@ -225,7 +226,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ Push(a6);
// Call the function.
- __ InvokeFunctionWithNewTarget(a1, a3, a0, CALL_FUNCTION);
+ __ InvokeFunctionWithNewTarget(a1, a3, a0, InvokeType::kCall);
// ----------- S t a t e -------------
// -- a0: constructor result
@@ -300,12 +301,16 @@ void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
Generate_JSBuiltinsConstructStubHelper(masm);
}
-static void GetSharedFunctionInfoBytecode(MacroAssembler* masm,
- Register sfi_data,
- Register scratch1) {
+// TODO(v8:11429): Add a path for "not_compiled" and unify the two uses under
+// the more general dispatch.
+static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
+ Register sfi_data,
+ Register scratch1,
+ Label* is_baseline) {
Label done;
__ GetObjectType(sfi_data, scratch1, scratch1);
+ __ Branch(is_baseline, eq, scratch1, Operand(BASELINE_DATA_TYPE));
__ Branch(&done, ne, scratch1, Operand(INTERPRETER_DATA_TYPE));
__ Ld(sfi_data,
FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
@@ -325,7 +330,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Store input value into generator object.
__ Sd(a0, FieldMemOperand(a1, JSGeneratorObject::kInputOrDebugPosOffset));
__ RecordWriteField(a1, JSGeneratorObject::kInputOrDebugPosOffset, a0, a3,
- kRAHasNotBeenSaved, kDontSaveFPRegs);
+ kRAHasNotBeenSaved, SaveFPRegsMode::kIgnore);
// Load suspended function and context.
__ Ld(a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
@@ -388,12 +393,14 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Underlying function needs to have bytecode available.
if (FLAG_debug_code) {
+ Label is_baseline;
__ Ld(a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
__ Ld(a3, FieldMemOperand(a3, SharedFunctionInfo::kFunctionDataOffset));
- GetSharedFunctionInfoBytecode(masm, a3, a0);
+ GetSharedFunctionInfoBytecodeOrBaseline(masm, a3, a0, &is_baseline);
__ GetObjectType(a3, a3, a3);
__ Assert(eq, AbortReason::kMissingBytecodeArray, a3,
Operand(BYTECODE_ARRAY_TYPE));
+ __ bind(&is_baseline);
}
// Resume (Ignition/TurboFan) generator object.
@@ -763,8 +770,8 @@ static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm,
__ Sd(optimized_code, FieldMemOperand(closure, JSFunction::kCodeOffset));
__ Move(scratch1, optimized_code); // Write barrier clobbers scratch1 below.
__ RecordWriteField(closure, JSFunction::kCodeOffset, scratch1, scratch2,
- kRAHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
+ kRAHasNotBeenSaved, SaveFPRegsMode::kIgnore,
+ RememberedSetAction::kOmit, SmiCheck::kOmit);
}
static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
@@ -964,6 +971,184 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
__ bind(&end);
}
+// Read off the optimization state in the feedback vector and check if there
+// is optimized code or an optimization marker that needs to be processed.
+static void LoadOptimizationStateAndJumpIfNeedsProcessing(
+ MacroAssembler* masm, Register optimization_state, Register feedback_vector,
+ Label* has_optimized_code_or_marker) {
+ __ RecordComment("[ Check optimization state");
+ UseScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
+ __ Lw(optimization_state,
+ FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
+ __ And(
+ scratch, optimization_state,
+ Operand(FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask));
+ __ Branch(has_optimized_code_or_marker, ne, scratch, Operand(zero_reg));
+ __ RecordComment("]");
+}
+
+static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
+ MacroAssembler* masm, Register optimization_state,
+ Register feedback_vector) {
+ Label maybe_has_optimized_code;
+ // Check if optimized code marker is available
+ __ And(
+ t0, optimization_state,
+ Operand(FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker));
+ __ Branch(&maybe_has_optimized_code, eq, t0, Operand(zero_reg));
+
+ Register optimization_marker = optimization_state;
+ __ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
+ MaybeOptimizeCode(masm, feedback_vector, optimization_marker);
+
+ __ bind(&maybe_has_optimized_code);
+ Register optimized_code_entry = optimization_state;
+  __ Ld(optimized_code_entry,
+ FieldMemOperand(feedback_vector,
+ FeedbackVector::kMaybeOptimizedCodeOffset));
+ TailCallOptimizedCodeSlot(masm, optimized_code_entry, t4, a5);
+}
+
+// static
+void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
+ UseScratchRegisterScope temps(masm);
+ temps.Include(kScratchReg.bit() | kScratchReg2.bit());
+ auto descriptor = Builtins::CallInterfaceDescriptorFor(
+ Builtins::kBaselineOutOfLinePrologue);
+ Register closure = descriptor.GetRegisterParameter(
+ BaselineOutOfLinePrologueDescriptor::kClosure);
+ // Load the feedback vector from the closure.
+ Register feedback_vector = temps.Acquire();
+ __ Ld(feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
+ __ Ld(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
+ if (FLAG_debug_code) {
+ __ GetObjectType(feedback_vector, t0, t0);
+ __ Assert(eq, AbortReason::kExpectedFeedbackVector, t0,
+ Operand(FEEDBACK_VECTOR_TYPE));
+ }
+
+ // Check for an optimization marker.
+ Label has_optimized_code_or_marker;
+ Register optimization_state = temps.Acquire();
+ LoadOptimizationStateAndJumpIfNeedsProcessing(
+ masm, optimization_state, feedback_vector, &has_optimized_code_or_marker);
+
+ // Increment invocation count for the function.
+ {
+ Register invocation_count = t0;
+ __ Lw(invocation_count,
+ FieldMemOperand(feedback_vector,
+ FeedbackVector::kInvocationCountOffset));
+ __ Add32(invocation_count, invocation_count, Operand(1));
+ __ Sw(invocation_count,
+ FieldMemOperand(feedback_vector,
+ FeedbackVector::kInvocationCountOffset));
+ }
+
+ __ RecordComment("[ Frame Setup");
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
+  // Normally the first thing we'd do here is Push(ra, fp), but we already
+  // entered the frame in BaselineCompiler::Prologue, as we had to use the
+  // value of ra before the call to this BaselineOutOfLinePrologue builtin.
+
+ Register callee_context = descriptor.GetRegisterParameter(
+ BaselineOutOfLinePrologueDescriptor::kCalleeContext);
+ Register callee_js_function = descriptor.GetRegisterParameter(
+ BaselineOutOfLinePrologueDescriptor::kClosure);
+ __ Push(callee_context, callee_js_function);
+ DCHECK_EQ(callee_js_function, kJavaScriptCallTargetRegister);
+ DCHECK_EQ(callee_js_function, kJSFunctionRegister);
+
+ Register argc = descriptor.GetRegisterParameter(
+ BaselineOutOfLinePrologueDescriptor::kJavaScriptCallArgCount);
+ // We'll use the bytecode for both code age/OSR resetting, and pushing onto
+ // the frame, so load it into a register.
+ Register bytecodeArray = descriptor.GetRegisterParameter(
+ BaselineOutOfLinePrologueDescriptor::kInterpreterBytecodeArray);
+
+ // Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset
+ // are 8-bit fields next to each other, so we could just optimize by writing
+  // a 16-bit value. These static asserts guard that our assumption is valid.
+ STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
+ BytecodeArray::kOsrNestingLevelOffset + kCharSize);
+ STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
+ __ Sh(zero_reg,
+ FieldMemOperand(bytecodeArray, BytecodeArray::kOsrNestingLevelOffset));
+
+ __ Push(argc, bytecodeArray);
+
+ // Baseline code frames store the feedback vector where interpreter would
+ // store the bytecode offset.
+ if (FLAG_debug_code) {
+ __ GetObjectType(feedback_vector, t0, t0);
+ __ Assert(eq, AbortReason::kExpectedFeedbackVector, t0,
+ Operand(FEEDBACK_VECTOR_TYPE));
+ }
+  // Our stack is currently aligned. We have to push something along with
+ // the feedback vector to keep it that way -- we may as well start
+ // initialising the register frame.
+ // TODO(v8:11429,leszeks): Consider guaranteeing that this call leaves
+ // `undefined` in the accumulator register, to skip the load in the baseline
+ // code.
+ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
+ __ Push(feedback_vector, kInterpreterAccumulatorRegister);
+ __ RecordComment("]");
+
+ __ RecordComment("[ Stack/interrupt check");
+ Label call_stack_guard;
+ {
+ // Stack check. This folds the checks for both the interrupt stack limit
+ // check and the real stack limit into one by just checking for the
+ // interrupt limit. The interrupt limit is either equal to the real stack
+ // limit or tighter. By ensuring we have space until that limit after
+ // building the frame we can quickly precheck both at once.
+ Register frame_size = t0;
+ __ Ld(frame_size,
+ FieldMemOperand(bytecodeArray, BytecodeArray::kFrameSizeOffset));
+ Register sp_minus_frame_size = frame_size;
+ __ Sub64(sp_minus_frame_size, sp, frame_size);
+ Register interrupt_limit = t1;
+ __ LoadStackLimit(interrupt_limit,
+ MacroAssembler::StackLimitKind::kInterruptStackLimit);
+ __ Branch(&call_stack_guard, Uless, sp_minus_frame_size,
+ Operand(interrupt_limit));
+ __ RecordComment("]");
+ }
+
+  // Do "fast" return to the caller pc in ra.
+ // TODO(v8:11429): Document this frame setup better.
+ __ Ret();
+
+ __ bind(&has_optimized_code_or_marker);
+ {
+ __ RecordComment("[ Optimized marker check");
+ // Drop the frame created by the baseline call.
+ __ Pop(fp, ra);
+ MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state,
+ feedback_vector);
+ __ Trap();
+ __ RecordComment("]");
+ }
+
+ __ bind(&call_stack_guard);
+ {
+ Register new_target = descriptor.GetRegisterParameter(
+ BaselineOutOfLinePrologueDescriptor::kJavaScriptCallNewTarget);
+
+ FrameScope frame_scope(masm, StackFrame::INTERNAL);
+ __ RecordComment("[ Stack/interrupt call");
+ // Save incoming new target or generator
+ __ Push(zero_reg, new_target);
+ __ CallRuntime(Runtime::kStackGuard);
+ __ Pop(new_target, zero_reg);
+ __ RecordComment("]");
+ }
+ __ Ret();
+ temps.Exclude(kScratchReg.bit() | kScratchReg2.bit());
+}
+
// Generate code for entering a JS function with the interpreter.
// On entry to the function the receiver and arguments have been pushed on the
// stack left to right.
@@ -989,8 +1174,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ Ld(kInterpreterBytecodeArrayRegister,
FieldMemOperand(kScratchReg, SharedFunctionInfo::kFunctionDataOffset));
- GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister,
- kScratchReg);
+ Label is_baseline;
+ GetSharedFunctionInfoBytecodeOrBaseline(
+ masm, kInterpreterBytecodeArrayRegister, kScratchReg, &is_baseline);
// The bytecode array could have been flushed from the shared function info,
// if so, call into CompileLazy.
@@ -1188,6 +1374,44 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
FeedbackVector::kMaybeOptimizedCodeOffset));
TailCallOptimizedCodeSlot(masm, optimized_code_entry, t4, a5);
+ __ bind(&is_baseline);
+ {
+ // Load the feedback vector from the closure.
+ __ Ld(feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
+ __ Ld(feedback_vector,
+ FieldMemOperand(feedback_vector, Cell::kValueOffset));
+
+ Label install_baseline_code;
+ // Check if feedback vector is valid. If not, call prepare for baseline to
+ // allocate it.
+ __ Ld(t0, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
+ __ Lh(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
+ __ Branch(&install_baseline_code, ne, t0, Operand(FEEDBACK_VECTOR_TYPE));
+
+ // Read off the optimization state in the feedback vector.
+ // TODO(v8:11429): Is this worth doing here? Baseline code will check it
+ // anyway...
+ __ Ld(optimization_state,
+ FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
+
+  // Check if there is optimized code or an optimization marker that needs to
+ // be processed.
+ __ And(
+ t0, optimization_state,
+ Operand(FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask));
+ __ Branch(&has_optimized_code_or_marker, ne, t0, Operand(zero_reg));
+
+ // Load the baseline code into the closure.
+ __ Ld(a2, FieldMemOperand(kInterpreterBytecodeArrayRegister,
+ BaselineData::kBaselineCodeOffset));
+ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+ ReplaceClosureCodeWithOptimizedCode(masm, a2, closure, t0, t1);
+ __ JumpCodeObject(a2);
+
+ __ bind(&install_baseline_code);
+ GenerateTailCallToReturnedCode(masm, Runtime::kInstallBaselineCode);
+ }
__ bind(&compile_lazy);
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
@@ -1407,7 +1631,7 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
__ Jump(kJavaScriptCallCodeStartRegister);
}
-void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
+void Builtins::Generate_InterpreterEnterAtNextBytecode(MacroAssembler* masm) {
// Advance the current bytecode offset stored within the given interpreter
// stack frame. This simulates what all bytecode handlers do upon completion
// of the underlying operation.
@@ -1454,7 +1678,7 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
__ Abort(AbortReason::kInvalidBytecodeAdvance);
}
-void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+void Builtins::Generate_InterpreterEnterAtBytecode(MacroAssembler* masm) {
Generate_InterpreterEnterBytecode(masm);
}
@@ -1503,12 +1727,12 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
sp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
// Load builtin index (stored as a Smi) and use it to get the builtin start
// address from the builtins table.
- __ Pop(t0);
+ __ Pop(t6);
__ Add64(sp, sp,
Operand(BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
__ Pop(ra);
- __ LoadEntryFromBuiltinIndex(t0);
- __ Jump(t0);
+ __ LoadEntryFromBuiltinIndex(t6);
+ __ Jump(t6);
}
} // namespace
@@ -1542,7 +1766,20 @@ void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
__ Ret();
}
-void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
+void Builtins::Generate_TailCallOptimizedCodeSlot(MacroAssembler* masm) {
+ Register optimized_code_entry = kJavaScriptCallCodeStartRegister;
+ TailCallOptimizedCodeSlot(masm, optimized_code_entry, t4, t0);
+}
+namespace {
+
+void Generate_OSREntry(MacroAssembler* masm, Register entry_address,
+ Operand offset = Operand(int64_t(0))) {
+ __ Add64(ra, entry_address, offset);
+ // And "return" to the OSR entry point of the function.
+ __ Ret();
+}
+
+void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ CallRuntime(Runtime::kCompileForOnStackReplacement);
@@ -1550,11 +1787,11 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
// If the code object is null, just return to the caller.
__ Ret(eq, a0, Operand(Smi::zero()));
-
- // Drop the handler frame that is be sitting on top of the actual
- // JavaScript frame. This is the case then OSR is triggered from bytecode.
- __ LeaveFrame(StackFrame::STUB);
-
+ if (is_interpreter) {
+    // Drop the handler frame that is sitting on top of the actual
+    // JavaScript frame. This is the case when OSR is triggered from bytecode.
+ __ LeaveFrame(StackFrame::STUB);
+ }
// Load deoptimization data from the code object.
// <deopt_data> = <code>[#deoptimization_data_offset]
__ Ld(a1, MemOperand(a0, Code::kDeoptimizationDataOffset - kHeapObjectTag));
@@ -1568,9 +1805,18 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
// Compute the target address = code_obj + header_size + osr_offset
// <entry_addr> = <code_obj> + #header_size + <osr_offset>
__ Add64(a0, a0, a1);
- __ Add64(ra, a0, Code::kHeaderSize - kHeapObjectTag);
- // And "return" to the OSR entry point of the function.
- __ Ret();
+ Generate_OSREntry(masm, a0, Operand(Code::kHeaderSize - kHeapObjectTag));
+}
+} // namespace
+
+void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
+ return OnStackReplacement(masm, true);
+}
+
+void Builtins::Generate_BaselineOnStackReplacement(MacroAssembler* masm) {
+ __ Ld(kContextRegister,
+ MemOperand(fp, StandardFrameConstants::kContextOffset));
+ return OnStackReplacement(masm, false);
}
// static
@@ -1808,7 +2054,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// -- a4 : len (number of elements to push from args)
// -- a3 : new.target (for [[Construct]])
// -----------------------------------
- if (masm->emit_debug_code()) {
+ if (FLAG_debug_code) {
// Allow a2 to be a FixedArray, or a FixedDoubleArray if a4 == 0.
Label ok, fail;
__ AssertNotSmi(a2);
@@ -2070,7 +2316,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ Lhu(a2,
FieldMemOperand(a2, SharedFunctionInfo::kFormalParameterCountOffset));
- __ InvokeFunctionCode(a1, no_reg, a2, a0, JUMP_FUNCTION);
+ __ InvokeFunctionCode(a1, no_reg, a2, a0, InvokeType::kJump);
// The function is a "classConstructor", need to raise an exception.
__ bind(&class_constructor);
@@ -2438,10 +2684,10 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// sp: stack pointer (restored as callee's sp after C call)
// cp: current context (C callee-saved)
//
- // If argv_mode == kArgvInRegister:
+ // If argv_mode == ArgvMode::kRegister:
// a2: pointer to the first argument
- if (argv_mode == kArgvInRegister) {
+ if (argv_mode == ArgvMode::kRegister) {
// Move argv into the correct register.
__ Move(s1, a2);
} else {
@@ -2453,7 +2699,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// Enter the exit frame that transitions from JavaScript to C++.
FrameScope scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(
- save_doubles == kSaveFPRegs, 0,
+ save_doubles == SaveFPRegsMode::kSave, 0,
builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT);
// s3: number of arguments including receiver (C callee-saved)
@@ -2502,12 +2748,12 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// a0:a1: result
// sp: stack pointer
// fp: frame pointer
- Register argc = argv_mode == kArgvInRegister
+ Register argc = argv_mode == ArgvMode::kRegister
// We don't want to pop arguments so set argc to no_reg.
? no_reg
// s3: still holds argc (callee-saved).
: s3;
- __ LeaveExitFrame(save_doubles == kSaveFPRegs, argc, EMIT_RETURN);
+ __ LeaveExitFrame(save_doubles == SaveFPRegsMode::kSave, argc, EMIT_RETURN);
// Handling of exception.
__ bind(&exception_returned);
@@ -2689,6 +2935,10 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
__ Trap();
}
+void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) {
+ // Only needed on x64.
+ __ Trap();
+}
namespace {
int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
@@ -2762,7 +3012,7 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
// No more valid handles (the result handle was the last one). Restore
// previous handle scope.
__ Sd(s3, MemOperand(s5, kNextOffset));
- if (__ emit_debug_code()) {
+ if (FLAG_debug_code) {
__ Lw(a1, MemOperand(s5, kLevelOffset));
__ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall, a1,
Operand(s2));
@@ -3228,9 +3478,9 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
}
}
- __ pop(t3); // Get continuation, leave pc on stack.
+ __ pop(t6); // Get continuation, leave pc on stack.
__ pop(ra);
- __ Jump(t3);
+ __ Jump(t6);
__ stop();
}
@@ -3252,6 +3502,146 @@ void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
}
+namespace {
+
+// Converts an interpreter frame into a baseline frame and continues execution
+// in baseline code (baseline code has to exist on the shared function info),
+// either at the start or the end of the current bytecode.
+void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
+ bool is_osr = false) {
+ __ Push(zero_reg, kInterpreterAccumulatorRegister);
+ Label start;
+ __ bind(&start);
+
+ // Get function from the frame.
+ Register closure = a1;
+ __ Ld(closure, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
+
+ // Replace BytecodeOffset with the feedback vector.
+ Register feedback_vector = a2;
+ __ Ld(feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
+ __ Ld(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
+ Label install_baseline_code;
+ // Check if feedback vector is valid. If not, call prepare for baseline to
+ // allocate it.
+ __ GetObjectType(feedback_vector, t0, t0);
+ __ Branch(&install_baseline_code, eq, t0, Operand(FEEDBACK_VECTOR_TYPE));
+ // Save BytecodeOffset from the stack frame.
+ __ SmiUntag(kInterpreterBytecodeOffsetRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+ // Replace BytecodeOffset with the feedback vector.
+ __ Sd(feedback_vector,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+ feedback_vector = no_reg;
+
+ // Get the Code object from the shared function info.
+ UseScratchRegisterScope temps(masm);
+ Register code_obj = temps.Acquire();
+ __ Ld(code_obj,
+ FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ __ Ld(code_obj,
+ FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset));
+ __ Ld(code_obj, FieldMemOperand(code_obj, BaselineData::kBaselineCodeOffset));
+
+ // Compute baseline pc for bytecode offset.
+ __ Push(zero_reg, kInterpreterAccumulatorRegister);
+ ExternalReference get_baseline_pc_extref;
+ if (next_bytecode || is_osr) {
+ get_baseline_pc_extref =
+ ExternalReference::baseline_pc_for_next_executed_bytecode();
+ } else {
+ get_baseline_pc_extref =
+ ExternalReference::baseline_pc_for_bytecode_offset();
+ }
+
+ Register get_baseline_pc = a3;
+ __ li(get_baseline_pc, get_baseline_pc_extref);
+
+ // If the code deoptimizes during the implicit function entry stack interrupt
+ // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is
+ // not a valid bytecode offset.
+ // TODO(pthier): Investigate if it is feasible to handle this special case
+ // in TurboFan instead of here.
+ Label valid_bytecode_offset, function_entry_bytecode;
+ if (!is_osr) {
+ __ Branch(&function_entry_bytecode, eq, kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag +
+ kFunctionEntryBytecodeOffset));
+ }
+
+ __ Sub64(kInterpreterBytecodeOffsetRegister,
+ kInterpreterBytecodeOffsetRegister,
+ (BytecodeArray::kHeaderSize - kHeapObjectTag));
+
+ __ bind(&valid_bytecode_offset);
+ // Get bytecode array from the stack frame.
+ __ Ld(kInterpreterBytecodeArrayRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ {
+ Register arg_reg_1 = a0;
+ Register arg_reg_2 = a1;
+ Register arg_reg_3 = a2;
+ __ Move(arg_reg_1, code_obj);
+ __ Move(arg_reg_2, kInterpreterBytecodeOffsetRegister);
+ __ Move(arg_reg_3, kInterpreterBytecodeArrayRegister);
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ CallCFunction(get_baseline_pc, 3, 0);
+ }
+ __ Add64(code_obj, code_obj, kReturnRegister0);
+ __ Pop(kInterpreterAccumulatorRegister, zero_reg);
+
+ if (is_osr) {
+ // Reset the OSR loop nesting depth to disarm back edges.
+ // TODO(pthier): Separate baseline Sparkplug from TF arming and don't disarm
+ // Sparkplug here.
+ __ Sd(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister,
+ BytecodeArray::kOsrNestingLevelOffset));
+ Generate_OSREntry(masm, code_obj,
+ Operand(Code::kHeaderSize - kHeapObjectTag));
+ } else {
+ __ Add64(code_obj, code_obj, Code::kHeaderSize - kHeapObjectTag);
+ __ Jump(code_obj);
+ }
+ __ Trap(); // Unreachable.
+
+ if (!is_osr) {
+ __ bind(&function_entry_bytecode);
+    // If the bytecode offset is kFunctionEntryBytecodeOffset, get the start
+    // address of
+ // the first bytecode.
+ __ li(kInterpreterBytecodeOffsetRegister, Operand(int64_t(0)));
+ if (next_bytecode) {
+ __ li(get_baseline_pc,
+ ExternalReference::baseline_pc_for_bytecode_offset());
+ }
+ __ Branch(&valid_bytecode_offset);
+ }
+
+ __ bind(&install_baseline_code);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(closure);
+ __ CallRuntime(Runtime::kInstallBaselineCode, 1);
+ }
+ // Retry from the start after installing baseline code.
+ __ Branch(&start);
+}
+
+} // namespace
+
+void Builtins::Generate_BaselineEnterAtBytecode(MacroAssembler* masm) {
+ Generate_BaselineEntry(masm, false);
+}
+
+void Builtins::Generate_BaselineEnterAtNextBytecode(MacroAssembler* masm) {
+ Generate_BaselineEntry(masm, true);
+}
+
+void Builtins::Generate_InterpreterOnStackReplacement_ToBaseline(
+ MacroAssembler* masm) {
+ Generate_BaselineEntry(masm, false, true);
+}
+
void Builtins::Generate_DynamicCheckMapsTrampoline(MacroAssembler* masm) {
FrameScope scope(masm, StackFrame::MANUAL);
__ EnterFrame(StackFrame::INTERNAL);
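
Several of the new RISC-V helpers above (LoadOptimizationStateAndJumpIfNeedsProcessing and the is_baseline path in the interpreter trampoline) reduce to the same masked test on the feedback vector's flags word. A hedged C++ sketch of that test, with a made-up bit layout rather than FeedbackVector's real one:

#include <cstdint>

// Illustrative bit layout only; the actual masks are defined by FeedbackVector.
constexpr uint32_t kHasOptimizedCodeBit        = 1u << 0;
constexpr uint32_t kCompileOptimizedMarkerBits = 0x3u << 1;
constexpr uint32_t kHasOptimizedCodeOrCompileOptimizedMarkerMask =
    kHasOptimizedCodeBit | kCompileOptimizedMarkerBits;

// True when the prologue has to leave the fast path and either tail-call
// already-optimized code or act on a compile-optimized marker.
bool NeedsOptimizationProcessing(uint32_t optimization_state) {
  return (optimization_state & kHasOptimizedCodeOrCompileOptimizedMarkerMask) != 0;
}
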
diff --git a/chromium/v8/src/builtins/s390/builtins-s390.cc b/chromium/v8/src/builtins/s390/builtins-s390.cc
index 7711af6e901..0272621ac01 100644
--- a/chromium/v8/src/builtins/s390/builtins-s390.cc
+++ b/chromium/v8/src/builtins/s390/builtins-s390.cc
@@ -6,6 +6,7 @@
#include "src/api/api-arguments.h"
#include "src/codegen/code-factory.h"
+#include "src/codegen/interface-descriptors-inl.h"
// For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop.
#include "src/codegen/macro-assembler-inl.h"
#include "src/codegen/register-configuration.h"
@@ -110,7 +111,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// r3: constructor function
// r5: new target
- __ InvokeFunctionWithNewTarget(r3, r5, r2, CALL_FUNCTION);
+ __ InvokeFunctionWithNewTarget(r3, r5, r2, InvokeType::kCall);
// Restore context from the frame.
__ LoadU64(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
@@ -238,7 +239,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ Push(r8);
// Call the function.
- __ InvokeFunctionWithNewTarget(r3, r5, r2, CALL_FUNCTION);
+ __ InvokeFunctionWithNewTarget(r3, r5, r2, InvokeType::kCall);
// ----------- S t a t e -------------
// -- r0: constructor result
@@ -339,7 +340,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ StoreTaggedField(
r2, FieldMemOperand(r3, JSGeneratorObject::kInputOrDebugPosOffset), r0);
__ RecordWriteField(r3, JSGeneratorObject::kInputOrDebugPosOffset, r2, r5,
- kLRHasNotBeenSaved, kDontSaveFPRegs);
+ kLRHasNotBeenSaved, SaveFPRegsMode::kIgnore);
// Load suspended function and context.
__ LoadTaggedPointerField(
@@ -395,18 +396,15 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
FieldMemOperand(r3, JSGeneratorObject::kParametersAndRegistersOffset));
{
Label done_loop, loop;
- __ mov(r8, r5);
-
__ bind(&loop);
- __ SubS64(r8, r8, Operand(1));
+ __ SubS64(r5, r5, Operand(1));
__ blt(&done_loop);
- __ ShiftLeftU64(r1, r8, Operand(kTaggedSizeLog2));
+ __ ShiftLeftU64(r1, r5, Operand(kTaggedSizeLog2));
__ la(scratch, MemOperand(r4, r1));
__ LoadAnyTaggedField(scratch,
FieldMemOperand(scratch, FixedArray::kHeaderSize));
__ Push(scratch);
__ b(&loop);
-
__ bind(&done_loop);
// Push receiver.
@@ -857,8 +855,8 @@ static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm,
__ mov(scratch1,
optimized_code); // Write barrier clobbers scratch1 below.
__ RecordWriteField(closure, JSFunction::kCodeOffset, scratch1, scratch2,
- kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
+ kLRHasNotBeenSaved, SaveFPRegsMode::kIgnore,
+ RememberedSetAction::kOmit, SmiCheck::kOmit);
}
static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
@@ -1527,7 +1525,7 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
__ Jump(kJavaScriptCallCodeStartRegister);
}
-void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
+void Builtins::Generate_InterpreterEnterAtNextBytecode(MacroAssembler* masm) {
// Get bytecode array and bytecode offset from the stack frame.
__ LoadU64(kInterpreterBytecodeArrayRegister,
MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
@@ -1573,7 +1571,7 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
__ Abort(AbortReason::kInvalidBytecodeAdvance);
}
-void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+void Builtins::Generate_InterpreterEnterAtBytecode(MacroAssembler* masm) {
Generate_InterpreterEnterBytecode(masm);
}
@@ -1890,6 +1888,7 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
}
// static
+// TODO(v8:11615): Observe Code::kMaxArguments in CallOrConstructVarargs
void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
Handle<Code> code) {
// ----------- S t a t e -------------
@@ -1902,7 +1901,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
Register scratch = ip;
- if (masm->emit_debug_code()) {
+ if (FLAG_debug_code) {
// Allow r4 to be a FixedArray, or a FixedDoubleArray if r6 == 0.
Label ok, fail;
__ AssertNotSmi(r4);
@@ -2177,7 +2176,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ LoadU16(
r4, FieldMemOperand(r4, SharedFunctionInfo::kFormalParameterCountOffset));
- __ InvokeFunctionCode(r3, no_reg, r4, r2, JUMP_FUNCTION);
+ __ InvokeFunctionCode(r3, no_reg, r4, r2, InvokeType::kJump);
// The function is a "classConstructor", need to raise an exception.
__ bind(&class_constructor);
@@ -2549,6 +2548,11 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
// TODO(v8:10701): Implement for this platform.
__ Trap();
}
+
+void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) {
+ // Only needed on x64.
+ __ Trap();
+}
#endif // V8_ENABLE_WEBASSEMBLY
void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
@@ -2561,12 +2565,12 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// sp: stack pointer (restored as callee's sp after C call)
// cp: current context (C callee-saved)
//
- // If argv_mode == kArgvInRegister:
+ // If argv_mode == ArgvMode::kRegister:
// r4: pointer to the first argument
__ mov(r7, r3);
- if (argv_mode == kArgvInRegister) {
+ if (argv_mode == ArgvMode::kRegister) {
// Move argv into the correct register.
__ mov(r3, r4);
} else {
@@ -2594,7 +2598,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
#endif
__ EnterExitFrame(
- save_doubles, arg_stack_space,
+ save_doubles == SaveFPRegsMode::kSave, arg_stack_space,
builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT);
// Store a copy of argc, argv in callee-saved registers for later.
@@ -2657,12 +2661,12 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// r2:r3: result
// sp: stack pointer
// fp: frame pointer
- Register argc = argv_mode == kArgvInRegister
+ Register argc = argv_mode == ArgvMode::kRegister
// We don't want to pop arguments so set argc to no_reg.
? no_reg
// r6: still holds argc (callee-saved).
: r6;
- __ LeaveExitFrame(save_doubles, argc);
+ __ LeaveExitFrame(save_doubles == SaveFPRegsMode::kSave, argc);
__ b(r14);
// Handling of exception.
@@ -2916,7 +2920,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
// No more valid handles (the result handle was the last one). Restore
// previous handle scope.
__ StoreU64(r6, MemOperand(r9, kNextOffset));
- if (__ emit_debug_code()) {
+ if (FLAG_debug_code) {
__ LoadU32(r3, MemOperand(r9, kLevelOffset));
__ CmpS64(r3, r8);
__ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall);
@@ -3464,11 +3468,12 @@ void Builtins::Generate_DynamicCheckMapsTrampoline(MacroAssembler* masm) {
descriptor.GetRegisterParameter(DynamicCheckMapsDescriptor::kSlot);
Register handler_arg =
descriptor.GetRegisterParameter(DynamicCheckMapsDescriptor::kHandler);
- __ LoadP(handler_arg, MemOperand(fp, CommonFrameConstants::kCallerPCOffset));
- __ LoadP(
+ __ LoadU64(handler_arg,
+ MemOperand(fp, CommonFrameConstants::kCallerPCOffset));
+ __ LoadU64(
slot_arg,
MemOperand(handler_arg, Deoptimizer::kEagerWithResumeImmedArgs1PcOffset));
- __ LoadP(
+ __ LoadU64(
handler_arg,
MemOperand(handler_arg, Deoptimizer::kEagerWithResumeImmedArgs2PcOffset));
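
Most of the mechanical churn in this file (and in the other backends) swaps bare enumerators such as CALL_FUNCTION, kDontSaveFPRegs and kArgvInRegister for scoped values like InvokeType::kCall, SaveFPRegsMode::kIgnore and ArgvMode::kRegister. A small C++ sketch of the benefit, using illustrative declarations rather than V8's:

// Scoped enums neither leak their enumerators nor convert to int, so a call
// site can no longer pass an fp-regs mode where an invoke type is expected.
enum class InvokeType { kCall, kJump };
enum class SaveFPRegsMode { kIgnore, kSave };

void Invoke(int function_id, InvokeType type, SaveFPRegsMode fp_mode) {
  (void)function_id; (void)type; (void)fp_mode;  // dispatch elided
}

void Caller() {
  Invoke(/*function_id=*/1, InvokeType::kCall, SaveFPRegsMode::kIgnore);  // OK
  // Invoke(1, SaveFPRegsMode::kIgnore, InvokeType::kCall);  // compile error now
}
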
diff --git a/chromium/v8/src/builtins/setup-builtins-internal.cc b/chromium/v8/src/builtins/setup-builtins-internal.cc
index 348866c9bde..fbcfab56f43 100644
--- a/chromium/v8/src/builtins/setup-builtins-internal.cc
+++ b/chromium/v8/src/builtins/setup-builtins-internal.cc
@@ -11,7 +11,6 @@
#include "src/compiler/code-assembler.h"
#include "src/execution/isolate.h"
#include "src/handles/handles-inl.h"
-#include "src/heap/heap-inl.h" // For Heap::code_range.
#include "src/init/setup-isolate.h"
#include "src/interpreter/bytecodes.h"
#include "src/interpreter/interpreter-generator.h"
@@ -42,10 +41,10 @@ AssemblerOptions BuiltinAssemblerOptions(Isolate* isolate,
return options;
}
- const base::AddressRegion& code_range = isolate->heap()->code_range();
+ const base::AddressRegion& code_region = isolate->heap()->code_region();
bool pc_relative_calls_fit_in_code_range =
- !code_range.is_empty() &&
- std::ceil(static_cast<float>(code_range.size() / MB)) <=
+ !code_region.is_empty() &&
+ std::ceil(static_cast<float>(code_region.size() / MB)) <=
kMaxPCRelativeCodeRangeInMB;
options.isolate_independent_code = true;
@@ -219,7 +218,7 @@ void SetupIsolateDelegate::PopulateWithPlaceholders(Isolate* isolate) {
// static
void SetupIsolateDelegate::ReplacePlaceholders(Isolate* isolate) {
- // Replace references from all code objects to placeholders.
+ // Replace references from all builtin code objects to placeholders.
Builtins* builtins = isolate->builtins();
DisallowGarbageCollection no_gc;
CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
@@ -228,11 +227,8 @@ void SetupIsolateDelegate::ReplacePlaceholders(Isolate* isolate) {
RelocInfo::ModeMask(RelocInfo::FULL_EMBEDDED_OBJECT) |
RelocInfo::ModeMask(RelocInfo::COMPRESSED_EMBEDDED_OBJECT) |
RelocInfo::ModeMask(RelocInfo::RELATIVE_CODE_TARGET);
- HeapObjectIterator iterator(isolate->heap());
- for (HeapObject obj = iterator.Next(); !obj.is_null();
- obj = iterator.Next()) {
- if (!obj.IsCode()) continue;
- Code code = Code::cast(obj);
+ for (int i = 0; i < Builtins::builtin_count; i++) {
+ Code code = builtins->builtin(i);
bool flush_icache = false;
for (RelocIterator it(code, kRelocMask); !it.done(); it.next()) {
RelocInfo* rinfo = it.rinfo();
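
The ReplacePlaceholders change above narrows the walk from every Code object on the heap to just the builtin table, since only builtins can still reference the placeholders at this point in setup. A hedged sketch of that narrowing in plain C++, with stand-in containers instead of the real heap iterator and builtin accessors:

#include <vector>

struct Code { bool has_placeholder_refs = false; };

void PatchPlaceholderRefs(Code* code) { code->has_placeholder_refs = false; }

// Before: iterate the whole heap and skip everything that is not Code.
// After: the builtin table already enumerates exactly the candidates.
void ReplacePlaceholders(const std::vector<Code*>& builtins) {
  for (Code* code : builtins) {
    PatchPlaceholderRefs(code);
  }
}
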
diff --git a/chromium/v8/src/builtins/typed-array-createtypedarray.tq b/chromium/v8/src/builtins/typed-array-createtypedarray.tq
index 6e416ddd98f..6333ebf97fd 100644
--- a/chromium/v8/src/builtins/typed-array-createtypedarray.tq
+++ b/chromium/v8/src/builtins/typed-array-createtypedarray.tq
@@ -19,11 +19,17 @@ extern macro TypedArrayBuiltinsAssembler::SetupTypedArrayEmbedderFields(
extern runtime ThrowInvalidTypedArrayAlignment(implicit context: Context)(
Map, String): never;
+extern runtime GrowableSharedArrayBufferByteLength(implicit context: Context)(
+ Object): JSAny;
+
transitioning macro AllocateTypedArray(implicit context: Context)(
isOnHeap: constexpr bool, map: Map, buffer: JSArrayBuffer,
- byteOffset: uintptr, byteLength: uintptr, length: uintptr): JSTypedArray {
+ byteOffset: uintptr, byteLength: uintptr, length: uintptr,
+ isLengthTracking: bool): JSTypedArray {
let elements: ByteArray;
if constexpr (isOnHeap) {
+ assert(!IsResizableArrayBuffer(buffer));
+ assert(!isLengthTracking);
elements = AllocateByteArray(byteLength);
} else {
elements = kEmptyByteArray;
@@ -53,6 +59,9 @@ transitioning macro AllocateTypedArray(implicit context: Context)(
typedArray.byte_offset = byteOffset;
typedArray.byte_length = byteLength;
typedArray.length = length;
+ typedArray.bit_field.is_length_tracking = isLengthTracking;
+ typedArray.bit_field.is_backed_by_rab =
+ IsResizableArrayBuffer(buffer) && !IsSharedArrayBuffer(buffer);
typed_array::AllocateJSTypedArrayExternalPointerEntry(typedArray);
if constexpr (isOnHeap) {
typed_array::SetJSTypedArrayOnHeapDataPtr(typedArray, elements, byteOffset);
@@ -88,8 +97,10 @@ transitioning macro TypedArrayInitialize(implicit context: Context)(
const buffer = AllocateEmptyOnHeapBuffer(byteLength);
const isOnHeap: constexpr bool = true;
+ const isLengthTracking: constexpr bool = false;
const typedArray = AllocateTypedArray(
- isOnHeap, map, buffer, byteOffset, byteLength, length);
+ isOnHeap, map, buffer, byteOffset, byteLength, length,
+ isLengthTracking);
if constexpr (initialize) {
const backingStore = typedArray.data_ptr;
@@ -107,8 +118,10 @@ transitioning macro TypedArrayInitialize(implicit context: Context)(
} label AttachOffHeapBuffer(bufferObj: Object) {
const buffer = Cast<JSArrayBuffer>(bufferObj) otherwise unreachable;
const isOnHeap: constexpr bool = false;
+ const isLengthTracking: constexpr bool = false;
return AllocateTypedArray(
- isOnHeap, map, buffer, byteOffset, byteLength, length);
+ isOnHeap, map, buffer, byteOffset, byteLength, length,
+ isLengthTracking);
}
}
@@ -204,8 +217,26 @@ transitioning macro ConstructByTypedArray(implicit context: Context)(
// 22.2.4.5 TypedArray ( buffer, byteOffset, length )
// ES #sec-typedarray-buffer-byteoffset-length
transitioning macro ConstructByArrayBuffer(implicit context: Context)(
- map: Map, buffer: JSArrayBuffer, byteOffset: JSAny, length: JSAny,
- elementsInfo: typed_array::TypedArrayElementsInfo): JSTypedArray {
+ target: JSFunction, newTarget: JSReceiver, buffer: JSArrayBuffer,
+ byteOffset: JSAny, length: JSAny): JSTypedArray {
+ let map: Map;
+ const isLengthTracking: bool =
+ IsResizableArrayBuffer(buffer) && (length == Undefined);
+ // Pick the RAB / GSAB map (containing the corresponding RAB / GSAB
+ // ElementsKind). GSAB-backed non-length-tracking TypedArrays behave just like
+ // normal TypedArrays, so exclude them.
+ const rabGsab: bool = IsResizableArrayBuffer(buffer) &&
+ (!IsSharedArrayBuffer(buffer) || isLengthTracking);
+ if (rabGsab) {
+ map = GetDerivedRabGsabMap(target, newTarget);
+ } else {
+ map = GetDerivedMap(target, newTarget);
+ }
+
+ // 5. Let elementSize be the Number value of the Element Size value in Table
+ // 56 for constructorName.
+ const elementsInfo = GetTypedArrayElementsInfo(map);
+
try {
// 6. Let offset be ? ToIndex(byteOffset).
const offset: uintptr = ToIndex(byteOffset) otherwise IfInvalidOffset;
@@ -226,7 +257,13 @@ transitioning macro ConstructByArrayBuffer(implicit context: Context)(
}
// 10. Let bufferByteLength be buffer.[[ArrayBufferByteLength]].
- const bufferByteLength: uintptr = buffer.byte_length;
+ let bufferByteLength: uintptr;
+ if (IsResizableArrayBuffer(buffer) && IsSharedArrayBuffer(buffer)) {
+ bufferByteLength = ToIndex(GrowableSharedArrayBufferByteLength(buffer))
+ otherwise unreachable;
+ } else {
+ bufferByteLength = buffer.byte_length;
+ }
// 11. If length is either not present or undefined, then
if (length == Undefined) {
@@ -261,7 +298,8 @@ transitioning macro ConstructByArrayBuffer(implicit context: Context)(
const isOnHeap: constexpr bool = false;
return AllocateTypedArray(
- isOnHeap, map, buffer, offset, newByteLength, newLength);
+ isOnHeap, map, buffer, offset, newByteLength, newLength,
+ isLengthTracking);
} label IfInvalidAlignment(problemString: String) deferred {
ThrowInvalidTypedArrayAlignment(map, problemString);
} label IfInvalidLength deferred {
@@ -286,6 +324,8 @@ transitioning macro TypedArrayCreateByLength(implicit context: Context)(
// ValidateTypedArray currently returns the array, not the ViewBuffer.
const newTypedArray: JSTypedArray =
ValidateTypedArray(context, newTypedArrayObj, methodName);
+ newTypedArray.bit_field.is_length_tracking = false;
+ newTypedArray.bit_field.is_backed_by_rab = false;
if (IsDetachedBuffer(newTypedArray.buffer)) deferred {
ThrowTypeError(MessageTemplate::kDetachedOperation, methodName);
@@ -336,21 +376,16 @@ transitioning builtin CreateTypedArray(
assert(IsConstructor(target));
// 4. Let O be ? AllocateTypedArray(constructorName, NewTarget,
// "%TypedArrayPrototype%").
- const map = GetDerivedMap(target, newTarget);
-
- // 5. Let elementSize be the Number value of the Element Size value in Table
- // 56 for constructorName.
- const elementsInfo = GetTypedArrayElementsInfo(map);
-
try {
typeswitch (arg1) {
case (length: Smi): {
goto IfConstructByLength(length);
}
case (buffer: JSArrayBuffer): {
- return ConstructByArrayBuffer(map, buffer, arg2, arg3, elementsInfo);
+ return ConstructByArrayBuffer(target, newTarget, buffer, arg2, arg3);
}
case (typedArray: JSTypedArray): {
+ // TODO(v8:11111): Support RAB / GSAB.
ConstructByTypedArray(typedArray) otherwise IfConstructByArrayLike;
}
case (obj: JSReceiver): {
@@ -363,9 +398,18 @@ transitioning builtin CreateTypedArray(
}
}
} label IfConstructByLength(length: JSAny) {
+ const map = GetDerivedMap(target, newTarget);
+ // 5. Let elementSize be the Number value of the Element Size value in Table
+ // 56 for constructorName.
+ const elementsInfo = GetTypedArrayElementsInfo(map);
+
return ConstructByLength(map, length, elementsInfo);
} label IfConstructByArrayLike(
arrayLike: JSReceiver, length: uintptr, bufferConstructor: JSReceiver) {
+ const map = GetDerivedMap(target, newTarget);
+ // 5. Let elementSize be the Number value of the Element Size value in Table
+ // 56 for constructorName.
+ const elementsInfo = GetTypedArrayElementsInfo(map);
return ConstructByArrayLike(
map, arrayLike, length, elementsInfo, bufferConstructor);
}
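
ConstructByArrayBuffer now decides length tracking and the RAB/GSAB map up front, before computing element info, using only the buffer's properties and whether a length argument was passed. The same predicate, restated as a self-contained C++ sketch with illustrative field names:

#include <cstddef>
#include <optional>

struct ArrayBufferInfo {
  bool is_resizable = false;  // RAB, or GSAB when also shared
  bool is_shared = false;
};

struct TypedArrayMapChoice {
  bool use_rab_gsab_map;
  bool is_length_tracking;
};

// Mirrors the Torque logic above: length tracking only for resizable buffers
// with no explicit length, and GSAB-backed fixed-length views keep the normal
// map because they behave like ordinary TypedArrays.
TypedArrayMapChoice ChooseMap(const ArrayBufferInfo& buffer,
                              std::optional<size_t> length) {
  const bool is_length_tracking = buffer.is_resizable && !length.has_value();
  const bool rab_gsab =
      buffer.is_resizable && (!buffer.is_shared || is_length_tracking);
  return {rab_gsab, is_length_tracking};
}
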
diff --git a/chromium/v8/src/builtins/wasm.tq b/chromium/v8/src/builtins/wasm.tq
index 05a15162040..f859d1e0bf2 100644
--- a/chromium/v8/src/builtins/wasm.tq
+++ b/chromium/v8/src/builtins/wasm.tq
@@ -287,8 +287,9 @@ builtin WasmAllocateRtt(typeIndex: intptr, parent: Map): Map {
}
builtin WasmAllocateStructWithRtt(rtt: Map): HeapObject {
- const instanceSize: intptr =
- unsafe::TimesTaggedSize(Convert<intptr>(rtt.instance_size_in_words));
+ const typeInfo: WasmTypeInfo = %RawDownCast<WasmTypeInfo>(
+ rtt.constructor_or_back_pointer_or_native_context);
+ const instanceSize: intptr = SmiUntag(typeInfo.instance_size);
const result: HeapObject = unsafe::Allocate(
instanceSize, AllocationFlag::kAllowLargeObjectAllocation);
*UnsafeConstCast(&result.map) = rtt;
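
WasmAllocateStructWithRtt now takes the allocation size from the WasmTypeInfo reachable through the RTT map instead of multiplying the map's instance_size_in_words by the tagged size. A minimal C++ sketch of the extra indirection, with stand-in fields:

#include <cstdint>

struct WasmTypeInfo { int32_t instance_size; };  // held as a Smi in the diff
struct Map { WasmTypeInfo* type_info; };  // the constructor_or_back_pointer slot

// The exact byte size of the struct instance now lives on the type info,
// rather than being reconstructed from the map's word-granular size field.
int32_t StructInstanceSize(const Map& rtt) {
  return rtt.type_info->instance_size;
}
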
diff --git a/chromium/v8/src/builtins/x64/builtins-x64.cc b/chromium/v8/src/builtins/x64/builtins-x64.cc
index 5b5e964ef95..7fc7c5dec78 100644
--- a/chromium/v8/src/builtins/x64/builtins-x64.cc
+++ b/chromium/v8/src/builtins/x64/builtins-x64.cc
@@ -8,18 +8,19 @@
#include "src/base/bits-iterator.h"
#include "src/base/iterator.h"
#include "src/codegen/code-factory.h"
-#include "src/common/globals.h"
-#include "src/objects/code.h"
+#include "src/codegen/interface-descriptors-inl.h"
// For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop.
#include "src/codegen/macro-assembler-inl.h"
#include "src/codegen/register-configuration.h"
#include "src/codegen/x64/assembler-x64.h"
+#include "src/common/globals.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/execution/frame-constants.h"
#include "src/execution/frames.h"
#include "src/heap/heap-inl.h"
#include "src/logging/counters.h"
#include "src/objects/cell.h"
+#include "src/objects/code.h"
#include "src/objects/debug-objects.h"
#include "src/objects/foreign.h"
#include "src/objects/heap-number.h"
@@ -118,7 +119,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// rax: number of arguments (untagged)
// rdi: constructor function
// rdx: new target
- __ InvokeFunction(rdi, rdx, rax, CALL_FUNCTION);
+ __ InvokeFunction(rdi, rdx, rax, InvokeType::kCall);
// Restore smi-tagged arguments count from the frame.
__ movq(rbx, Operand(rbp, ConstructFrameConstants::kLengthOffset));
@@ -242,7 +243,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ Push(r8);
// Call the function.
- __ InvokeFunction(rdi, rdx, rax, CALL_FUNCTION);
+ __ InvokeFunction(rdi, rdx, rax, InvokeType::kCall);
// ----------- S t a t e -------------
// -- rax constructor result
@@ -383,8 +384,8 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
// Initialize the pointer cage base register.
- // TODO(syg): Actually make a cage.
- __ movq(kPointerCageBaseRegister, arg_reg_1);
+ __ LoadRootRelative(kPtrComprCageBaseRegister,
+ IsolateData::cage_base_offset());
#endif
}
@@ -560,7 +561,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// rdx : new_target
// Clear the context before we push it when entering the internal frame.
- __ Set(rsi, 0);
+ __ Move(rsi, 0);
// Enter an internal frame.
FrameScope scope(masm, StackFrame::INTERNAL);
@@ -687,9 +688,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ StoreTaggedField(
FieldOperand(rdx, JSGeneratorObject::kInputOrDebugPosOffset), rax);
__ RecordWriteField(rdx, JSGeneratorObject::kInputOrDebugPosOffset, rax, rcx,
- kDontSaveFPRegs);
+ SaveFPRegsMode::kIgnore);
- Register decompr_scratch1 = COMPRESS_POINTERS_BOOL ? r11 : no_reg;
+ Register decompr_scratch1 = COMPRESS_POINTERS_BOOL ? r8 : no_reg;
// Load suspended function and context.
__ LoadTaggedPointerField(
@@ -740,20 +741,15 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
rbx, FieldOperand(rdx, JSGeneratorObject::kParametersAndRegistersOffset));
{
- {
- Label done_loop, loop;
- __ movq(r9, rcx);
-
- __ bind(&loop);
- __ decq(r9);
- __ j(less, &done_loop, Label::kNear);
- __ PushTaggedAnyField(
- FieldOperand(rbx, r9, times_tagged_size, FixedArray::kHeaderSize),
- decompr_scratch1);
- __ jmp(&loop);
-
- __ bind(&done_loop);
- }
+ Label done_loop, loop;
+ __ bind(&loop);
+ __ decq(rcx);
+ __ j(less, &done_loop, Label::kNear);
+ __ PushTaggedAnyField(
+ FieldOperand(rbx, rcx, times_tagged_size, FixedArray::kHeaderSize),
+ decompr_scratch1);
+ __ jmp(&loop);
+ __ bind(&done_loop);
// Push the receiver.
__ PushTaggedPointerField(
@@ -841,7 +837,8 @@ static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm,
optimized_code);
__ movq(scratch1, optimized_code); // Write barrier clobbers scratch1 below.
__ RecordWriteField(closure, JSFunction::kCodeOffset, scratch1, scratch2,
- kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ SaveFPRegsMode::kIgnore, RememberedSetAction::kOmit,
+ SmiCheck::kOmit);
}
static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
@@ -1084,7 +1081,7 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
__ LoadAnyTaggedField(
optimized_code_entry,
FieldOperand(feedback_vector, FeedbackVector::kMaybeOptimizedCodeOffset));
- TailCallOptimizedCodeSlot(masm, optimized_code_entry, r11, r15, jump_mode);
+ TailCallOptimizedCodeSlot(masm, optimized_code_entry, r8, r15, jump_mode);
}
// Generate code for entering a JS function with the interpreter.
@@ -1236,10 +1233,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Move(
kInterpreterDispatchTableRegister,
ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
- __ movzxbq(r11, Operand(kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister, times_1, 0));
+ __ movzxbq(kScratchRegister,
+ Operand(kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister, times_1, 0));
__ movq(kJavaScriptCallCodeStartRegister,
- Operand(kInterpreterDispatchTableRegister, r11,
+ Operand(kInterpreterDispatchTableRegister, kScratchRegister,
times_system_pointer_size, 0));
__ call(kJavaScriptCallCodeStartRegister);
masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
@@ -1259,7 +1257,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
kInterpreterBytecodeOffsetRegister, times_1, 0));
AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, rbx, rcx,
- r11, &do_return);
+ r8, &do_return);
__ jmp(&do_dispatch);
__ bind(&do_return);
@@ -1558,15 +1556,16 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
}
// Dispatch to the target bytecode.
- __ movzxbq(r11, Operand(kInterpreterBytecodeArrayRegister,
- kInterpreterBytecodeOffsetRegister, times_1, 0));
+ __ movzxbq(kScratchRegister,
+ Operand(kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister, times_1, 0));
__ movq(kJavaScriptCallCodeStartRegister,
- Operand(kInterpreterDispatchTableRegister, r11,
+ Operand(kInterpreterDispatchTableRegister, kScratchRegister,
times_system_pointer_size, 0));
__ jmp(kJavaScriptCallCodeStartRegister);
}
-void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
+void Builtins::Generate_InterpreterEnterAtNextBytecode(MacroAssembler* masm) {
// Get bytecode array and bytecode offset from the stack frame.
__ movq(kInterpreterBytecodeArrayRegister,
Operand(rbp, InterpreterFrameConstants::kBytecodeArrayFromFp));
@@ -1587,7 +1586,7 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
Label if_return;
AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, rbx, rcx,
- r11, &if_return);
+ r8, &if_return);
__ bind(&enter_bytecode);
// Convert new bytecode offset to a Smi and save in the stackframe.
@@ -1611,29 +1610,38 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
__ Abort(AbortReason::kInvalidBytecodeAdvance);
}
-void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
+void Builtins::Generate_InterpreterEnterAtBytecode(MacroAssembler* masm) {
Generate_InterpreterEnterBytecode(masm);
}
// static
void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
+ Register feedback_vector = r8;
+ Register optimization_state = rcx;
+ Register return_address = r15;
+
+#ifdef DEBUG
+ for (auto reg : BaselineOutOfLinePrologueDescriptor::registers()) {
+ DCHECK(
+ !AreAliased(feedback_vector, optimization_state, return_address, reg));
+ }
+#endif
+
auto descriptor = Builtins::CallInterfaceDescriptorFor(
Builtins::kBaselineOutOfLinePrologue);
Register closure = descriptor.GetRegisterParameter(
BaselineOutOfLinePrologueDescriptor::kClosure);
// Load the feedback vector from the closure.
- Register feedback_vector = r11;
__ LoadTaggedPointerField(
feedback_vector, FieldOperand(closure, JSFunction::kFeedbackCellOffset));
__ LoadTaggedPointerField(feedback_vector,
FieldOperand(feedback_vector, Cell::kValueOffset));
- if (__ emit_debug_code()) {
+ if (FLAG_debug_code) {
__ CmpObjectType(feedback_vector, FEEDBACK_VECTOR_TYPE, kScratchRegister);
__ Assert(equal, AbortReason::kExpectedFeedbackVector);
}
// Check for an optimization marker.
- Register optimization_state = rcx;
Label has_optimized_code_or_marker;
LoadOptimizationStateAndJumpIfNeedsProcessing(
masm, optimization_state, feedback_vector, &has_optimized_code_or_marker);
@@ -1642,8 +1650,6 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
__ incl(
FieldOperand(feedback_vector, FeedbackVector::kInvocationCountOffset));
- Register return_address = r15;
-
__ RecordComment("[ Frame Setup");
// Save the return address, so that we can push it to the end of the newly
// set-up frame once we're done setting it up.
@@ -1723,8 +1729,8 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
// return since we may do a runtime call along the way that requires the
// stack to only contain valid frames.
__ Drop(1);
- MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, rcx, feedback_vector,
- JumpMode::kPushAndReturn);
+ MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
+ masm, optimization_state, feedback_vector, JumpMode::kPushAndReturn);
__ Trap();
__ RecordComment("]");
}
@@ -1840,7 +1846,7 @@ void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
void Builtins::Generate_TailCallOptimizedCodeSlot(MacroAssembler* masm) {
Register optimized_code_entry = kJavaScriptCallCodeStartRegister;
- TailCallOptimizedCodeSlot(masm, optimized_code_entry, r11, r15,
+ TailCallOptimizedCodeSlot(masm, optimized_code_entry, r8, r15,
JumpMode::kJump);
}
@@ -1905,7 +1911,7 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// Function.prototype.apply() yet, we use a normal Call builtin here.
__ bind(&no_arguments);
{
- __ Set(rax, 0);
+ __ Move(rax, 0);
__ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
}
}
@@ -2062,6 +2068,7 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
}
// static
+// TODO(v8:11615): Observe Code::kMaxArguments in CallOrConstructVarargs
void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
Handle<Code> code) {
// ----------- S t a t e -------------
@@ -2072,14 +2079,13 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// -- rdx : new.target (for [[Construct]])
// -- rsp[0] : return address
// -----------------------------------
- Register scratch = r11;
- if (masm->emit_debug_code()) {
+ if (FLAG_debug_code) {
// Allow rbx to be a FixedArray, or a FixedDoubleArray if rcx == 0.
Label ok, fail;
__ AssertNotSmi(rbx);
Register map = r9;
- __ LoadTaggedPointerField(map, FieldOperand(rbx, HeapObject::kMapOffset));
+ __ LoadMap(map, rbx);
__ CmpInstanceType(map, FIXED_ARRAY_TYPE);
__ j(equal, &ok);
__ CmpInstanceType(map, FIXED_DOUBLE_ARRAY_TYPE);
@@ -2101,13 +2107,13 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// including the receiver and the return address.
{
Label copy, check;
- Register src = r8, dest = rsp, num = r9, current = r11;
+ Register src = r8, dest = rsp, num = r9, current = r12;
__ movq(src, rsp);
__ leaq(kScratchRegister, Operand(rcx, times_system_pointer_size, 0));
__ AllocateStackSpace(kScratchRegister);
__ leaq(num, Operand(rax, 2)); // Number of words to copy.
// +2 for receiver and return address.
- __ Set(current, 0);
+ __ Move(current, 0);
__ jmp(&check);
__ bind(&copy);
__ movq(kScratchRegister,
@@ -2123,9 +2129,9 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// Copy the additional arguments onto the stack.
{
- Register value = scratch;
+ Register value = r12;
Register src = rbx, dest = r8, num = rcx, current = r9;
- __ Set(current, 0);
+ __ Move(current, 0);
Label done, push, loop;
__ bind(&loop);
__ cmpl(current, num);
@@ -2166,7 +2172,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
if (mode == CallOrConstructMode::kConstruct) {
Label new_target_constructor, new_target_not_constructor;
__ JumpIfSmi(rdx, &new_target_not_constructor, Label::kNear);
- __ LoadTaggedPointerField(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
+ __ LoadMap(rbx, rdx);
__ testb(FieldOperand(rbx, Map::kBitFieldOffset),
Immediate(Map::Bits1::IsConstructorBit::kMask));
__ j(not_zero, &new_target_constructor, Label::kNear);
@@ -2203,13 +2209,13 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
// including the receiver and the return address.
{
Label copy, check;
- Register src = r9, dest = rsp, num = r12, current = r11;
+ Register src = r9, dest = rsp, num = r12, current = r15;
__ movq(src, rsp);
__ leaq(kScratchRegister, Operand(r8, times_system_pointer_size, 0));
__ AllocateStackSpace(kScratchRegister);
__ leaq(num, Operand(rax, 2)); // Number of words to copy.
// +2 for receiver and return address.
- __ Set(current, 0);
+ __ Move(current, 0);
__ jmp(&check);
__ bind(&copy);
__ movq(kScratchRegister,
@@ -2359,7 +2365,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ movzxwq(
rbx, FieldOperand(rdx, SharedFunctionInfo::kFormalParameterCountOffset));
- __ InvokeFunctionCode(rdi, no_reg, rbx, rax, JUMP_FUNCTION);
+ __ InvokeFunctionCode(rdi, no_reg, rbx, rax, InvokeType::kJump);
// The function is a "classConstructor", need to raise an exception.
__ bind(&class_constructor);
@@ -2592,7 +2598,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
__ JumpIfSmi(rdi, &non_constructor);
// Check if target has a [[Construct]] internal method.
- __ LoadTaggedPointerField(rcx, FieldOperand(rdi, HeapObject::kMapOffset));
+ __ LoadMap(rcx, rdi);
__ testb(FieldOperand(rcx, Map::kBitFieldOffset),
Immediate(Map::Bits1::IsConstructorBit::kMask));
__ j(zero, &non_constructor);
@@ -2682,15 +2688,17 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
}
void Builtins::Generate_BaselineOnStackReplacement(MacroAssembler* masm) {
+ __ movq(kContextRegister,
+ MemOperand(rbp, BaselineFrameConstants::kContextOffset));
return OnStackReplacement(masm, false);
}
#if V8_ENABLE_WEBASSEMBLY
void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
// The function index was pushed to the stack by the caller as int32.
- __ Pop(r11);
+ __ Pop(r15);
// Convert to Smi for the runtime call.
- __ SmiTag(r11);
+ __ SmiTag(r15);
{
HardAbortScope hard_abort(masm); // Avoid calls to Abort.
FrameScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
@@ -2717,13 +2725,13 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
// Push the Wasm instance as an explicit argument to WasmCompileLazy.
__ Push(kWasmInstanceRegister);
// Push the function index as second argument.
- __ Push(r11);
+ __ Push(r15);
// Initialize the JavaScript context with 0. CEntry will use it to
// set the current context on the isolate.
__ Move(kContextRegister, Smi::zero());
__ CallRuntime(Runtime::kWasmCompileLazy, 2);
// The entrypoint address is the return value.
- __ movq(r11, kReturnRegister0);
+ __ movq(r15, kReturnRegister0);
// Restore registers.
for (DoubleRegister reg : base::Reversed(wasm::kFpParamRegisters)) {
@@ -2737,7 +2745,7 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
}
}
// Finally, jump to the entrypoint.
- __ jmp(r11);
+ __ jmp(r15);
}
void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) {
@@ -2915,7 +2923,7 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
__ LoadExternalPointerField(
signature,
FieldOperand(foreign_signature, Foreign::kForeignAddressOffset),
- kForeignForeignAddressTag);
+ kForeignForeignAddressTag, kScratchRegister);
foreign_signature = no_reg;
Register return_count = r8;
__ movq(return_count,
@@ -3243,28 +3251,17 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
thread_in_wasm_flag_addr,
MemOperand(kRootRegister, Isolate::thread_in_wasm_flag_address_offset()));
__ movl(MemOperand(thread_in_wasm_flag_addr, 0), Immediate(1));
-
- Register jump_table_start = thread_in_wasm_flag_addr;
- __ movq(jump_table_start,
- MemOperand(wasm_instance,
- wasm::ObjectAccess::ToTagged(
- WasmInstanceObject::kJumpTableStartOffset)));
thread_in_wasm_flag_addr = no_reg;
- Register jump_table_offset = function_data;
- __ LoadAnyTaggedField(
- jump_table_offset,
- MemOperand(
- function_data,
- WasmExportedFunctionData::kJumpTableOffsetOffset - kHeapObjectTag));
-
- // Change from smi to integer.
- __ SmiUntag(jump_table_offset);
-
- Register function_entry = jump_table_offset;
- __ addq(function_entry, jump_table_start);
- jump_table_offset = no_reg;
- jump_table_start = no_reg;
+ Register function_entry = function_data;
+ Register scratch = r12;
+ __ LoadExternalPointerField(
+ function_entry,
+ FieldOperand(function_data,
+ WasmExportedFunctionData::kForeignAddressOffset),
+ kForeignForeignAddressTag, scratch);
+ function_data = no_reg;
+ scratch = no_reg;
// We set the indicating value for the GC to the proper one for Wasm call.
constexpr int kWasmCallGCScanSlotCount = 0;
@@ -3349,6 +3346,9 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
// Param conversion builtins.
// -------------------------------------------
__ bind(&convert_param);
+ // Restore function_data register (which was clobbered by the code above,
+ // but was valid when jumping here earlier).
+ function_data = rdi;
// The order of pushes is important. We want the heap objects, that should be
// scanned by GC, to be on the top of the stack.
// We have to set the indicating value for the GC to the number of values on
@@ -3527,6 +3527,13 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
__ jmp(&compile_wrapper_done);
}
+void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) {
+ MemOperand OSRTargetSlot(rbp, -wasm::kOSRTargetOffset);
+ __ movq(kScratchRegister, OSRTargetSlot);
+ __ movq(OSRTargetSlot, Immediate(0));
+ __ jmp(kScratchRegister);
+}
+
#endif // V8_ENABLE_WEBASSEMBLY
void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
@@ -3538,7 +3545,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// rsp: stack pointer (restored after C call)
// rsi: current context (restored)
//
- // If argv_mode == kArgvInRegister:
+ // If argv_mode == ArgvMode::kRegister:
// r15: pointer to the first argument
#ifdef V8_TARGET_OS_WIN
@@ -3569,15 +3576,15 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
int arg_stack_space =
kArgExtraStackSpace +
(result_size <= kMaxRegisterResultSize ? 0 : result_size);
- if (argv_mode == kArgvInRegister) {
- DCHECK(save_doubles == kDontSaveFPRegs);
+ if (argv_mode == ArgvMode::kRegister) {
+ DCHECK(save_doubles == SaveFPRegsMode::kIgnore);
DCHECK(!builtin_exit_frame);
__ EnterApiExitFrame(arg_stack_space);
// Move argc into r12 (argv is already in r15).
__ movq(r12, rax);
} else {
__ EnterExitFrame(
- arg_stack_space, save_doubles == kSaveFPRegs,
+ arg_stack_space, save_doubles == SaveFPRegsMode::kSave,
builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT);
}
@@ -3641,7 +3648,8 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
}
// Exit the JavaScript to C++ exit frame.
- __ LeaveExitFrame(save_doubles == kSaveFPRegs, argv_mode == kArgvOnStack);
+ __ LeaveExitFrame(save_doubles == SaveFPRegsMode::kSave,
+ argv_mode == ArgvMode::kStack);
__ ret(0);
// Handling of exception.
@@ -3866,9 +3874,7 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
Register map = rcx;
__ JumpIfSmi(return_value, &ok, Label::kNear);
- __ LoadTaggedPointerField(map,
- FieldOperand(return_value, HeapObject::kMapOffset));
-
+ __ LoadMap(map, return_value);
__ CmpInstanceType(map, LAST_NAME_TYPE);
__ j(below_equal, &ok, Label::kNear);
@@ -4053,7 +4059,7 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
Register holder = ApiGetterDescriptor::HolderRegister();
Register callback = ApiGetterDescriptor::CallbackRegister();
Register scratch = rax;
- Register decompr_scratch1 = COMPRESS_POINTERS_BOOL ? r11 : no_reg;
+ Register decompr_scratch1 = COMPRESS_POINTERS_BOOL ? r15 : no_reg;
DCHECK(!AreAliased(receiver, holder, callback, scratch, decompr_scratch1));
@@ -4116,7 +4122,7 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
__ LoadExternalPointerField(
api_function_address,
FieldOperand(scratch, Foreign::kForeignAddressOffset),
- kForeignForeignAddressTag);
+ kForeignForeignAddressTag, kScratchRegister);
// +3 is to skip prolog, return address and name handle.
Operand return_value_operand(
@@ -4172,7 +4178,7 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
// We use this to keep the value of the fifth argument temporarily.
// Unfortunately we can't store it directly in r8 (used for passing
// this on linux), since it is another parameter passing register on windows.
- Register arg5 = r11;
+ Register arg5 = r15;
__ movq(arg_reg_3, Immediate(Deoptimizer::kFixedExitSizeMarker));
// Get the address of the location in the code object
@@ -4192,7 +4198,7 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm,
__ movq(rax, Operand(rbp, StandardFrameConstants::kFunctionOffset));
__ bind(&context_check);
__ movq(arg_reg_1, rax);
- __ Set(arg_reg_2, static_cast<int>(deopt_kind));
+ __ Move(arg_reg_2, static_cast<int>(deopt_kind));
// Args 3 and 4 are already in the right registers.
// On windows put the arguments on the stack (PrepareCallCFunction
diff --git a/chromium/v8/src/codegen/arm/assembler-arm-inl.h b/chromium/v8/src/codegen/arm/assembler-arm-inl.h
index 7035fa2492b..f72e27703e9 100644
--- a/chromium/v8/src/codegen/arm/assembler-arm-inl.h
+++ b/chromium/v8/src/codegen/arm/assembler-arm-inl.h
@@ -206,7 +206,7 @@ Operand::Operand(Smi value) : rmode_(RelocInfo::NONE) {
Operand::Operand(Register rm) : rm_(rm), shift_op_(LSL), shift_imm_(0) {}
void Assembler::CheckBuffer() {
- if (buffer_space() <= kGap) {
+ if (V8_UNLIKELY(buffer_space() <= kGap)) {
GrowBuffer();
}
MaybeCheckConstPool();
diff --git a/chromium/v8/src/codegen/arm/assembler-arm.cc b/chromium/v8/src/codegen/arm/assembler-arm.cc
index 17a20a6f977..09c57928ffa 100644
--- a/chromium/v8/src/codegen/arm/assembler-arm.cc
+++ b/chromium/v8/src/codegen/arm/assembler-arm.cc
@@ -534,9 +534,8 @@ Assembler::Assembler(const AssemblerOptions& options,
: AssemblerBase(options, std::move(buffer)),
pending_32_bit_constants_(),
scratch_register_list_(ip.bit()) {
- pending_32_bit_constants_.reserve(kMinNumPendingConstants);
reloc_info_writer.Reposition(buffer_start_ + buffer_->size(), pc_);
- next_buffer_check_ = 0;
+ constant_pool_deadline_ = kMaxInt;
const_pool_blocked_nesting_ = 0;
no_const_pool_before_ = 0;
first_const_pool_32_use_ = -1;
@@ -556,7 +555,10 @@ Assembler::Assembler(const AssemblerOptions& options,
}
}
-Assembler::~Assembler() { DCHECK_EQ(const_pool_blocked_nesting_, 0); }
+Assembler::~Assembler() {
+ DCHECK_EQ(const_pool_blocked_nesting_, 0);
+ DCHECK_EQ(first_const_pool_32_use_, -1);
+}
void Assembler::GetCode(Isolate* isolate, CodeDesc* desc,
SafepointTableBuilder* safepoint_table_builder,
@@ -841,7 +843,7 @@ void Assembler::target_at_put(int pos, int target_pos) {
// orr dst, dst, #target8_2 << 16
uint32_t target24 = target_pos + (Code::kHeaderSize - kHeapObjectTag);
- DCHECK(is_uint24(target24));
+ CHECK(is_uint24(target24));
if (is_uint8(target24)) {
// If the target fits in a byte then only patch with a mov
// instruction.
@@ -897,7 +899,7 @@ void Assembler::target_at_put(int pos, int target_pos) {
instr &= ~kImm24Mask;
}
int imm24 = imm26 >> 2;
- DCHECK(is_int24(imm24));
+ CHECK(is_int24(imm24));
instr_at_put(pos, instr | (imm24 & kImm24Mask));
}
@@ -1030,10 +1032,53 @@ namespace {
bool FitsShifter(uint32_t imm32, uint32_t* rotate_imm, uint32_t* immed_8,
Instr* instr) {
// imm32 must be unsigned.
- for (int rot = 0; rot < 16; rot++) {
- uint32_t imm8 = base::bits::RotateLeft32(imm32, 2 * rot);
- if ((imm8 <= 0xFF)) {
- *rotate_imm = rot;
+ {
+ // 32-bit immediates can be encoded as:
+ // (8-bit value, 2*N bit left rotation)
+ // e.g. 0xab00 can be encoded as 0xab shifted left by 8 == 2*4, i.e.
+ // (0xab, 4)
+ //
+ // Check three categories which cover all possible shifter fits:
+ // 1. 0x000000FF: The value is already 8-bit (no shifting necessary),
+ // 2. 0x000FF000: The 8-bit value is somewhere in the middle of the 32-bit
+ // value, and
+ // 3. 0xF000000F: The 8-bit value is split over the beginning and end of
+ // the 32-bit value.
+
+ // For 0x000000FF.
+ if (imm32 <= 0xFF) {
+ *rotate_imm = 0;
+ *immed_8 = imm32;
+ return true;
+ }
+ // For 0x000FF000, count trailing zeros and shift down to 0x000000FF. Note
+ // that we have to round the trailing zeros down to the nearest multiple of
+ // two, since we can only encode shifts of 2*N. Note also that we know that
+ // imm32 isn't zero, since we already checked if it's less than 0xFF.
+ int half_trailing_zeros = base::bits::CountTrailingZerosNonZero(imm32) / 2;
+ uint32_t imm8 = imm32 >> (half_trailing_zeros * 2);
+ if (imm8 <= 0xFF) {
+ DCHECK_GT(half_trailing_zeros, 0);
+ // Rotating right by trailing_zeros is equivalent to rotating left by
+ // 32 - trailing_zeros. We return rotate_right / 2, so calculate
+ // (32 - trailing_zeros)/2 == 16 - trailing_zeros/2.
+ *rotate_imm = (16 - half_trailing_zeros);
+ *immed_8 = imm8;
+ return true;
+ }
+ // For 0xF000000F, rotate by 16 to get 0x000FF000 and continue as if it
+ // were that case.
+ uint32_t imm32_rot16 = base::bits::RotateLeft32(imm32, 16);
+ half_trailing_zeros =
+ base::bits::CountTrailingZerosNonZero(imm32_rot16) / 2;
+ imm8 = imm32_rot16 >> (half_trailing_zeros * 2);
+ if (imm8 <= 0xFF) {
+ // We've rotated left by 2*8, so we can't have more than that many
+ // trailing zeroes.
+ DCHECK_LT(half_trailing_zeros, 8);
+ // We've already rotated by 2*8, before calculating trailing_zeros/2,
+ // so we need (32 - (16 + trailing_zeros))/2 == 8 - trailing_zeros/2.
+ *rotate_imm = 8 - half_trailing_zeros;
*immed_8 = imm8;
return true;
}
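For illustration only (not part of the patch), here is a standalone sketch of the same three-category shifter-operand check, using C++20 <bit> helpers as stand-ins for base::bits::CountTrailingZerosNonZero and base::bits::RotateLeft32 (an assumption made purely to keep the sketch self-contained). Feeding it 0xAB00 yields immed_8 = 0xAB and rotate_imm = 12, i.e. 0xAB rotated right by 2*12 = 24 bits.

    #include <bit>
    #include <cstdint>
    #include <cstdio>

    // Returns true if imm32 can be encoded as an ARM shifter operand: an
    // 8-bit value rotated right by 2*rotate_imm bits.
    bool FitsShifterSketch(uint32_t imm32, uint32_t* rotate_imm,
                           uint32_t* immed_8) {
      // Category 1: the value is already 8-bit.
      if (imm32 <= 0xFF) {
        *rotate_imm = 0;
        *immed_8 = imm32;
        return true;
      }
      // Category 2: the 8-bit value sits in the middle, e.g. 0x000FF000.
      int half_trailing_zeros = std::countr_zero(imm32) / 2;
      uint32_t imm8 = imm32 >> (half_trailing_zeros * 2);
      if (imm8 <= 0xFF) {
        *rotate_imm = 16 - half_trailing_zeros;
        *immed_8 = imm8;
        return true;
      }
      // Category 3: the 8-bit value wraps around both ends, e.g. 0xF000000F.
      uint32_t imm32_rot16 = std::rotl(imm32, 16);
      half_trailing_zeros = std::countr_zero(imm32_rot16) / 2;
      imm8 = imm32_rot16 >> (half_trailing_zeros * 2);
      if (imm8 <= 0xFF) {
        *rotate_imm = 8 - half_trailing_zeros;
        *immed_8 = imm8;
        return true;
      }
      return false;
    }

    int main() {
      uint32_t rot = 0, imm8 = 0;
      if (FitsShifterSketch(0xAB00, &rot, &imm8)) {
        std::printf("immed_8=0x%X rotate_imm=%u\n", imm8, rot);  // 0xAB, 12
      }
      return 0;
    }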
@@ -2258,7 +2303,7 @@ void Assembler::bkpt(uint32_t imm16) {
}
void Assembler::svc(uint32_t imm24, Condition cond) {
- DCHECK(is_uint24(imm24));
+ CHECK(is_uint24(imm24));
emit(cond | 15 * B24 | imm24);
}
@@ -5204,8 +5249,13 @@ void Assembler::ConstantPoolAddEntry(int position, RelocInfo::Mode rmode,
(rmode == RelocInfo::CODE_TARGET && value != 0) ||
(RelocInfo::IsEmbeddedObjectMode(rmode) && value != 0);
DCHECK_LT(pending_32_bit_constants_.size(), kMaxNumPending32Constants);
- if (pending_32_bit_constants_.empty()) {
+ if (first_const_pool_32_use_ < 0) {
+ DCHECK(pending_32_bit_constants_.empty());
+ DCHECK_EQ(constant_pool_deadline_, kMaxInt);
first_const_pool_32_use_ = position;
+ constant_pool_deadline_ = position + kCheckPoolDeadline;
+ } else {
+ DCHECK(!pending_32_bit_constants_.empty());
}
ConstantPoolEntry entry(position, value, sharing_ok, rmode);
@@ -5224,7 +5274,7 @@ void Assembler::ConstantPoolAddEntry(int position, RelocInfo::Mode rmode,
}
}
- pending_32_bit_constants_.push_back(entry);
+ pending_32_bit_constants_.emplace_back(entry);
// Make sure the constant pool is not emitted in place of the next
// instruction for which we just recorded relocation info.
@@ -5239,17 +5289,17 @@ void Assembler::ConstantPoolAddEntry(int position, RelocInfo::Mode rmode,
void Assembler::BlockConstPoolFor(int instructions) {
int pc_limit = pc_offset() + instructions * kInstrSize;
if (no_const_pool_before_ < pc_limit) {
- // Max pool start (if we need a jump and an alignment).
-#ifdef DEBUG
- int start = pc_limit + kInstrSize + 2 * kPointerSize;
- DCHECK(pending_32_bit_constants_.empty() ||
- (start < first_const_pool_32_use_ + kMaxDistToIntPool));
-#endif
no_const_pool_before_ = pc_limit;
}
- if (next_buffer_check_ < no_const_pool_before_) {
- next_buffer_check_ = no_const_pool_before_;
+ // If we're due a const pool check before the block finishes, move it to just
+ // after the block.
+ if (constant_pool_deadline_ < no_const_pool_before_) {
+ // Make sure that the new deadline isn't too late (including a jump and the
+ // constant pool marker).
+ DCHECK_LE(no_const_pool_before_,
+ first_const_pool_32_use_ + kMaxDistToIntPool);
+ constant_pool_deadline_ = no_const_pool_before_;
}
}
@@ -5265,49 +5315,44 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
// There is nothing to do if there are no pending constant pool entries.
if (pending_32_bit_constants_.empty()) {
- // Calculate the offset of the next check.
- next_buffer_check_ = pc_offset() + kCheckPoolInterval;
+ // We should only fall into this case if we're either trying to force
+ // emission or opportunistically checking after a jump.
+ DCHECK(force_emit || !require_jump);
return;
}
- // Check that the code buffer is large enough before emitting the constant
- // pool (include the jump over the pool and the constant pool marker and
- // the gap to the relocation information).
- int jump_instr = require_jump ? kInstrSize : 0;
- int size_up_to_marker = jump_instr + kInstrSize;
- int estimated_size_after_marker =
- pending_32_bit_constants_.size() * kPointerSize;
- int estimated_size = size_up_to_marker + estimated_size_after_marker;
-
// We emit a constant pool when:
// * requested to do so by parameter force_emit (e.g. after each function).
// * the distance from the first instruction accessing the constant pool to
- // any of the constant pool entries will exceed its limit the next
- // time the pool is checked. This is overly restrictive, but we don't emit
- // constant pool entries in-order so it's conservatively correct.
+ // the first constant pool entry will exceed its limit the next time the
+ // pool is checked.
// * the instruction doesn't require a jump after itself to jump over the
// constant pool, and we're getting close to running out of range.
if (!force_emit) {
- DCHECK(!pending_32_bit_constants_.empty());
- bool need_emit = false;
- int dist32 = pc_offset() + estimated_size - first_const_pool_32_use_;
- if ((dist32 >= kMaxDistToIntPool - kCheckPoolInterval) ||
- (!require_jump && (dist32 >= kMaxDistToIntPool / 2))) {
- need_emit = true;
+ DCHECK_NE(first_const_pool_32_use_, -1);
+ int dist32 = pc_offset() - first_const_pool_32_use_;
+ if (require_jump) {
+ // We should only be on this path if we've exceeded our deadline.
+ DCHECK_GE(dist32, kCheckPoolDeadline);
+ } else if (dist32 < kCheckPoolDeadline / 2) {
+ return;
}
- if (!need_emit) return;
}
- // Deduplicate constants.
- int size_after_marker = estimated_size_after_marker;
+ int size_after_marker = pending_32_bit_constants_.size() * kPointerSize;
+ // Deduplicate constants.
for (size_t i = 0; i < pending_32_bit_constants_.size(); i++) {
ConstantPoolEntry& entry = pending_32_bit_constants_[i];
if (entry.is_merged()) size_after_marker -= kPointerSize;
}
+ // Check that the code buffer is large enough before emitting the constant
+ // pool (include the jump over the pool and the constant pool marker and
+ // the gap to the relocation information).
+ int jump_instr = require_jump ? kInstrSize : 0;
+ int size_up_to_marker = jump_instr + kInstrSize;
int size = size_up_to_marker + size_after_marker;
-
int needed_space = size + kGap;
while (buffer_space() <= needed_space) GrowBuffer();
@@ -5331,6 +5376,14 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
emit(kConstantPoolMarker |
EncodeConstantPoolLength(size_after_marker / kPointerSize));
+ // The first entry in the constant pool should also be the first use of the pool.
+ CHECK_EQ(first_const_pool_32_use_, pending_32_bit_constants_[0].position());
+ CHECK(!pending_32_bit_constants_[0].is_merged());
+
+ // Make sure we're not emitting the constant too late.
+ CHECK_LE(pc_offset(),
+ first_const_pool_32_use_ + kMaxDistToPcRelativeConstant);
+
// Emit 32-bit constant pool entries.
for (size_t i = 0; i < pending_32_bit_constants_.size(); i++) {
ConstantPoolEntry& entry = pending_32_bit_constants_[i];
@@ -5354,6 +5407,7 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
ConstantPoolEntry& merged =
pending_32_bit_constants_[entry.merged_index()];
DCHECK(entry.value() == merged.value());
+ DCHECK_LT(merged.position(), entry.position());
Instr merged_instr = instr_at(merged.position());
DCHECK(IsLdrPcImmediateOffset(merged_instr));
delta = GetLdrRegisterImmediateOffset(merged_instr);
@@ -5379,9 +5433,9 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
}
}
- // Since a constant pool was just emitted, move the check offset forward by
- // the standard interval.
- next_buffer_check_ = pc_offset() + kCheckPoolInterval;
+ // Since a constant pool was just emitted, we don't need another check until
+ // the next constant pool entry is added.
+ constant_pool_deadline_ = kMaxInt;
}
PatchingAssembler::PatchingAssembler(const AssemblerOptions& options,
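Taken together, the assembler-arm.cc changes above replace the periodic next_buffer_check_ polling with a single deadline that is armed when the first constant is recorded and disarmed once the pool is emitted. A minimal model of that lifecycle, with plain ints standing in for the real Assembler state (an illustration, not V8 code):

    #include <climits>
    #include <vector>

    // Simplified model of the deadline-driven constant pool bookkeeping.
    struct ConstPoolModel {
      static constexpr int kCheckPoolDeadline = 4000;  // stand-in value
      int first_use = -1;        // first_const_pool_32_use_
      int deadline = INT_MAX;    // constant_pool_deadline_
      std::vector<int> pending;  // positions of pending 32-bit constants

      void AddEntry(int position) {
        if (first_use < 0) {  // the first pending constant arms the deadline
          first_use = position;
          deadline = position + kCheckPoolDeadline;
        }
        pending.push_back(position);
      }

      bool ShouldCheck(int pc_offset) const { return pc_offset >= deadline; }

      void EmitPool() {  // after emission, disarm until the next entry
        pending.clear();
        first_use = -1;
        deadline = INT_MAX;
      }
    };

Blocking scopes only move the deadline past the blocked region, and EndBlockConstPool (in the header diff below) restores it to first_use + kCheckPoolDeadline.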
diff --git a/chromium/v8/src/codegen/arm/assembler-arm.h b/chromium/v8/src/codegen/arm/assembler-arm.h
index e0490a68533..04d5eef054d 100644
--- a/chromium/v8/src/codegen/arm/assembler-arm.h
+++ b/chromium/v8/src/codegen/arm/assembler-arm.h
@@ -45,6 +45,7 @@
#include <memory>
#include <vector>
+#include "src/base/small-vector.h"
#include "src/codegen/arm/constants-arm.h"
#include "src/codegen/arm/register-arm.h"
#include "src/codegen/assembler.h"
@@ -310,7 +311,10 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
~Assembler() override;
- void AbortedCodeGeneration() override { pending_32_bit_constants_.clear(); }
+ void AbortedCodeGeneration() override {
+ pending_32_bit_constants_.clear();
+ first_const_pool_32_use_ = -1;
+ }
// GetCode emits any pending (non-emitted) code and fills the descriptor desc.
static constexpr int kNoHandlerTable = 0;
@@ -1148,13 +1152,24 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
static int DecodeShiftImm(Instr instr);
static Instr PatchShiftImm(Instr instr, int immed);
- // Constants in pools are accessed via pc relative addressing, which can
- // reach +/-4KB for integer PC-relative loads and +/-1KB for floating-point
+ // Constants are accessed via pc relative addressing, which can reach −4095 to
+ // 4095 for integer PC-relative loads, and −1020 to 1020 for floating-point
// PC-relative loads, thereby defining a maximum distance between the
- // instruction and the accessed constant.
- static constexpr int kMaxDistToIntPool = 4 * KB;
- // All relocations could be integer, it therefore acts as the limit.
- static constexpr int kMinNumPendingConstants = 4;
+ // instruction and the accessed constant. Additionally, PC-relative loads
+ // start at a delta from the actual load instruction's PC, so we can add this
+ // on to the (positive) distance.
+ static constexpr int kMaxDistToPcRelativeConstant =
+ 4095 + Instruction::kPcLoadDelta;
+ // The constant pool needs to be jumped over, and has a marker, so the actual
+ // distance between the instruction and the start of the constant pool has to include
+ // space for these two instructions.
+ static constexpr int kMaxDistToIntPool =
+ kMaxDistToPcRelativeConstant - 2 * kInstrSize;
+ // Experimentally derived as sufficient for ~95% of compiles.
+ static constexpr int kTypicalNumPending32Constants = 32;
+ // The maximum number of pending constants is reached by a sequence of only
+ // constant loads, which limits it to the number of constant loads that can
+ // fit between the first constant load and the distance to the constant pool.
static constexpr int kMaxNumPending32Constants =
kMaxDistToIntPool / kInstrSize;
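Plugging in the usual ARM values, kPcLoadDelta == 8 and kInstrSize == 4 (assumed here; neither constant appears in this hunk), the new limits work out to:

    kMaxDistToPcRelativeConstant = 4095 + 8     = 4103 bytes
    kMaxDistToIntPool            = 4103 - 2 * 4 = 4095 bytes
    kMaxNumPending32Constants    = 4095 / 4     = 1023 entries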
@@ -1165,8 +1180,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Check if is time to emit a constant pool.
void CheckConstPool(bool force_emit, bool require_jump);
- void MaybeCheckConstPool() {
- if (pc_offset() >= next_buffer_check_) {
+ V8_INLINE void MaybeCheckConstPool() {
+ if (V8_UNLIKELY(pc_offset() >= constant_pool_deadline_)) {
CheckConstPool(false, true);
}
}
@@ -1192,9 +1207,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// number of calls to EndBlockConstPool.
void StartBlockConstPool() {
if (const_pool_blocked_nesting_++ == 0) {
- // Prevent constant pool checks happening by setting the next check to
- // the biggest possible offset.
- next_buffer_check_ = kMaxInt;
+ // Prevent constant pool checks happening by resetting the deadline.
+ constant_pool_deadline_ = kMaxInt;
}
}
@@ -1202,19 +1216,14 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// StartBlockConstPool to have an effect.
void EndBlockConstPool() {
if (--const_pool_blocked_nesting_ == 0) {
+ if (first_const_pool_32_use_ >= 0) {
#ifdef DEBUG
- // Max pool start (if we need a jump and an alignment).
- int start = pc_offset() + kInstrSize + 2 * kPointerSize;
- // Check the constant pool hasn't been blocked for too long.
- DCHECK(pending_32_bit_constants_.empty() ||
- (start < first_const_pool_32_use_ + kMaxDistToIntPool));
+ // Check the constant pool hasn't been blocked for too long.
+ DCHECK_LE(pc_offset(), first_const_pool_32_use_ + kMaxDistToIntPool);
#endif
- // Two cases:
- // * no_const_pool_before_ >= next_buffer_check_ and the emission is
- // still blocked
- // * no_const_pool_before_ < next_buffer_check_ and the next emit will
- // trigger a check.
- next_buffer_check_ = no_const_pool_before_;
+ // Reset the constant pool check back to the deadline.
+ constant_pool_deadline_ = first_const_pool_32_use_ + kCheckPoolDeadline;
+ }
}
}
@@ -1258,7 +1267,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// pending relocation entry per instruction.
// The buffers of pending constant pool entries.
- std::vector<ConstantPoolEntry> pending_32_bit_constants_;
+ base::SmallVector<ConstantPoolEntry, kTypicalNumPending32Constants>
+ pending_32_bit_constants_;
// Scratch registers available for use by the Assembler.
RegList scratch_register_list_;
@@ -1268,8 +1278,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Avoid overflows for displacements etc.
static const int kMaximalBufferSize = 512 * MB;
- int next_buffer_check_; // pc offset of next buffer check
-
// Constant pool generation
// Pools are emitted in the instruction stream, preferably after unconditional
// jumps or after returns from functions (in dead code locations).
@@ -1281,11 +1289,16 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// if so, a relocation info entry is associated to the constant pool entry.
// Repeated checking whether the constant pool should be emitted is rather
- // expensive. By default we only check again once a number of instructions
- // has been generated. That also means that the sizing of the buffers is not
- // an exact science, and that we rely on some slop to not overrun buffers.
- static constexpr int kCheckPoolIntervalInst = 32;
- static constexpr int kCheckPoolInterval = kCheckPoolIntervalInst * kInstrSize;
+ // expensive. Instead, we check once a deadline is hit; the deadline being
+ // when there is a possibility that MaybeCheckConstPool won't be called before
+ // kMaxDistToIntPoolWithHeader is exceeded. Since MaybeCheckConstPool is
+ // called in CheckBuffer, this means that kGap is an upper bound on this
+ // check. Use 2 * kGap just to give it some slack around BlockConstPoolScopes.
+ static constexpr int kCheckPoolDeadline = kMaxDistToIntPool - 2 * kGap;
+
+ // pc offset of the upcoming constant pool deadline. Equivalent to
+ // first_const_pool_32_use_ + kCheckPoolDeadline.
+ int constant_pool_deadline_;
// Emission of the constant pool may be blocked in some code sequences.
int const_pool_blocked_nesting_; // Block emission if this is not zero.
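With the values above, and assuming kGap is 32 bytes (the buffer-gap constant is not shown in this diff), the deadline constant evaluates to:

    kCheckPoolDeadline = kMaxDistToIntPool - 2 * kGap = 4095 - 64 = 4031 bytes

so MaybeCheckConstPool fires roughly 4 KB after the first pending constant load, leaving two buffer-gaps of slack before the PC-relative range runs out.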
@@ -1298,7 +1311,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// The bound position, before this we cannot do instruction elimination.
int last_bound_pos_;
- inline void CheckBuffer();
+ V8_INLINE void CheckBuffer();
void GrowBuffer();
// Instruction generation
diff --git a/chromium/v8/src/codegen/arm/cpu-arm.cc b/chromium/v8/src/codegen/arm/cpu-arm.cc
index 47fe4bdb740..88491c5e51c 100644
--- a/chromium/v8/src/codegen/arm/cpu-arm.cc
+++ b/chromium/v8/src/codegen/arm/cpu-arm.cc
@@ -6,7 +6,7 @@
#ifdef __arm__
#ifdef __QNXNTO__
#include <sys/mman.h> // for cache flushing.
-#undef MAP_TYPE // NOLINT
+#undef MAP_TYPE
#elif V8_OS_FREEBSD
#include <machine/sysarch.h> // for cache flushing
#include <sys/types.h>
diff --git a/chromium/v8/src/codegen/arm/interface-descriptors-arm-inl.h b/chromium/v8/src/codegen/arm/interface-descriptors-arm-inl.h
new file mode 100644
index 00000000000..296f72d1578
--- /dev/null
+++ b/chromium/v8/src/codegen/arm/interface-descriptors-arm-inl.h
@@ -0,0 +1,256 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CODEGEN_ARM_INTERFACE_DESCRIPTORS_ARM_INL_H_
+#define V8_CODEGEN_ARM_INTERFACE_DESCRIPTORS_ARM_INL_H_
+
+#if V8_TARGET_ARCH_ARM
+
+#include "src/codegen/interface-descriptors.h"
+#include "src/execution/frames.h"
+
+namespace v8 {
+namespace internal {
+
+constexpr auto CallInterfaceDescriptor::DefaultRegisterArray() {
+ auto registers = RegisterArray(r0, r1, r2, r3, r4);
+ STATIC_ASSERT(registers.size() == kMaxBuiltinRegisterParams);
+ return registers;
+}
+
+// static
+constexpr auto RecordWriteDescriptor::registers() {
+ return RegisterArray(r0, r1, r2, r3, r4, kReturnRegister0);
+}
+
+// static
+constexpr auto DynamicCheckMapsDescriptor::registers() {
+ return RegisterArray(r0, r1, r2, r3, cp);
+}
+
+// static
+constexpr auto EphemeronKeyBarrierDescriptor::registers() {
+ return RegisterArray(r0, r1, r2, r3, r4, kReturnRegister0);
+}
+
+// static
+constexpr Register LoadDescriptor::ReceiverRegister() { return r1; }
+// static
+constexpr Register LoadDescriptor::NameRegister() { return r2; }
+// static
+constexpr Register LoadDescriptor::SlotRegister() { return r0; }
+
+// static
+constexpr Register LoadWithVectorDescriptor::VectorRegister() { return r3; }
+
+// static
+constexpr Register
+LoadWithReceiverAndVectorDescriptor::LookupStartObjectRegister() {
+ return r4;
+}
+
+// static
+constexpr Register StoreDescriptor::ReceiverRegister() { return r1; }
+// static
+constexpr Register StoreDescriptor::NameRegister() { return r2; }
+// static
+constexpr Register StoreDescriptor::ValueRegister() { return r0; }
+// static
+constexpr Register StoreDescriptor::SlotRegister() { return r4; }
+
+// static
+constexpr Register StoreWithVectorDescriptor::VectorRegister() { return r3; }
+
+// static
+constexpr Register StoreTransitionDescriptor::MapRegister() { return r5; }
+
+// static
+constexpr Register ApiGetterDescriptor::HolderRegister() { return r0; }
+// static
+constexpr Register ApiGetterDescriptor::CallbackRegister() { return r3; }
+
+// static
+constexpr Register GrowArrayElementsDescriptor::ObjectRegister() { return r0; }
+// static
+constexpr Register GrowArrayElementsDescriptor::KeyRegister() { return r3; }
+
+// static
+constexpr Register BaselineLeaveFrameDescriptor::ParamsSizeRegister() {
+ return r3;
+}
+// static
+constexpr Register BaselineLeaveFrameDescriptor::WeightRegister() { return r4; }
+
+// static
+constexpr Register TypeConversionDescriptor::ArgumentRegister() { return r0; }
+
+// static
+constexpr auto TypeofDescriptor::registers() { return RegisterArray(r3); }
+
+// static
+constexpr auto CallTrampolineDescriptor::registers() {
+ // r0 : number of arguments
+ // r1 : the target to call
+ return RegisterArray(r1, r0);
+}
+
+// static
+constexpr auto CallVarargsDescriptor::registers() {
+ // r0 : number of arguments (on the stack, not including receiver)
+ // r1 : the target to call
+ // r4 : arguments list length (untagged)
+ // r2 : arguments list (FixedArray)
+ return RegisterArray(r1, r0, r4, r2);
+}
+
+// static
+constexpr auto CallForwardVarargsDescriptor::registers() {
+ // r0 : number of arguments
+ // r2 : start index (to support rest parameters)
+ // r1 : the target to call
+ return RegisterArray(r1, r0, r2);
+}
+
+// static
+constexpr auto CallFunctionTemplateDescriptor::registers() {
+ // r1 : function template info
+ // r2 : number of arguments (on the stack, not including receiver)
+ return RegisterArray(r1, r2);
+}
+
+// static
+constexpr auto CallWithSpreadDescriptor::registers() {
+ // r0 : number of arguments (on the stack, not including receiver)
+ // r1 : the target to call
+ // r2 : the object to spread
+ return RegisterArray(r1, r0, r2);
+}
+
+// static
+constexpr auto CallWithArrayLikeDescriptor::registers() {
+ // r1 : the target to call
+ // r2 : the arguments list
+ return RegisterArray(r1, r2);
+}
+
+// static
+constexpr auto ConstructVarargsDescriptor::registers() {
+ // r0 : number of arguments (on the stack, not including receiver)
+ // r1 : the target to call
+ // r3 : the new target
+ // r4 : arguments list length (untagged)
+ // r2 : arguments list (FixedArray)
+ return RegisterArray(r1, r3, r0, r4, r2);
+}
+
+// static
+constexpr auto ConstructForwardVarargsDescriptor::registers() {
+ // r0 : number of arguments
+ // r3 : the new target
+ // r2 : start index (to support rest parameters)
+ // r1 : the target to call
+ return RegisterArray(r1, r3, r0, r2);
+}
+
+// static
+constexpr auto ConstructWithSpreadDescriptor::registers() {
+ // r0 : number of arguments (on the stack, not including receiver)
+ // r1 : the target to call
+ // r3 : the new target
+ // r2 : the object to spread
+ return RegisterArray(r1, r3, r0, r2);
+}
+
+// static
+constexpr auto ConstructWithArrayLikeDescriptor::registers() {
+ // r1 : the target to call
+ // r3 : the new target
+ // r2 : the arguments list
+ return RegisterArray(r1, r3, r2);
+}
+
+// static
+constexpr auto ConstructStubDescriptor::registers() {
+ // r0 : number of arguments
+ // r1 : the target to call
+ // r3 : the new target
+ // r2 : allocation site or undefined
+ return RegisterArray(r1, r3, r0, r2);
+}
+
+// static
+constexpr auto AbortDescriptor::registers() { return RegisterArray(r1); }
+
+// static
+constexpr auto CompareDescriptor::registers() { return RegisterArray(r1, r0); }
+
+// static
+constexpr auto Compare_BaselineDescriptor::registers() {
+ // r1: left operand
+ // r0: right operand
+ // r2: feedback slot
+ return RegisterArray(r1, r0, r2);
+}
+
+// static
+constexpr auto BinaryOpDescriptor::registers() { return RegisterArray(r1, r0); }
+
+// static
+constexpr auto BinaryOp_BaselineDescriptor::registers() {
+ // r1: left operand
+ // r0: right operand
+ // r2: feedback slot
+ return RegisterArray(r1, r0, r2);
+}
+
+// static
+constexpr auto ApiCallbackDescriptor::registers() {
+ return RegisterArray(r1, // kApiFunctionAddress
+ r2, // kArgc
+ r3, // kCallData
+ r0); // kHolder
+}
+
+// static
+constexpr auto InterpreterDispatchDescriptor::registers() {
+ return RegisterArray(
+ kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister,
+ kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister);
+}
+
+// static
+constexpr auto InterpreterPushArgsThenCallDescriptor::registers() {
+ return RegisterArray(r0, // argument count (not including receiver)
+ r2, // address of first argument
+ r1); // the target callable to be called
+}
+
+// static
+constexpr auto InterpreterPushArgsThenConstructDescriptor::registers() {
+ return RegisterArray(
+ r0, // argument count (not including receiver)
+ r4, // address of the first argument
+ r1, // constructor to call
+ r3, // new target
+ r2); // allocation site feedback if available, undefined otherwise
+}
+
+// static
+constexpr auto ResumeGeneratorDescriptor::registers() {
+ return RegisterArray(r0, // the value to pass to the generator
+ r1); // the JSGeneratorObject to resume
+}
+
+// static
+constexpr auto RunMicrotasksEntryDescriptor::registers() {
+ return RegisterArray(r0, r1);
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TARGET_ARCH_ARM
+
+#endif // V8_CODEGEN_ARM_INTERFACE_DESCRIPTORS_ARM_INL_H_
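The new header replaces the runtime InitializePlatformSpecific registrations (deleted below) with constexpr data, so a descriptor's register assignment becomes compile-time information. A hedged usage sketch, assuming RegisterArray yields a std::array-like container (as the registers.size() check above suggests) and that Register comparisons are usable in constant expressions:

    // Illustration only: compile-time inspection of a descriptor's registers.
    constexpr auto regs = CallTrampolineDescriptor::registers();
    static_assert(regs.size() == 2);  // r1: target, r0: argument count
    static_assert(regs[0] == r1);     // assumes constexpr operator== on Register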
diff --git a/chromium/v8/src/codegen/arm/interface-descriptors-arm.cc b/chromium/v8/src/codegen/arm/interface-descriptors-arm.cc
deleted file mode 100644
index 53992227ab3..00000000000
--- a/chromium/v8/src/codegen/arm/interface-descriptors-arm.cc
+++ /dev/null
@@ -1,306 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_ARM
-
-#include "src/codegen/interface-descriptors.h"
-
-#include "src/execution/frames.h"
-
-namespace v8 {
-namespace internal {
-
-const Register CallInterfaceDescriptor::ContextRegister() { return cp; }
-
-void CallInterfaceDescriptor::DefaultInitializePlatformSpecific(
- CallInterfaceDescriptorData* data, int register_parameter_count) {
- const Register default_stub_registers[] = {r0, r1, r2, r3, r4};
- CHECK_LE(static_cast<size_t>(register_parameter_count),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(register_parameter_count,
- default_stub_registers);
-}
-
-void RecordWriteDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- const Register default_stub_registers[] = {r0, r1, r2, r3, r4};
-
- data->RestrictAllocatableRegisters(default_stub_registers,
- arraysize(default_stub_registers));
-
- CHECK_LE(static_cast<size_t>(kParameterCount),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
-}
-
-void DynamicCheckMapsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register default_stub_registers[] = {r0, r1, r2, r3, cp};
-
- data->RestrictAllocatableRegisters(default_stub_registers,
- arraysize(default_stub_registers));
-
- CHECK_LE(static_cast<size_t>(kParameterCount),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
-}
-
-void EphemeronKeyBarrierDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- const Register default_stub_registers[] = {r0, r1, r2, r3, r4};
-
- data->RestrictAllocatableRegisters(default_stub_registers,
- arraysize(default_stub_registers));
-
- CHECK_LE(static_cast<size_t>(kParameterCount),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
-}
-
-const Register LoadDescriptor::ReceiverRegister() { return r1; }
-const Register LoadDescriptor::NameRegister() { return r2; }
-const Register LoadDescriptor::SlotRegister() { return r0; }
-
-const Register LoadWithVectorDescriptor::VectorRegister() { return r3; }
-
-const Register
-LoadWithReceiverAndVectorDescriptor::LookupStartObjectRegister() {
- return r4;
-}
-
-const Register StoreDescriptor::ReceiverRegister() { return r1; }
-const Register StoreDescriptor::NameRegister() { return r2; }
-const Register StoreDescriptor::ValueRegister() { return r0; }
-const Register StoreDescriptor::SlotRegister() { return r4; }
-
-const Register StoreWithVectorDescriptor::VectorRegister() { return r3; }
-
-const Register StoreTransitionDescriptor::SlotRegister() { return r4; }
-const Register StoreTransitionDescriptor::VectorRegister() { return r3; }
-const Register StoreTransitionDescriptor::MapRegister() { return r5; }
-
-const Register ApiGetterDescriptor::HolderRegister() { return r0; }
-const Register ApiGetterDescriptor::CallbackRegister() { return r3; }
-
-const Register GrowArrayElementsDescriptor::ObjectRegister() { return r0; }
-const Register GrowArrayElementsDescriptor::KeyRegister() { return r3; }
-
-const Register BaselineLeaveFrameDescriptor::ParamsSizeRegister() { return r3; }
-const Register BaselineLeaveFrameDescriptor::WeightRegister() { return r4; }
-
-// static
-const Register TypeConversionDescriptor::ArgumentRegister() { return r0; }
-
-void TypeofDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r3};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallTrampolineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // r0 : number of arguments
- // r1 : the target to call
- Register registers[] = {r1, r0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallVarargsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // r0 : number of arguments (on the stack, not including receiver)
- // r1 : the target to call
- // r4 : arguments list length (untagged)
- // r2 : arguments list (FixedArray)
- Register registers[] = {r1, r0, r4, r2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallForwardVarargsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // r0 : number of arguments
- // r2 : start index (to support rest parameters)
- // r1 : the target to call
- Register registers[] = {r1, r0, r2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallFunctionTemplateDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // r1 : function template info
- // r2 : number of arguments (on the stack, not including receiver)
- Register registers[] = {r1, r2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallWithSpreadDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // r0 : number of arguments (on the stack, not including receiver)
- // r1 : the target to call
- // r2 : the object to spread
- Register registers[] = {r1, r0, r2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallWithArrayLikeDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // r1 : the target to call
- // r2 : the arguments list
- Register registers[] = {r1, r2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructVarargsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // r0 : number of arguments (on the stack, not including receiver)
- // r1 : the target to call
- // r3 : the new target
- // r4 : arguments list length (untagged)
- // r2 : arguments list (FixedArray)
- Register registers[] = {r1, r3, r0, r4, r2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructForwardVarargsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // r0 : number of arguments
- // r3 : the new target
- // r2 : start index (to support rest parameters)
- // r1 : the target to call
- Register registers[] = {r1, r3, r0, r2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructWithSpreadDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // r0 : number of arguments (on the stack, not including receiver)
- // r1 : the target to call
- // r3 : the new target
- // r2 : the object to spread
- Register registers[] = {r1, r3, r0, r2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructWithArrayLikeDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // r1 : the target to call
- // r3 : the new target
- // r2 : the arguments list
- Register registers[] = {r1, r3, r2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructStubDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // r0 : number of arguments
- // r1 : the target to call
- // r3 : the new target
- // r2 : allocation site or undefined
- Register registers[] = {r1, r3, r0, r2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void AbortDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r1};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CompareDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r1, r0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void Compare_BaselineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // r1: left operand
- // r0: right operand
- // r2: feedback slot
- Register registers[] = {r1, r0, r2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void BinaryOpDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r1, r0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void BinaryOp_BaselineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // r1: left operand
- // r0: right operand
- // r2: feedback slot
- Register registers[] = {r1, r0, r2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ApiCallbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- r1, // kApiFunctionAddress
- r2, // kArgc
- r3, // kCallData
- r0, // kHolder
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void InterpreterDispatchDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister,
- kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void InterpreterPushArgsThenCallDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- r0, // argument count (not including receiver)
- r2, // address of first argument
- r1 // the target callable to be call
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void InterpreterPushArgsThenConstructDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- r0, // argument count (not including receiver)
- r4, // address of the first argument
- r1, // constructor to call
- r3, // new target
- r2, // allocation site feedback if available, undefined otherwise
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ResumeGeneratorDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- r0, // the value to pass to the generator
- r1 // the JSGeneratorObject to resume
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void FrameDropperTrampolineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- r1, // loaded new FP
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void RunMicrotasksEntryDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r0, r1};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_ARM
diff --git a/chromium/v8/src/codegen/arm/macro-assembler-arm.cc b/chromium/v8/src/codegen/arm/macro-assembler-arm.cc
index f83eee4a919..d4e12f3092d 100644
--- a/chromium/v8/src/codegen/arm/macro-assembler-arm.cc
+++ b/chromium/v8/src/codegen/arm/macro-assembler-arm.cc
@@ -13,6 +13,7 @@
#include "src/codegen/callable.h"
#include "src/codegen/code-factory.h"
#include "src/codegen/external-reference-table.h"
+#include "src/codegen/interface-descriptors-inl.h"
#include "src/codegen/macro-assembler.h"
#include "src/codegen/register-configuration.h"
#include "src/debug/debug.h"
@@ -59,7 +60,7 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
bytes += NumRegs(list) * kPointerSize;
- if (fp_mode == kSaveFPRegs) {
+ if (fp_mode == SaveFPRegsMode::kSave) {
bytes += DwVfpRegister::kNumRegisters * DwVfpRegister::kSizeInBytes;
}
@@ -85,7 +86,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
bytes += NumRegs(list) * kPointerSize;
- if (fp_mode == kSaveFPRegs) {
+ if (fp_mode == SaveFPRegsMode::kSave) {
SaveFPRegs(sp, lr);
bytes += DwVfpRegister::kNumRegisters * DwVfpRegister::kSizeInBytes;
}
@@ -96,7 +97,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
Register exclusion2, Register exclusion3) {
int bytes = 0;
- if (fp_mode == kSaveFPRegs) {
+ if (fp_mode == SaveFPRegsMode::kSave) {
RestoreFPRegs(sp, lr);
bytes += DwVfpRegister::kNumRegisters * DwVfpRegister::kSizeInBytes;
}
@@ -660,7 +661,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
Label done;
// Skip barrier if writing a smi.
- if (smi_check == INLINE_SMI_CHECK) {
+ if (smi_check == SmiCheck::kInline) {
JumpIfSmi(value, &done);
}
@@ -668,7 +669,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
// of the object, so the offset must be a multiple of kPointerSize.
DCHECK(IsAligned(offset, kPointerSize));
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
Label ok;
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
@@ -680,7 +681,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
}
RecordWrite(object, Operand(offset - kHeapObjectTag), value, lr_status,
- save_fp, remembered_set_action, OMIT_SMI_CHECK);
+ save_fp, remembered_set_action, SmiCheck::kOmit);
bind(&done);
}
@@ -826,7 +827,7 @@ void MacroAssembler::RecordWrite(Register object, Operand offset,
RememberedSetAction remembered_set_action,
SmiCheck smi_check) {
DCHECK_NE(object, value);
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
{
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
@@ -837,7 +838,7 @@ void MacroAssembler::RecordWrite(Register object, Operand offset,
Check(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite);
}
- if ((remembered_set_action == OMIT_REMEMBERED_SET &&
+ if ((remembered_set_action == RememberedSetAction::kOmit &&
!FLAG_incremental_marking) ||
FLAG_disable_write_barriers) {
return;
@@ -847,7 +848,7 @@ void MacroAssembler::RecordWrite(Register object, Operand offset,
// catch stores of smis and stores into the young generation.
Label done;
- if (smi_check == INLINE_SMI_CHECK) {
+ if (smi_check == SmiCheck::kInline) {
JumpIfSmi(value, &done);
}
@@ -1435,7 +1436,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
PushCommonFrame(scratch);
// Reserve room for saved entry sp.
sub(sp, fp, Operand(ExitFrameConstants::kFixedFrameSizeFromFp));
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
mov(scratch, Operand::Zero());
str(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
@@ -1622,7 +1623,7 @@ void MacroAssembler::StackOverflowCheck(Register num_args, Register scratch,
void MacroAssembler::InvokePrologue(Register expected_parameter_count,
Register actual_parameter_count,
- Label* done, InvokeFlag flag) {
+ Label* done, InvokeType type) {
Label regular_invoke;
// r0: actual arguments count
// r1: function (passed through to callee)
@@ -1722,9 +1723,9 @@ void MacroAssembler::CallDebugOnFunctionCall(Register fun, Register new_target,
void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
Register expected_parameter_count,
Register actual_parameter_count,
- InvokeFlag flag) {
+ InvokeType type) {
// You can't call a function without a valid frame.
- DCHECK_IMPLIES(flag == CALL_FUNCTION, has_frame());
+ DCHECK_IMPLIES(type == InvokeType::kCall, has_frame());
DCHECK_EQ(function, r1);
DCHECK_IMPLIES(new_target.is_valid(), new_target == r3);
@@ -1746,17 +1747,19 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
}
Label done;
- InvokePrologue(expected_parameter_count, actual_parameter_count, &done, flag);
+ InvokePrologue(expected_parameter_count, actual_parameter_count, &done, type);
// We call indirectly through the code field in the function to
// allow recompilation to take effect without changing any of the
// call sites.
Register code = kJavaScriptCallCodeStartRegister;
ldr(code, FieldMemOperand(function, JSFunction::kCodeOffset));
- if (flag == CALL_FUNCTION) {
- CallCodeObject(code);
- } else {
- DCHECK(flag == JUMP_FUNCTION);
- JumpCodeObject(code);
+ switch (type) {
+ case InvokeType::kCall:
+ CallCodeObject(code);
+ break;
+ case InvokeType::kJump:
+ JumpCodeObject(code);
+ break;
}
b(&done);
@@ -1773,9 +1776,9 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
void MacroAssembler::InvokeFunctionWithNewTarget(
Register fun, Register new_target, Register actual_parameter_count,
- InvokeFlag flag) {
+ InvokeType type) {
// You can't call a function without a valid frame.
- DCHECK_IMPLIES(flag == CALL_FUNCTION, has_frame());
+ DCHECK_IMPLIES(type == InvokeType::kCall, has_frame());
// Contract with called JS functions requires that function is passed in r1.
DCHECK_EQ(fun, r1);
@@ -1790,15 +1793,15 @@ void MacroAssembler::InvokeFunctionWithNewTarget(
SharedFunctionInfo::kFormalParameterCountOffset));
InvokeFunctionCode(fun, new_target, expected_reg, actual_parameter_count,
- flag);
+ type);
}
void MacroAssembler::InvokeFunction(Register function,
Register expected_parameter_count,
Register actual_parameter_count,
- InvokeFlag flag) {
+ InvokeType type) {
// You can't call a function without a valid frame.
- DCHECK_IMPLIES(flag == CALL_FUNCTION, has_frame());
+ DCHECK_IMPLIES(type == InvokeType::kCall, has_frame());
// Contract with called JS functions requires that function is passed in r1.
DCHECK_EQ(function, r1);
@@ -1807,18 +1810,7 @@ void MacroAssembler::InvokeFunction(Register function,
ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
InvokeFunctionCode(r1, no_reg, expected_parameter_count,
- actual_parameter_count, flag);
-}
-
-void MacroAssembler::MaybeDropFrames() {
- // Check whether we need to drop frames to restart a function on the stack.
- ExternalReference restart_fp =
- ExternalReference::debug_restart_fp_address(isolate());
- Move(r1, restart_fp);
- ldr(r1, MemOperand(r1));
- tst(r1, r1);
- Jump(BUILTIN_CODE(isolate(), FrameDropperTrampoline), RelocInfo::CODE_TARGET,
- ne);
+ actual_parameter_count, type);
}
void MacroAssembler::PushStackHandler() {
@@ -1993,8 +1985,8 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
DCHECK_EQ(builtin.address() & 1, 1);
#endif
Move(r1, builtin);
- Handle<Code> code = CodeFactory::CEntry(isolate(), 1, kDontSaveFPRegs,
- kArgvOnStack, builtin_exit_frame);
+ Handle<Code> code = CodeFactory::CEntry(isolate(), 1, SaveFPRegsMode::kIgnore,
+ ArgvMode::kStack, builtin_exit_frame);
Jump(code, RelocInfo::CODE_TARGET);
}
@@ -2034,11 +2026,11 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
}
void TurboAssembler::Assert(Condition cond, AbortReason reason) {
- if (emit_debug_code()) Check(cond, reason);
+ if (FLAG_debug_code) Check(cond, reason);
}
void TurboAssembler::AssertUnreachable(AbortReason reason) {
- if (emit_debug_code()) Abort(reason);
+ if (FLAG_debug_code) Abort(reason);
}
void TurboAssembler::Check(Condition cond, AbortReason reason) {
@@ -2052,11 +2044,11 @@ void TurboAssembler::Check(Condition cond, AbortReason reason) {
void TurboAssembler::Abort(AbortReason reason) {
Label abort_start;
bind(&abort_start);
-#ifdef DEBUG
- const char* msg = GetAbortReason(reason);
- RecordComment("Abort message: ");
- RecordComment(msg);
-#endif
+ if (FLAG_code_comments) {
+ const char* msg = GetAbortReason(reason);
+ RecordComment("Abort message: ");
+ RecordComment(msg);
+ }
// Avoid emitting call to builtin if requested.
if (trap_on_abort()) {
@@ -2143,7 +2135,7 @@ void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label) {
}
void MacroAssembler::AssertNotSmi(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
STATIC_ASSERT(kSmiTag == 0);
tst(object, Operand(kSmiTagMask));
Check(ne, AbortReason::kOperandIsASmi);
@@ -2151,7 +2143,7 @@ void MacroAssembler::AssertNotSmi(Register object) {
}
void MacroAssembler::AssertSmi(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
STATIC_ASSERT(kSmiTag == 0);
tst(object, Operand(kSmiTagMask));
Check(eq, AbortReason::kOperandIsNotASmi);
@@ -2159,7 +2151,7 @@ void MacroAssembler::AssertSmi(Register object) {
}
void MacroAssembler::AssertConstructor(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
STATIC_ASSERT(kSmiTag == 0);
tst(object, Operand(kSmiTagMask));
Check(ne, AbortReason::kOperandIsASmiAndNotAConstructor);
@@ -2173,7 +2165,7 @@ void MacroAssembler::AssertConstructor(Register object) {
}
void MacroAssembler::AssertFunction(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
STATIC_ASSERT(kSmiTag == 0);
tst(object, Operand(kSmiTagMask));
Check(ne, AbortReason::kOperandIsASmiAndNotAFunction);
@@ -2187,7 +2179,7 @@ void MacroAssembler::AssertFunction(Register object) {
}
void MacroAssembler::AssertBoundFunction(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
STATIC_ASSERT(kSmiTag == 0);
tst(object, Operand(kSmiTagMask));
Check(ne, AbortReason::kOperandIsASmiAndNotABoundFunction);
@@ -2199,7 +2191,7 @@ void MacroAssembler::AssertBoundFunction(Register object) {
}
void MacroAssembler::AssertGeneratorObject(Register object) {
- if (!emit_debug_code()) return;
+ if (!FLAG_debug_code) return;
tst(object, Operand(kSmiTagMask));
Check(ne, AbortReason::kOperandIsASmiAndNotAGeneratorObject);
@@ -2229,7 +2221,7 @@ void MacroAssembler::AssertGeneratorObject(Register object) {
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
Register scratch) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
Label done_checking;
AssertNotSmi(object);
CompareRoot(object, RootIndex::kUndefinedValue);
@@ -2520,7 +2512,7 @@ void TurboAssembler::CallCFunctionHelper(Register function,
// running in the simulator. The simulator has its own alignment check which
// provides more information.
#if V8_HOST_ARCH_ARM
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
int frame_alignment = base::OS::ActivationFrameAlignment();
int frame_alignment_mask = frame_alignment - 1;
if (frame_alignment > kPointerSize) {
diff --git a/chromium/v8/src/codegen/arm/macro-assembler-arm.h b/chromium/v8/src/codegen/arm/macro-assembler-arm.h
index e622d4aa172..3a54f6c45fc 100644
--- a/chromium/v8/src/codegen/arm/macro-assembler-arm.h
+++ b/chromium/v8/src/codegen/arm/macro-assembler-arm.h
@@ -29,8 +29,6 @@ inline MemOperand FieldMemOperand(Register object, int offset) {
return MemOperand(object, offset - kHeapObjectTag);
}
-enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
-enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
@@ -656,16 +654,16 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void RecordWriteField(
Register object, int offset, Register value, LinkRegisterStatus lr_status,
SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
+ RememberedSetAction remembered_set_action = RememberedSetAction::kEmit,
+ SmiCheck smi_check = SmiCheck::kInline);
// For a given |object| notify the garbage collector that the slot at |offset|
// has been written. |value| is the object being stored.
void RecordWrite(
Register object, Operand offset, Register value,
LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
+ RememberedSetAction remembered_set_action = RememberedSetAction::kEmit,
+ SmiCheck smi_check = SmiCheck::kInline);
// Enter exit frame.
// stack_space - extra stack space, used for alignment before call to C.
@@ -689,7 +687,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Invoke the JavaScript function code by either calling or jumping.
void InvokeFunctionCode(Register function, Register new_target,
Register expected_parameter_count,
- Register actual_parameter_count, InvokeFlag flag);
+ Register actual_parameter_count, InvokeType type);
// On function call, call into the debugger.
void CallDebugOnFunctionCall(Register fun, Register new_target,
@@ -700,13 +698,10 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// current context to the context in the function before invoking.
void InvokeFunctionWithNewTarget(Register function, Register new_target,
Register actual_parameter_count,
- InvokeFlag flag);
+ InvokeType type);
void InvokeFunction(Register function, Register expected_parameter_count,
- Register actual_parameter_count, InvokeFlag flag);
-
- // Frame restart support
- void MaybeDropFrames();
+ Register actual_parameter_count, InvokeType type);
// Exception handling
@@ -784,18 +779,18 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Call a runtime routine.
void CallRuntime(const Runtime::Function* f, int num_arguments,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs);
+ SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore);
// Convenience function: Same as above, but takes the fid instead.
void CallRuntime(Runtime::FunctionId fid,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+ SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) {
const Runtime::Function* function = Runtime::FunctionForId(fid);
CallRuntime(function, function->nargs, save_doubles);
}
// Convenience function: Same as above, but takes the fid instead.
void CallRuntime(Runtime::FunctionId fid, int num_arguments,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+ SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) {
CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
}
@@ -874,7 +869,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Helper functions for generating invokes.
void InvokePrologue(Register expected_parameter_count,
Register actual_parameter_count, Label* done,
- InvokeFlag flag);
+ InvokeType type);
DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler);
};
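
The macro-assembler header above replaces the old unscoped enums (EMIT_REMEMBERED_SET, INLINE_SMI_CHECK, kDontSaveFPRegs) with scoped enum classes used as default arguments. A minimal, self-contained sketch of that pattern follows; the function and enumerator names here are illustrative, not V8's exact API.

#include <iostream>

// Scoped enums: enumerators live inside the enum's scope and do not
// implicitly convert to int, so call sites must spell out the intent.
enum class RememberedSetAction { kOmit, kEmit };
enum class SmiCheck { kOmit, kInline };

// Default arguments keep existing call sites short while new call sites
// must use the qualified enumerator names.
void RecordWriteField(int offset,
                      RememberedSetAction action = RememberedSetAction::kEmit,
                      SmiCheck smi_check = SmiCheck::kInline) {
  if (smi_check == SmiCheck::kInline) {
    std::cout << "emit inline smi check\n";
  }
  if (action == RememberedSetAction::kEmit) {
    std::cout << "emit remembered-set update for offset " << offset << "\n";
  }
}

int main() {
  RecordWriteField(8);                            // use the defaults
  RecordWriteField(16, RememberedSetAction::kOmit,
                   SmiCheck::kOmit);              // explicit opt-out
}
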
diff --git a/chromium/v8/src/codegen/arm64/assembler-arm64-inl.h b/chromium/v8/src/codegen/arm64/assembler-arm64-inl.h
index ee64dbe1f26..2668502f816 100644
--- a/chromium/v8/src/codegen/arm64/assembler-arm64-inl.h
+++ b/chromium/v8/src/codegen/arm64/assembler-arm64-inl.h
@@ -1072,12 +1072,12 @@ const Register& Assembler::AppropriateZeroRegFor(const CPURegister& reg) const {
inline void Assembler::CheckBufferSpace() {
DCHECK_LT(pc_, buffer_start_ + buffer_->size());
- if (buffer_space() < kGap) {
+ if (V8_UNLIKELY(buffer_space() < kGap)) {
GrowBuffer();
}
}
-inline void Assembler::CheckBuffer() {
+V8_INLINE void Assembler::CheckBuffer() {
CheckBufferSpace();
if (pc_offset() >= next_veneer_pool_check_) {
CheckVeneerPool(false, true);
@@ -1085,6 +1085,10 @@ inline void Assembler::CheckBuffer() {
constpool_.MaybeCheck();
}
+EnsureSpace::EnsureSpace(Assembler* assembler) : block_pools_scope_(assembler) {
+ assembler->CheckBufferSpace();
+}
+
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/codegen/arm64/assembler-arm64.h b/chromium/v8/src/codegen/arm64/assembler-arm64.h
index aa2ffb26cdf..9d8b135954b 100644
--- a/chromium/v8/src/codegen/arm64/assembler-arm64.h
+++ b/chromium/v8/src/codegen/arm64/assembler-arm64.h
@@ -2634,7 +2634,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
}
void GrowBuffer();
- void CheckBufferSpace();
+ V8_INLINE void CheckBufferSpace();
void CheckBuffer();
// Emission of the veneer pools may be blocked in some code sequences.
@@ -2786,9 +2786,7 @@ class PatchingAssembler : public Assembler {
class EnsureSpace {
public:
- explicit EnsureSpace(Assembler* assembler) : block_pools_scope_(assembler) {
- assembler->CheckBufferSpace();
- }
+ explicit V8_INLINE EnsureSpace(Assembler* assembler);
private:
Assembler::BlockPoolsScope block_pools_scope_;
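
The assembler change above marks the rare buffer-growth path with V8_UNLIKELY and moves the EnsureSpace constructor out of line behind V8_INLINE. A rough sketch of how such a branch-hint macro is commonly defined and used, assuming a GCC/Clang-style compiler (the names below are illustrative, not V8's macros):

#include <cstddef>
#include <cstdio>

// Branch hint: tell the compiler the condition is almost always false so it
// lays out the grow path out of line, keeping the hot path short.
#if defined(__GNUC__) || defined(__clang__)
#define MY_UNLIKELY(cond) (__builtin_expect(!!(cond), 0))
#else
#define MY_UNLIKELY(cond) (cond)
#endif

struct Buffer {
  size_t used = 0;
  size_t capacity = 64;
  void Grow() {
    capacity *= 2;
    std::printf("grew buffer to %zu bytes\n", capacity);
  }
  void Emit(size_t n) {
    if (MY_UNLIKELY(capacity - used < n)) {  // cold path
      Grow();
    }
    used += n;  // hot path
  }
};

int main() {
  Buffer b;
  for (int i = 0; i < 20; ++i) b.Emit(8);
}
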
diff --git a/chromium/v8/src/codegen/arm64/cpu-arm64.cc b/chromium/v8/src/codegen/arm64/cpu-arm64.cc
index d7bd4834b0e..4baf2e07ec4 100644
--- a/chromium/v8/src/codegen/arm64/cpu-arm64.cc
+++ b/chromium/v8/src/codegen/arm64/cpu-arm64.cc
@@ -23,7 +23,7 @@ class CacheLineSizes {
cache_type_register_ = 0;
#else
// Copy the content of the cache type register to a core register.
- __asm__ __volatile__("mrs %x[ctr], ctr_el0" // NOLINT
+ __asm__ __volatile__("mrs %x[ctr], ctr_el0"
: [ctr] "=r"(cache_type_register_));
#endif
}
@@ -64,9 +64,8 @@ void CpuFeatures::FlushICache(void* address, size_t length) {
uintptr_t istart = start & ~(isize - 1);
uintptr_t end = start + length;
- __asm__ __volatile__( // NOLINT
- // Clean every line of the D cache containing the
- // target data.
+ __asm__ __volatile__(
+ // Clean every line of the D cache containing the target data.
"0: \n\t"
// dc : Data Cache maintenance
// c : Clean
@@ -111,7 +110,7 @@ void CpuFeatures::FlushICache(void* address, size_t length) {
: [dsize] "r"(dsize), [isize] "r"(isize), [end] "r"(end)
// This code does not write to memory but without the dependency gcc might
// move this code before the code is generated.
- : "cc", "memory"); // NOLINT
+ : "cc", "memory");
#endif // V8_OS_WIN
#endif // V8_HOST_ARCH_ARM64
}
diff --git a/chromium/v8/src/codegen/arm64/interface-descriptors-arm64-inl.h b/chromium/v8/src/codegen/arm64/interface-descriptors-arm64-inl.h
new file mode 100644
index 00000000000..90123dbdcb1
--- /dev/null
+++ b/chromium/v8/src/codegen/arm64/interface-descriptors-arm64-inl.h
@@ -0,0 +1,265 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CODEGEN_ARM64_INTERFACE_DESCRIPTORS_ARM64_INL_H_
+#define V8_CODEGEN_ARM64_INTERFACE_DESCRIPTORS_ARM64_INL_H_
+
+#if V8_TARGET_ARCH_ARM64
+
+#include "src/base/template-utils.h"
+#include "src/codegen/interface-descriptors.h"
+#include "src/execution/frames.h"
+
+namespace v8 {
+namespace internal {
+
+constexpr auto CallInterfaceDescriptor::DefaultRegisterArray() {
+ auto registers = RegisterArray(x0, x1, x2, x3, x4);
+ STATIC_ASSERT(registers.size() == kMaxBuiltinRegisterParams);
+ return registers;
+}
+
+// static
+constexpr auto RecordWriteDescriptor::registers() {
+ return RegisterArray(x0, x1, x2, x3, x4, kReturnRegister0);
+}
+
+// static
+constexpr auto DynamicCheckMapsDescriptor::registers() {
+ return RegisterArray(x0, x1, x2, x3, cp);
+}
+
+// static
+constexpr auto EphemeronKeyBarrierDescriptor::registers() {
+ return RegisterArray(x0, x1, x2, x3, x4, kReturnRegister0);
+}
+
+// static
+constexpr Register LoadDescriptor::ReceiverRegister() { return x1; }
+// static
+constexpr Register LoadDescriptor::NameRegister() { return x2; }
+// static
+constexpr Register LoadDescriptor::SlotRegister() { return x0; }
+
+// static
+constexpr Register LoadWithVectorDescriptor::VectorRegister() { return x3; }
+
+// static
+constexpr Register
+LoadWithReceiverAndVectorDescriptor::LookupStartObjectRegister() {
+ return x4;
+}
+
+// static
+constexpr Register StoreDescriptor::ReceiverRegister() { return x1; }
+// static
+constexpr Register StoreDescriptor::NameRegister() { return x2; }
+// static
+constexpr Register StoreDescriptor::ValueRegister() { return x0; }
+// static
+constexpr Register StoreDescriptor::SlotRegister() { return x4; }
+
+// static
+constexpr Register StoreWithVectorDescriptor::VectorRegister() { return x3; }
+
+// static
+constexpr Register StoreTransitionDescriptor::MapRegister() { return x5; }
+
+// static
+constexpr Register ApiGetterDescriptor::HolderRegister() { return x0; }
+// static
+constexpr Register ApiGetterDescriptor::CallbackRegister() { return x3; }
+
+// static
+constexpr Register GrowArrayElementsDescriptor::ObjectRegister() { return x0; }
+// static
+constexpr Register GrowArrayElementsDescriptor::KeyRegister() { return x3; }
+
+// static
+constexpr Register BaselineLeaveFrameDescriptor::ParamsSizeRegister() {
+ return x3;
+}
+// static
+constexpr Register BaselineLeaveFrameDescriptor::WeightRegister() { return x4; }
+
+// static
+constexpr Register TypeConversionDescriptor::ArgumentRegister() { return x0; }
+
+// static
+constexpr auto TypeofDescriptor::registers() { return RegisterArray(x3); }
+
+// static
+constexpr auto CallTrampolineDescriptor::registers() {
+ // x1: target
+ // x0: number of arguments
+ return RegisterArray(x1, x0);
+}
+
+// static
+constexpr auto CallVarargsDescriptor::registers() {
+ // x0 : number of arguments (on the stack, not including receiver)
+ // x1 : the target to call
+ // x4 : arguments list length (untagged)
+ // x2 : arguments list (FixedArray)
+ return RegisterArray(x1, x0, x4, x2);
+}
+
+// static
+constexpr auto CallForwardVarargsDescriptor::registers() {
+ // x1: target
+ // x0: number of arguments
+  // x2: start index (to support rest parameters)
+ return RegisterArray(x1, x0, x2);
+}
+
+// static
+constexpr auto CallFunctionTemplateDescriptor::registers() {
+ // x1 : function template info
+ // x2 : number of arguments (on the stack, not including receiver)
+ return RegisterArray(x1, x2);
+}
+
+// static
+constexpr auto CallWithSpreadDescriptor::registers() {
+ // x0 : number of arguments (on the stack, not including receiver)
+ // x1 : the target to call
+ // x2 : the object to spread
+ return RegisterArray(x1, x0, x2);
+}
+
+// static
+constexpr auto CallWithArrayLikeDescriptor::registers() {
+ // x1 : the target to call
+ // x2 : the arguments list
+ return RegisterArray(x1, x2);
+}
+
+// static
+constexpr auto ConstructVarargsDescriptor::registers() {
+ // x0 : number of arguments (on the stack, not including receiver)
+ // x1 : the target to call
+ // x3 : the new target
+ // x4 : arguments list length (untagged)
+ // x2 : arguments list (FixedArray)
+ return RegisterArray(x1, x3, x0, x4, x2);
+}
+
+// static
+constexpr auto ConstructForwardVarargsDescriptor::registers() {
+ // x3: new target
+ // x1: target
+ // x0: number of arguments
+  // x2: start index (to support rest parameters)
+ return RegisterArray(x1, x3, x0, x2);
+}
+
+// static
+constexpr auto ConstructWithSpreadDescriptor::registers() {
+ // x0 : number of arguments (on the stack, not including receiver)
+ // x1 : the target to call
+ // x3 : the new target
+ // x2 : the object to spread
+ return RegisterArray(x1, x3, x0, x2);
+}
+
+// static
+constexpr auto ConstructWithArrayLikeDescriptor::registers() {
+ // x1 : the target to call
+ // x3 : the new target
+ // x2 : the arguments list
+ return RegisterArray(x1, x3, x2);
+}
+
+// static
+constexpr auto ConstructStubDescriptor::registers() {
+ // x3: new target
+ // x1: target
+ // x0: number of arguments
+ // x2: allocation site or undefined
+ return RegisterArray(x1, x3, x0, x2);
+}
+
+// static
+constexpr auto AbortDescriptor::registers() { return RegisterArray(x1); }
+
+// static
+constexpr auto CompareDescriptor::registers() {
+ // x1: left operand
+ // x0: right operand
+ return RegisterArray(x1, x0);
+}
+
+// static
+constexpr auto Compare_BaselineDescriptor::registers() {
+ // x1: left operand
+ // x0: right operand
+ // x2: feedback slot
+ return RegisterArray(x1, x0, x2);
+}
+
+// static
+constexpr auto BinaryOpDescriptor::registers() {
+ // x1: left operand
+ // x0: right operand
+ return RegisterArray(x1, x0);
+}
+
+// static
+constexpr auto BinaryOp_BaselineDescriptor::registers() {
+ // x1: left operand
+ // x0: right operand
+ // x2: feedback slot
+ return RegisterArray(x1, x0, x2);
+}
+
+// static
+constexpr auto ApiCallbackDescriptor::registers() {
+ return RegisterArray(x1, // kApiFunctionAddress
+ x2, // kArgc
+ x3, // kCallData
+ x0); // kHolder
+}
+
+// static
+constexpr auto InterpreterDispatchDescriptor::registers() {
+ return RegisterArray(
+ kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister,
+ kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister);
+}
+
+// static
+constexpr auto InterpreterPushArgsThenCallDescriptor::registers() {
+ return RegisterArray(x0, // argument count (not including receiver)
+ x2, // address of first argument
+                       x1);  // the target callable to be called
+}
+
+// static
+constexpr auto InterpreterPushArgsThenConstructDescriptor::registers() {
+ return RegisterArray(
+ x0, // argument count (not including receiver)
+ x4, // address of the first argument
+ x1, // constructor to call
+ x3, // new target
+ x2); // allocation site feedback if available, undefined otherwise
+}
+
+// static
+constexpr auto ResumeGeneratorDescriptor::registers() {
+ return RegisterArray(x0, // the value to pass to the generator
+ x1); // the JSGeneratorObject to resume
+}
+
+// static
+constexpr auto RunMicrotasksEntryDescriptor::registers() {
+ return RegisterArray(x0, x1);
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TARGET_ARCH_ARM64
+
+#endif // V8_CODEGEN_ARM64_INTERFACE_DESCRIPTORS_ARM64_INL_H_
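
The new header above defines each calling convention as a constexpr registers() function returning a fixed register array, replacing the runtime InitializePlatformSpecific() setup deleted in the next file. A self-contained sketch of the idea, with plain integers standing in for Register and illustrative names:

#include <array>
#include <cstdio>

// Tiny stand-in for V8's RegisterArray(): deduce the array size from the
// argument count and keep the whole convention usable at compile time.
template <typename... Regs>
constexpr std::array<int, sizeof...(Regs)> RegisterArray(Regs... regs) {
  return {regs...};
}

struct CallTrampolineDescriptor {
  // x1: target, x0: argument count (encoded here as plain ints).
  static constexpr auto registers() { return RegisterArray(1, 0); }
};

// The register count can now be checked at compile time instead of via a
// runtime CHECK during descriptor initialization.
static_assert(CallTrampolineDescriptor::registers().size() == 2,
              "descriptor must name exactly two registers");

int main() {
  for (int reg : CallTrampolineDescriptor::registers()) {
    std::printf("x%d\n", reg);
  }
}
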
diff --git a/chromium/v8/src/codegen/arm64/interface-descriptors-arm64.cc b/chromium/v8/src/codegen/arm64/interface-descriptors-arm64.cc
deleted file mode 100644
index 246d6fc9610..00000000000
--- a/chromium/v8/src/codegen/arm64/interface-descriptors-arm64.cc
+++ /dev/null
@@ -1,310 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_ARM64
-
-#include "src/codegen/interface-descriptors.h"
-
-#include "src/execution/frames.h"
-
-namespace v8 {
-namespace internal {
-
-const Register CallInterfaceDescriptor::ContextRegister() { return cp; }
-
-void CallInterfaceDescriptor::DefaultInitializePlatformSpecific(
- CallInterfaceDescriptorData* data, int register_parameter_count) {
- const Register default_stub_registers[] = {x0, x1, x2, x3, x4};
- CHECK_LE(static_cast<size_t>(register_parameter_count),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(register_parameter_count,
- default_stub_registers);
-}
-
-void RecordWriteDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- const Register default_stub_registers[] = {x0, x1, x2, x3, x4};
-
- data->RestrictAllocatableRegisters(default_stub_registers,
- arraysize(default_stub_registers));
-
- CHECK_LE(static_cast<size_t>(kParameterCount),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
-}
-
-void DynamicCheckMapsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register default_stub_registers[] = {x0, x1, x2, x3, cp};
-
- data->RestrictAllocatableRegisters(default_stub_registers,
- arraysize(default_stub_registers));
-
- CHECK_LE(static_cast<size_t>(kParameterCount),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
-}
-
-void EphemeronKeyBarrierDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- const Register default_stub_registers[] = {x0, x1, x2, x3, x4};
-
- data->RestrictAllocatableRegisters(default_stub_registers,
- arraysize(default_stub_registers));
-
- CHECK_LE(static_cast<size_t>(kParameterCount),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
-}
-
-const Register LoadDescriptor::ReceiverRegister() { return x1; }
-const Register LoadDescriptor::NameRegister() { return x2; }
-const Register LoadDescriptor::SlotRegister() { return x0; }
-
-const Register LoadWithVectorDescriptor::VectorRegister() { return x3; }
-
-const Register
-LoadWithReceiverAndVectorDescriptor::LookupStartObjectRegister() {
- return x4;
-}
-
-const Register StoreDescriptor::ReceiverRegister() { return x1; }
-const Register StoreDescriptor::NameRegister() { return x2; }
-const Register StoreDescriptor::ValueRegister() { return x0; }
-const Register StoreDescriptor::SlotRegister() { return x4; }
-
-const Register StoreWithVectorDescriptor::VectorRegister() { return x3; }
-
-const Register StoreTransitionDescriptor::SlotRegister() { return x4; }
-const Register StoreTransitionDescriptor::VectorRegister() { return x3; }
-const Register StoreTransitionDescriptor::MapRegister() { return x5; }
-
-const Register ApiGetterDescriptor::HolderRegister() { return x0; }
-const Register ApiGetterDescriptor::CallbackRegister() { return x3; }
-
-const Register GrowArrayElementsDescriptor::ObjectRegister() { return x0; }
-const Register GrowArrayElementsDescriptor::KeyRegister() { return x3; }
-
-const Register BaselineLeaveFrameDescriptor::ParamsSizeRegister() { return x3; }
-const Register BaselineLeaveFrameDescriptor::WeightRegister() { return x4; }
-
-// static
-const Register TypeConversionDescriptor::ArgumentRegister() { return x0; }
-
-void TypeofDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {x3};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallTrampolineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // x1: target
- // x0: number of arguments
- Register registers[] = {x1, x0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallVarargsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // x0 : number of arguments (on the stack, not including receiver)
- // x1 : the target to call
- // x4 : arguments list length (untagged)
- // x2 : arguments list (FixedArray)
- Register registers[] = {x1, x0, x4, x2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallForwardVarargsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // x1: target
- // x0: number of arguments
- // x2: start index (to supported rest parameters)
- Register registers[] = {x1, x0, x2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallFunctionTemplateDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // x1 : function template info
- // x2 : number of arguments (on the stack, not including receiver)
- Register registers[] = {x1, x2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallWithSpreadDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // x0 : number of arguments (on the stack, not including receiver)
- // x1 : the target to call
- // x2 : the object to spread
- Register registers[] = {x1, x0, x2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallWithArrayLikeDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // x1 : the target to call
- // x2 : the arguments list
- Register registers[] = {x1, x2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructVarargsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // x0 : number of arguments (on the stack, not including receiver)
- // x1 : the target to call
- // x3 : the new target
- // x4 : arguments list length (untagged)
- // x2 : arguments list (FixedArray)
- Register registers[] = {x1, x3, x0, x4, x2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructForwardVarargsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // x3: new target
- // x1: target
- // x0: number of arguments
- // x2: start index (to supported rest parameters)
- Register registers[] = {x1, x3, x0, x2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructWithSpreadDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // x0 : number of arguments (on the stack, not including receiver)
- // x1 : the target to call
- // x3 : the new target
- // x2 : the object to spread
- Register registers[] = {x1, x3, x0, x2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructWithArrayLikeDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // x1 : the target to call
- // x3 : the new target
- // x2 : the arguments list
- Register registers[] = {x1, x3, x2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructStubDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // x3: new target
- // x1: target
- // x0: number of arguments
- // x2: allocation site or undefined
- Register registers[] = {x1, x3, x0, x2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void AbortDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {x1};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CompareDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // x1: left operand
- // x0: right operand
- Register registers[] = {x1, x0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void Compare_BaselineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // x1: left operand
- // x0: right operand
- // x2: feedback slot
- Register registers[] = {x1, x0, x2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void BinaryOpDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // x1: left operand
- // x0: right operand
- Register registers[] = {x1, x0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void BinaryOp_BaselineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // x1: left operand
- // x0: right operand
- // x2: feedback slot
- Register registers[] = {x1, x0, x2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ApiCallbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- x1, // kApiFunctionAddress
- x2, // kArgc
- x3, // kCallData
- x0, // kHolder
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void InterpreterDispatchDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister,
- kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void InterpreterPushArgsThenCallDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- x0, // argument count (not including receiver)
- x2, // address of first argument
- x1 // the target callable to be call
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void InterpreterPushArgsThenConstructDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- x0, // argument count (not including receiver)
- x4, // address of the first argument
- x1, // constructor to call
- x3, // new target
- x2, // allocation site feedback if available, undefined otherwise
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ResumeGeneratorDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- x0, // the value to pass to the generator
- x1 // the JSGeneratorObject to resume
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void FrameDropperTrampolineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- x1, // loaded new FP
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void RunMicrotasksEntryDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {x0, x1};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_ARM64
diff --git a/chromium/v8/src/codegen/arm64/macro-assembler-arm64-inl.h b/chromium/v8/src/codegen/arm64/macro-assembler-arm64-inl.h
index 6a33f864ab7..8fb9bbfd7b7 100644
--- a/chromium/v8/src/codegen/arm64/macro-assembler-arm64-inl.h
+++ b/chromium/v8/src/codegen/arm64/macro-assembler-arm64-inl.h
@@ -7,12 +7,12 @@
#include <ctype.h>
-#include "src/common/globals.h"
-
#include "src/base/bits.h"
#include "src/codegen/arm64/assembler-arm64-inl.h"
#include "src/codegen/arm64/assembler-arm64.h"
#include "src/codegen/macro-assembler.h"
+#include "src/common/globals.h"
+#include "src/execution/isolate-data.h"
namespace v8 {
namespace internal {
@@ -1037,7 +1037,7 @@ void TurboAssembler::InitializeRootRegister() {
ExternalReference isolate_root = ExternalReference::isolate_root(isolate());
Mov(kRootRegister, Operand(isolate_root));
#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
- Mov(kPointerCageBaseRegister, Operand(isolate_root));
+ LoadRootRelative(kPtrComprCageBaseRegister, IsolateData::cage_base_offset());
#endif
}
@@ -1200,7 +1200,7 @@ void TurboAssembler::Poke(const CPURegister& src, const Operand& offset) {
if (offset.IsImmediate()) {
DCHECK_GE(offset.ImmediateValue(), 0);
- } else if (emit_debug_code()) {
+ } else if (FLAG_debug_code) {
Cmp(xzr, offset);
Check(le, AbortReason::kStackAccessBelowStackPointer);
}
@@ -1212,7 +1212,7 @@ template <TurboAssembler::LoadLRMode lr_mode>
void TurboAssembler::Peek(const CPURegister& dst, const Operand& offset) {
if (offset.IsImmediate()) {
DCHECK_GE(offset.ImmediateValue(), 0);
- } else if (emit_debug_code()) {
+ } else if (FLAG_debug_code) {
Cmp(xzr, offset);
Check(le, AbortReason::kStackAccessBelowStackPointer);
}
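
InitializeRootRegister() above now loads the pointer-compression cage base via IsolateData::cage_base_offset(), and the Decompress* helpers further below add 32-bit field values to that base. A rough sketch of the scheme, with illustrative values rather than V8's actual cage layout:

#include <cinttypes>
#include <cstdint>
#include <cstdio>

// Pointer compression sketch: heap fields hold 32-bit offsets from a cage
// base kept in a dedicated register (kPtrComprCageBaseRegister in the patch);
// decompression is a single add.
using Tagged_t = uint32_t;

struct PtrComprCage {
  uintptr_t base;  // loaded once at frame/isolate setup
  Tagged_t Compress(uintptr_t full) const {
    return static_cast<Tagged_t>(full - base);
  }
  uintptr_t Decompress(Tagged_t field) const {
    // Corresponds to: Add(destination, kPtrComprCageBaseRegister, offset)
    return base + field;
  }
};

int main() {
  PtrComprCage cage{0x00007f0000000000u};
  uintptr_t object = cage.base + 0x1234;
  Tagged_t field = cage.Compress(object);
  std::printf("compressed: 0x%" PRIx32 ", decompressed: 0x%" PRIxPTR "\n",
              field, cage.Decompress(field));
}
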
diff --git a/chromium/v8/src/codegen/arm64/macro-assembler-arm64.cc b/chromium/v8/src/codegen/arm64/macro-assembler-arm64.cc
index a3570b80354..b18ff554553 100644
--- a/chromium/v8/src/codegen/arm64/macro-assembler-arm64.cc
+++ b/chromium/v8/src/codegen/arm64/macro-assembler-arm64.cc
@@ -10,6 +10,7 @@
#include "src/codegen/callable.h"
#include "src/codegen/code-factory.h"
#include "src/codegen/external-reference-table.h"
+#include "src/codegen/interface-descriptors-inl.h"
#include "src/codegen/macro-assembler-inl.h"
#include "src/codegen/register-configuration.h"
#include "src/codegen/reloc-info.h"
@@ -52,7 +53,7 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
int bytes = list.Count() * kXRegSizeInBits / 8;
- if (fp_mode == kSaveFPRegs) {
+ if (fp_mode == SaveFPRegsMode::kSave) {
DCHECK_EQ(kCallerSavedV.Count() % 2, 0);
bytes += kCallerSavedV.Count() * kDRegSizeInBits / 8;
}
@@ -69,7 +70,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
int bytes = list.Count() * kXRegSizeInBits / 8;
- if (fp_mode == kSaveFPRegs) {
+ if (fp_mode == SaveFPRegsMode::kSave) {
DCHECK_EQ(kCallerSavedV.Count() % 2, 0);
PushCPURegList(kCallerSavedV);
bytes += kCallerSavedV.Count() * kDRegSizeInBits / 8;
@@ -79,7 +80,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion) {
int bytes = 0;
- if (fp_mode == kSaveFPRegs) {
+ if (fp_mode == SaveFPRegsMode::kSave) {
DCHECK_EQ(kCallerSavedV.Count() % 2, 0);
PopCPURegList(kCallerSavedV);
bytes += kCallerSavedV.Count() * kDRegSizeInBits / 8;
@@ -1266,7 +1267,7 @@ void MacroAssembler::PopCalleeSavedRegisters() {
}
void TurboAssembler::AssertSpAligned() {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
HardAbortScope hard_abort(this); // Avoid calls to Abort.
// Arm64 requires the stack pointer to be 16-byte aligned prior to address
// calculation.
@@ -1299,7 +1300,7 @@ void TurboAssembler::CopyDoubleWords(Register dst, Register src, Register count,
CopyDoubleWordsMode mode) {
DCHECK(!AreAliased(dst, src, count));
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
Register pointer1 = dst;
Register pointer2 = src;
if (mode == kSrcLessThanDst) {
@@ -1374,7 +1375,7 @@ void TurboAssembler::SlotAddress(Register dst, Register slot_offset) {
}
void TurboAssembler::AssertFPCRState(Register fpcr) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
Label unexpected_mode, done;
UseScratchRegisterScope temps(this);
if (fpcr.IsNone()) {
@@ -1473,7 +1474,7 @@ void TurboAssembler::Swap(VRegister lhs, VRegister rhs) {
}
void TurboAssembler::AssertSmi(Register object, AbortReason reason) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
STATIC_ASSERT(kSmiTag == 0);
Tst(object, kSmiTagMask);
Check(eq, reason);
@@ -1481,7 +1482,7 @@ void TurboAssembler::AssertSmi(Register object, AbortReason reason) {
}
void MacroAssembler::AssertNotSmi(Register object, AbortReason reason) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
STATIC_ASSERT(kSmiTag == 0);
Tst(object, kSmiTagMask);
Check(ne, reason);
@@ -1489,7 +1490,7 @@ void MacroAssembler::AssertNotSmi(Register object, AbortReason reason) {
}
void MacroAssembler::AssertConstructor(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
AssertNotSmi(object, AbortReason::kOperandIsASmiAndNotAConstructor);
UseScratchRegisterScope temps(this);
@@ -1504,7 +1505,7 @@ void MacroAssembler::AssertConstructor(Register object) {
}
void MacroAssembler::AssertFunction(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
AssertNotSmi(object, AbortReason::kOperandIsASmiAndNotAFunction);
UseScratchRegisterScope temps(this);
@@ -1517,7 +1518,7 @@ void MacroAssembler::AssertFunction(Register object) {
}
void MacroAssembler::AssertBoundFunction(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
AssertNotSmi(object, AbortReason::kOperandIsASmiAndNotABoundFunction);
UseScratchRegisterScope temps(this);
@@ -1529,7 +1530,7 @@ void MacroAssembler::AssertBoundFunction(Register object) {
}
void MacroAssembler::AssertGeneratorObject(Register object) {
- if (!emit_debug_code()) return;
+ if (!FLAG_debug_code) return;
AssertNotSmi(object, AbortReason::kOperandIsASmiAndNotAGeneratorObject);
// Load map
@@ -1555,7 +1556,7 @@ void MacroAssembler::AssertGeneratorObject(Register object) {
}
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
UseScratchRegisterScope temps(this);
Register scratch = temps.AcquireX();
Label done_checking;
@@ -1569,7 +1570,7 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
}
void TurboAssembler::AssertPositiveOrZero(Register value) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
Label done;
int sign_bit = value.Is64Bits() ? kXSignBit : kWSignBit;
Tbz(value, sign_bit, &done);
@@ -1599,8 +1600,8 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
bool builtin_exit_frame) {
Mov(x1, builtin);
- Handle<Code> code = CodeFactory::CEntry(isolate(), 1, kDontSaveFPRegs,
- kArgvOnStack, builtin_exit_frame);
+ Handle<Code> code = CodeFactory::CEntry(isolate(), 1, SaveFPRegsMode::kIgnore,
+ ArgvMode::kStack, builtin_exit_frame);
Jump(code, RelocInfo::CODE_TARGET);
}
@@ -1942,7 +1943,7 @@ void TurboAssembler::CallBuiltin(int builtin_index) {
Ldr(scratch, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
Call(scratch);
}
- if (FLAG_code_comments) RecordComment("]");
+ RecordComment("]");
}
void TurboAssembler::TailCallBuiltin(int builtin_index) {
@@ -1971,7 +1972,7 @@ void TurboAssembler::TailCallBuiltin(int builtin_index) {
Ldr(temp, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
Jump(temp);
}
- if (FLAG_code_comments) RecordComment("]");
+ RecordComment("]");
}
void TurboAssembler::LoadCodeObjectEntry(Register destination,
@@ -2059,7 +2060,7 @@ void TurboAssembler::StoreReturnAddressAndCall(Register target) {
#endif
Poke(x17, 0);
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
// Verify that the slot below fp[kSPOffset]-8 points to the signed return
// location.
Ldr(x16, MemOperand(fp, ExitFrameConstants::kSPOffset));
@@ -2189,7 +2190,7 @@ void MacroAssembler::StackOverflowCheck(Register num_args,
void MacroAssembler::InvokePrologue(Register formal_parameter_count,
Register actual_argument_count, Label* done,
- InvokeFlag flag) {
+ InvokeType type) {
// x0: actual arguments count.
// x1: function (passed through to callee).
// x2: expected arguments count.
@@ -2320,9 +2321,9 @@ void MacroAssembler::CallDebugOnFunctionCall(Register fun, Register new_target,
void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
Register expected_parameter_count,
Register actual_parameter_count,
- InvokeFlag flag) {
+ InvokeType type) {
// You can't call a function without a valid frame.
- DCHECK_IMPLIES(flag == CALL_FUNCTION, has_frame());
+ DCHECK_IMPLIES(type == InvokeType::kCall, has_frame());
DCHECK_EQ(function, x1);
DCHECK_IMPLIES(new_target.is_valid(), new_target == x3);
@@ -2341,7 +2342,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
}
Label done;
- InvokePrologue(expected_parameter_count, actual_parameter_count, &done, flag);
+ InvokePrologue(expected_parameter_count, actual_parameter_count, &done, type);
// If actual != expected, InvokePrologue will have handled the call through
// the argument adaptor mechanism.
@@ -2352,11 +2353,13 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
Register code = kJavaScriptCallCodeStartRegister;
LoadTaggedPointerField(code,
FieldMemOperand(function, JSFunction::kCodeOffset));
- if (flag == CALL_FUNCTION) {
- CallCodeObject(code);
- } else {
- DCHECK(flag == JUMP_FUNCTION);
- JumpCodeObject(code);
+ switch (type) {
+ case InvokeType::kCall:
+ CallCodeObject(code);
+ break;
+ case InvokeType::kJump:
+ JumpCodeObject(code);
+ break;
}
B(&done);
@@ -2377,9 +2380,9 @@ Operand MacroAssembler::ReceiverOperand(Register arg_count) {
void MacroAssembler::InvokeFunctionWithNewTarget(
Register function, Register new_target, Register actual_parameter_count,
- InvokeFlag flag) {
+ InvokeType type) {
// You can't call a function without a valid frame.
- DCHECK(flag == JUMP_FUNCTION || has_frame());
+ DCHECK(type == InvokeType::kJump || has_frame());
// Contract with called JS functions requires that function is passed in x1.
// (See FullCodeGenerator::Generate().)
@@ -2400,15 +2403,15 @@ void MacroAssembler::InvokeFunctionWithNewTarget(
SharedFunctionInfo::kFormalParameterCountOffset));
InvokeFunctionCode(function, new_target, expected_parameter_count,
- actual_parameter_count, flag);
+ actual_parameter_count, type);
}
void MacroAssembler::InvokeFunction(Register function,
Register expected_parameter_count,
Register actual_parameter_count,
- InvokeFlag flag) {
+ InvokeType type) {
// You can't call a function without a valid frame.
- DCHECK(flag == JUMP_FUNCTION || has_frame());
+ DCHECK(type == InvokeType::kJump || has_frame());
// Contract with called JS functions requires that function is passed in x1.
// (See FullCodeGenerator::Generate().)
@@ -2419,7 +2422,7 @@ void MacroAssembler::InvokeFunction(Register function,
FieldMemOperand(function, JSFunction::kContextOffset));
InvokeFunctionCode(function, no_reg, expected_parameter_count,
- actual_parameter_count, flag);
+ actual_parameter_count, type);
}
void TurboAssembler::TryConvertDoubleToInt64(Register result,
@@ -2664,7 +2667,7 @@ void MacroAssembler::LeaveExitFrame(bool restore_doubles,
ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
Ldr(cp, MemOperand(scratch));
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
// Also emit debug code to clear the cp in the top frame.
Mov(scratch2, Operand(Context::kInvalidContext));
Mov(scratch, ExternalReference::Create(IsolateAddressId::kContextAddress,
@@ -2715,15 +2718,6 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
IncrementCounter(counter, -value, scratch1, scratch2);
}
-void MacroAssembler::MaybeDropFrames() {
- // Check whether we need to drop frames to restart a function on the stack.
- Mov(x1, ExternalReference::debug_restart_fp_address(isolate()));
- Ldr(x1, MemOperand(x1));
- Tst(x1, x1);
- Jump(BUILTIN_CODE(isolate(), FrameDropperTrampoline), RelocInfo::CODE_TARGET,
- ne);
-}
-
void MacroAssembler::JumpIfObjectType(Register object, Register map,
Register type_reg, InstanceType type,
Label* if_cond_pass, Condition cond) {
@@ -2860,14 +2854,14 @@ void TurboAssembler::DecompressTaggedPointer(const Register& destination,
const MemOperand& field_operand) {
RecordComment("[ DecompressTaggedPointer");
Ldr(destination.W(), field_operand);
- Add(destination, kPointerCageBaseRegister, destination);
+ Add(destination, kPtrComprCageBaseRegister, destination);
RecordComment("]");
}
void TurboAssembler::DecompressTaggedPointer(const Register& destination,
const Register& source) {
RecordComment("[ DecompressTaggedPointer");
- Add(destination, kPointerCageBaseRegister, Operand(source, UXTW));
+ Add(destination, kPtrComprCageBaseRegister, Operand(source, UXTW));
RecordComment("]");
}
@@ -2875,7 +2869,7 @@ void TurboAssembler::DecompressAnyTagged(const Register& destination,
const MemOperand& field_operand) {
RecordComment("[ DecompressAnyTagged");
Ldr(destination.W(), field_operand);
- Add(destination, kPointerCageBaseRegister, destination);
+ Add(destination, kPtrComprCageBaseRegister, destination);
RecordComment("]");
}
@@ -2904,7 +2898,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
Label done;
// Skip the barrier if writing a smi.
- if (smi_check == INLINE_SMI_CHECK) {
+ if (smi_check == SmiCheck::kInline) {
JumpIfSmi(value, &done);
}
@@ -2912,7 +2906,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
// of the object, so offset must be a multiple of kTaggedSize.
DCHECK(IsAligned(offset, kTaggedSize));
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
Label ok;
UseScratchRegisterScope temps(this);
Register scratch = temps.AcquireX();
@@ -2924,7 +2918,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
}
RecordWrite(object, Operand(offset - kHeapObjectTag), value, lr_status,
- save_fp, remembered_set_action, OMIT_SMI_CHECK);
+ save_fp, remembered_set_action, SmiCheck::kOmit);
Bind(&done);
}
@@ -3069,7 +3063,7 @@ void MacroAssembler::RecordWrite(Register object, Operand offset,
ASM_LOCATION_IN_ASSEMBLER("MacroAssembler::RecordWrite");
DCHECK(!AreAliased(object, value));
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
@@ -3079,7 +3073,7 @@ void MacroAssembler::RecordWrite(Register object, Operand offset,
Check(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite);
}
- if ((remembered_set_action == OMIT_REMEMBERED_SET &&
+ if ((remembered_set_action == RememberedSetAction::kOmit &&
!FLAG_incremental_marking) ||
FLAG_disable_write_barriers) {
return;
@@ -3089,7 +3083,7 @@ void MacroAssembler::RecordWrite(Register object, Operand offset,
// catch stores of smis and stores into the young generation.
Label done;
- if (smi_check == INLINE_SMI_CHECK) {
+ if (smi_check == SmiCheck::kInline) {
DCHECK_EQ(0, kSmiTag);
JumpIfSmi(value, &done);
}
@@ -3112,13 +3106,13 @@ void MacroAssembler::RecordWrite(Register object, Operand offset,
}
void TurboAssembler::Assert(Condition cond, AbortReason reason) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
Check(cond, reason);
}
}
void TurboAssembler::AssertUnreachable(AbortReason reason) {
- if (emit_debug_code()) Abort(reason);
+ if (FLAG_debug_code) Abort(reason);
}
void TurboAssembler::Check(Condition cond, AbortReason reason) {
@@ -3133,10 +3127,10 @@ void TurboAssembler::Trap() { Brk(0); }
void TurboAssembler::DebugBreak() { Debug("DebugBreak", 0, BREAK); }
void TurboAssembler::Abort(AbortReason reason) {
-#ifdef DEBUG
- RecordComment("Abort message: ");
- RecordComment(GetAbortReason(reason));
-#endif
+ if (FLAG_code_comments) {
+ RecordComment("Abort message: ");
+ RecordComment(GetAbortReason(reason));
+ }
// Avoid emitting call to builtin if requested.
if (trap_on_abort()) {
diff --git a/chromium/v8/src/codegen/arm64/macro-assembler-arm64.h b/chromium/v8/src/codegen/arm64/macro-assembler-arm64.h
index a749676cccd..7bc6432c360 100644
--- a/chromium/v8/src/codegen/arm64/macro-assembler-arm64.h
+++ b/chromium/v8/src/codegen/arm64/macro-assembler-arm64.h
@@ -126,8 +126,6 @@ inline BranchType InvertBranchType(BranchType type) {
}
}
-enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
-enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
enum DiscardMoveMode { kDontDiscardForSameWReg, kDiscardForSameWReg };
@@ -1849,17 +1847,17 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// ---- Calling / Jumping helpers ----
void CallRuntime(const Runtime::Function* f, int num_arguments,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs);
+ SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore);
// Convenience function: Same as above, but takes the fid instead.
void CallRuntime(Runtime::FunctionId fid, int num_arguments,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+ SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) {
CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
}
// Convenience function: Same as above, but takes the fid instead.
void CallRuntime(Runtime::FunctionId fid,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+ SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) {
const Runtime::Function* function = Runtime::FunctionForId(fid);
CallRuntime(function, function->nargs, save_doubles);
}
@@ -1882,7 +1880,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// 'call_kind' must be x5.
void InvokePrologue(Register expected_parameter_count,
Register actual_parameter_count, Label* done,
- InvokeFlag flag);
+ InvokeType type);
// On function call, call into the debugger.
void CallDebugOnFunctionCall(Register fun, Register new_target,
@@ -1890,20 +1888,17 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
Register actual_parameter_count);
void InvokeFunctionCode(Register function, Register new_target,
Register expected_parameter_count,
- Register actual_parameter_count, InvokeFlag flag);
+ Register actual_parameter_count, InvokeType type);
// Invoke the JavaScript function in the given register.
// Changes the current context to the context in the function before invoking.
void InvokeFunctionWithNewTarget(Register function, Register new_target,
Register actual_parameter_count,
- InvokeFlag flag);
+ InvokeType type);
void InvokeFunction(Register function, Register expected_parameter_count,
- Register actual_parameter_count, InvokeFlag flag);
+ Register actual_parameter_count, InvokeType type);
// ---- Code generation helpers ----
- // Frame restart support
- void MaybeDropFrames();
-
// ---------------------------------------------------------------------------
// Support functions.
@@ -2032,16 +2027,16 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void RecordWriteField(
Register object, int offset, Register value, LinkRegisterStatus lr_status,
SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
+ RememberedSetAction remembered_set_action = RememberedSetAction::kEmit,
+ SmiCheck smi_check = SmiCheck::kInline);
// For a given |object| notify the garbage collector that the slot at |offset|
// has been written. |value| is the object being stored.
void RecordWrite(
Register object, Operand offset, Register value,
LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
+ RememberedSetAction remembered_set_action = RememberedSetAction::kEmit,
+ SmiCheck smi_check = SmiCheck::kInline);
// ---------------------------------------------------------------------------
// Debugging.
diff --git a/chromium/v8/src/codegen/arm64/register-arm64.h b/chromium/v8/src/codegen/arm64/register-arm64.h
index 605856e51c1..1150daf4c60 100644
--- a/chromium/v8/src/codegen/arm64/register-arm64.h
+++ b/chromium/v8/src/codegen/arm64/register-arm64.h
@@ -102,7 +102,7 @@ class CPURegister : public RegisterBase<CPURegister, kRegAfterLast> {
}
static constexpr CPURegister Create(int code, int size, RegisterType type) {
- CONSTEXPR_DCHECK(IsValid(code, size, type));
+ DCHECK(IsValid(code, size, type));
return CPURegister{code, size, type};
}
@@ -320,7 +320,7 @@ class VRegister : public CPURegister {
}
static constexpr VRegister Create(int code, int size, int lane_count = 1) {
- CONSTEXPR_DCHECK(IsValidLaneCount(lane_count));
+ DCHECK(IsValidLaneCount(lane_count));
return VRegister(CPURegister::Create(code, size, CPURegister::kVRegister),
lane_count);
}
@@ -477,9 +477,9 @@ ALIAS_REGISTER(Register, kRootRegister, x26);
ALIAS_REGISTER(Register, rr, x26);
// Pointer cage base register.
#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
-ALIAS_REGISTER(Register, kPointerCageBaseRegister, x28);
+ALIAS_REGISTER(Register, kPtrComprCageBaseRegister, x28);
#else
-ALIAS_REGISTER(Register, kPointerCageBaseRegister, kRootRegister);
+ALIAS_REGISTER(Register, kPtrComprCageBaseRegister, kRootRegister);
#endif
// Context pointer register.
ALIAS_REGISTER(Register, cp, x27);
diff --git a/chromium/v8/src/codegen/assembler.cc b/chromium/v8/src/codegen/assembler.cc
index 95983705abd..bb80d366de3 100644
--- a/chromium/v8/src/codegen/assembler.cc
+++ b/chromium/v8/src/codegen/assembler.cc
@@ -69,7 +69,7 @@ AssemblerOptions AssemblerOptions::Default(Isolate* isolate) {
#endif
options.inline_offheap_trampolines &= !generating_embedded_builtin;
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
- const base::AddressRegion& code_range = isolate->heap()->code_range();
+ const base::AddressRegion& code_range = isolate->heap()->code_region();
DCHECK_IMPLIES(code_range.begin() != kNullAddress, !code_range.is_empty());
options.code_range_start = code_range.begin();
#endif
@@ -180,7 +180,6 @@ AssemblerBase::AssemblerBase(const AssemblerOptions& options,
: buffer_(std::move(buffer)),
options_(options),
enabled_cpu_features_(0),
- emit_debug_code_(FLAG_debug_code),
predictable_code_size_(false),
constant_pool_available_(false),
jump_optimization_info_(nullptr) {
@@ -298,6 +297,7 @@ Handle<HeapObject> AssemblerBase::GetEmbeddedObject(
int Assembler::WriteCodeComments() {
+ if (!FLAG_code_comments) return 0;
CHECK_IMPLIES(code_comments_writer_.entry_count() > 0,
options().emit_code_comments);
if (code_comments_writer_.entry_count() == 0) return 0;
diff --git a/chromium/v8/src/codegen/assembler.h b/chromium/v8/src/codegen/assembler.h
index 70669059664..98cca61a7c6 100644
--- a/chromium/v8/src/codegen/assembler.h
+++ b/chromium/v8/src/codegen/assembler.h
@@ -222,9 +222,6 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced {
const AssemblerOptions& options() const { return options_; }
- bool emit_debug_code() const { return emit_debug_code_; }
- void set_emit_debug_code(bool value) { emit_debug_code_ = value; }
-
bool predictable_code_size() const { return predictable_code_size_; }
void set_predictable_code_size(bool value) { predictable_code_size_ = value; }
@@ -291,7 +288,10 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced {
// Record an inline code comment that can be used by a disassembler.
// Use --code-comments to enable.
- void RecordComment(const char* msg) {
+ V8_INLINE void RecordComment(const char* msg) {
+ // Set explicit dependency on --code-comments for dead-code elimination in
+ // release builds.
+ if (!FLAG_code_comments) return;
if (options().emit_code_comments) {
code_comments_writer_.Add(pc_offset(), std::string(msg));
}
@@ -346,7 +346,7 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced {
DCHECK(!RelocInfo::IsNone(rmode));
if (options().disable_reloc_info_for_patching) return false;
if (RelocInfo::IsOnlyForSerializer(rmode) &&
- !options().record_reloc_info_for_serialization && !emit_debug_code()) {
+ !options().record_reloc_info_for_serialization && !FLAG_debug_code) {
return false;
}
return true;
@@ -378,7 +378,6 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced {
const AssemblerOptions options_;
uint64_t enabled_cpu_features_;
- bool emit_debug_code_;
bool predictable_code_size_;
// Indicates whether the constant pool can be accessed, which is only possible
@@ -392,20 +391,6 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced {
friend class ConstantPoolUnavailableScope;
};
-// Avoids emitting debug code during the lifetime of this scope object.
-class V8_NODISCARD DontEmitDebugCodeScope {
- public:
- explicit DontEmitDebugCodeScope(AssemblerBase* assembler)
- : assembler_(assembler), old_value_(assembler->emit_debug_code()) {
- assembler_->set_emit_debug_code(false);
- }
- ~DontEmitDebugCodeScope() { assembler_->set_emit_debug_code(old_value_); }
-
- private:
- AssemblerBase* assembler_;
- bool old_value_;
-};
-
// Enable a specified feature within a scope.
class V8_EXPORT_PRIVATE V8_NODISCARD CpuFeatureScope {
public:
@@ -425,7 +410,7 @@ class V8_EXPORT_PRIVATE V8_NODISCARD CpuFeatureScope {
#else
CpuFeatureScope(AssemblerBase* assembler, CpuFeature f,
CheckPolicy check = kCheckSupported) {}
- ~CpuFeatureScope() { // NOLINT (modernize-use-equals-default)
+ ~CpuFeatureScope() {
// Define a destructor to avoid unused variable warnings.
}
#endif
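
The assembler changes above drop the cached emit_debug_code_ member and read FLAG_debug_code (and FLAG_code_comments) directly at each call site. A simplified sketch of the pattern, using stand-in flags rather than V8's flag machinery; when a flag is a compile-time constant in release configurations, the guarded code can be dead-stripped entirely:

#include <cassert>
#include <cstdio>
#include <string>

// Illustrative stand-ins for --debug-code and --code-comments.
#ifdef DEBUG
static bool FLAG_debug_code = true;
static bool FLAG_code_comments = true;
#else
static constexpr bool FLAG_debug_code = false;
static constexpr bool FLAG_code_comments = false;
#endif

// Mirrors the early return added to RecordComment()/WriteCodeComments().
inline void RecordComment(const std::string& msg) {
  if (!FLAG_code_comments) return;
  std::printf("; %s\n", msg.c_str());
}

// Mirrors the Assert*/Check pattern: read the flag directly instead of a
// cached per-assembler member.
void EmitStore(int value) {
  if (FLAG_debug_code) {
    assert(value >= 0 && "unexpected negative value");
  }
  RecordComment("store");
  // ... actual instruction emission would follow here ...
}

int main() { EmitStore(42); }
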
diff --git a/chromium/v8/src/codegen/bailout-reason.h b/chromium/v8/src/codegen/bailout-reason.h
index 57bbbca7230..e8afa74e168 100644
--- a/chromium/v8/src/codegen/bailout-reason.h
+++ b/chromium/v8/src/codegen/bailout-reason.h
@@ -24,6 +24,7 @@ namespace internal {
V(kExpectedFeedbackVector, "Expected feedback vector") \
V(kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, \
"The function_data field should be a BytecodeArray on interpreter entry") \
+ V(kFpuTopIsNotZeroInDeoptimizer, "FPU TOP is not zero in deoptimizer") \
V(kInputStringTooLong, "Input string too long") \
V(kInvalidBytecode, "Invalid bytecode") \
V(kInvalidBytecodeAdvance, "Cannot advance current bytecode, ") \
diff --git a/chromium/v8/src/codegen/code-factory.cc b/chromium/v8/src/codegen/code-factory.cc
index ece8200023d..854969f8cb4 100644
--- a/chromium/v8/src/codegen/code-factory.cc
+++ b/chromium/v8/src/codegen/code-factory.cc
@@ -31,25 +31,35 @@ Handle<Code> CodeFactory::CEntry(Isolate* isolate, int result_size,
const ArgvMode am = argv_mode;
const bool be = builtin_exit_frame;
- if (rs == 1 && sd == kDontSaveFPRegs && am == kArgvOnStack && !be) {
+ if (rs == 1 && sd == SaveFPRegsMode::kIgnore && am == ArgvMode::kStack &&
+ !be) {
return CENTRY_CODE(Return1, DontSaveFPRegs, ArgvOnStack, NoBuiltinExit);
- } else if (rs == 1 && sd == kDontSaveFPRegs && am == kArgvOnStack && be) {
+ } else if (rs == 1 && sd == SaveFPRegsMode::kIgnore &&
+ am == ArgvMode::kStack && be) {
return CENTRY_CODE(Return1, DontSaveFPRegs, ArgvOnStack, BuiltinExit);
- } else if (rs == 1 && sd == kDontSaveFPRegs && am == kArgvInRegister && !be) {
+ } else if (rs == 1 && sd == SaveFPRegsMode::kIgnore &&
+ am == ArgvMode::kRegister && !be) {
return CENTRY_CODE(Return1, DontSaveFPRegs, ArgvInRegister, NoBuiltinExit);
- } else if (rs == 1 && sd == kSaveFPRegs && am == kArgvOnStack && !be) {
+ } else if (rs == 1 && sd == SaveFPRegsMode::kSave && am == ArgvMode::kStack &&
+ !be) {
return CENTRY_CODE(Return1, SaveFPRegs, ArgvOnStack, NoBuiltinExit);
- } else if (rs == 1 && sd == kSaveFPRegs && am == kArgvOnStack && be) {
+ } else if (rs == 1 && sd == SaveFPRegsMode::kSave && am == ArgvMode::kStack &&
+ be) {
return CENTRY_CODE(Return1, SaveFPRegs, ArgvOnStack, BuiltinExit);
- } else if (rs == 2 && sd == kDontSaveFPRegs && am == kArgvOnStack && !be) {
+ } else if (rs == 2 && sd == SaveFPRegsMode::kIgnore &&
+ am == ArgvMode::kStack && !be) {
return CENTRY_CODE(Return2, DontSaveFPRegs, ArgvOnStack, NoBuiltinExit);
- } else if (rs == 2 && sd == kDontSaveFPRegs && am == kArgvOnStack && be) {
+ } else if (rs == 2 && sd == SaveFPRegsMode::kIgnore &&
+ am == ArgvMode::kStack && be) {
return CENTRY_CODE(Return2, DontSaveFPRegs, ArgvOnStack, BuiltinExit);
- } else if (rs == 2 && sd == kDontSaveFPRegs && am == kArgvInRegister && !be) {
+ } else if (rs == 2 && sd == SaveFPRegsMode::kIgnore &&
+ am == ArgvMode::kRegister && !be) {
return CENTRY_CODE(Return2, DontSaveFPRegs, ArgvInRegister, NoBuiltinExit);
- } else if (rs == 2 && sd == kSaveFPRegs && am == kArgvOnStack && !be) {
+ } else if (rs == 2 && sd == SaveFPRegsMode::kSave && am == ArgvMode::kStack &&
+ !be) {
return CENTRY_CODE(Return2, SaveFPRegs, ArgvOnStack, NoBuiltinExit);
- } else if (rs == 2 && sd == kSaveFPRegs && am == kArgvOnStack && be) {
+ } else if (rs == 2 && sd == SaveFPRegsMode::kSave && am == ArgvMode::kStack &&
+ be) {
return CENTRY_CODE(Return2, SaveFPRegs, ArgvOnStack, BuiltinExit);
}
@@ -70,7 +80,7 @@ Callable CodeFactory::CallApiCallback(Isolate* isolate) {
// static
Callable CodeFactory::LoadGlobalIC(Isolate* isolate, TypeofMode typeof_mode) {
- return typeof_mode == NOT_INSIDE_TYPEOF
+ return typeof_mode == TypeofMode::kNotInside
? Builtins::CallableFor(isolate, Builtins::kLoadGlobalICTrampoline)
: Builtins::CallableFor(
isolate, Builtins::kLoadGlobalICInsideTypeofTrampoline);
@@ -79,7 +89,7 @@ Callable CodeFactory::LoadGlobalIC(Isolate* isolate, TypeofMode typeof_mode) {
// static
Callable CodeFactory::LoadGlobalICInOptimizedCode(Isolate* isolate,
TypeofMode typeof_mode) {
- return typeof_mode == NOT_INSIDE_TYPEOF
+ return typeof_mode == TypeofMode::kNotInside
? Builtins::CallableFor(isolate, Builtins::kLoadGlobalIC)
: Builtins::CallableFor(isolate,
Builtins::kLoadGlobalICInsideTypeof);
@@ -233,16 +243,6 @@ Callable CodeFactory::ResumeGenerator(Isolate* isolate) {
}
// static
-Callable CodeFactory::FrameDropperTrampoline(Isolate* isolate) {
- return Builtins::CallableFor(isolate, Builtins::kFrameDropperTrampoline);
-}
-
-// static
-Callable CodeFactory::HandleDebuggerStatement(Isolate* isolate) {
- return Builtins::CallableFor(isolate, Builtins::kHandleDebuggerStatement);
-}
-
-// static
Callable CodeFactory::FastNewFunctionContext(Isolate* isolate,
ScopeType scope_type) {
switch (scope_type) {
@@ -388,8 +388,8 @@ Callable CodeFactory::InterpreterPushArgsThenConstruct(
Callable CodeFactory::InterpreterCEntry(Isolate* isolate, int result_size) {
// Note: If we ever use fpregs in the interpreter then we will need to
// save fpregs too.
- Handle<Code> code = CodeFactory::CEntry(isolate, result_size, kDontSaveFPRegs,
- kArgvInRegister);
+ Handle<Code> code = CodeFactory::CEntry(
+ isolate, result_size, SaveFPRegsMode::kIgnore, ArgvMode::kRegister);
if (result_size == 1) {
return Callable(code, InterpreterCEntry1Descriptor{});
} else {
diff --git a/chromium/v8/src/codegen/code-factory.h b/chromium/v8/src/codegen/code-factory.h
index aab29770453..e55de10533e 100644
--- a/chromium/v8/src/codegen/code-factory.h
+++ b/chromium/v8/src/codegen/code-factory.h
@@ -28,10 +28,10 @@ class V8_EXPORT_PRIVATE CodeFactory final {
// is exported here.
static Handle<Code> RuntimeCEntry(Isolate* isolate, int result_size = 1);
- static Handle<Code> CEntry(Isolate* isolate, int result_size = 1,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs,
- ArgvMode argv_mode = kArgvOnStack,
- bool builtin_exit_frame = false);
+ static Handle<Code> CEntry(
+ Isolate* isolate, int result_size = 1,
+ SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore,
+ ArgvMode argv_mode = ArgvMode::kStack, bool builtin_exit_frame = false);
// Initial states for ICs.
static Callable LoadGlobalIC(Isolate* isolate, TypeofMode typeof_mode);
@@ -49,9 +49,6 @@ class V8_EXPORT_PRIVATE CodeFactory final {
static Callable ResumeGenerator(Isolate* isolate);
- static Callable FrameDropperTrampoline(Isolate* isolate);
- static Callable HandleDebuggerStatement(Isolate* isolate);
-
static Callable BinaryOperation(Isolate* isolate, Operation op);
static Callable ApiGetter(Isolate* isolate);
diff --git a/chromium/v8/src/codegen/code-stub-assembler.cc b/chromium/v8/src/codegen/code-stub-assembler.cc
index 0b039e40fac..5995a766d11 100644
--- a/chromium/v8/src/codegen/code-stub-assembler.cc
+++ b/chromium/v8/src/codegen/code-stub-assembler.cc
@@ -103,7 +103,7 @@ void CodeStubAssembler::Check(const BranchGenerator& branch,
std::initializer_list<ExtraNode> extra_nodes) {
Label ok(this);
Label not_ok(this, Label::kDeferred);
- if (message != nullptr && FLAG_code_comments) {
+ if (message != nullptr) {
Comment("[ Assert: ", message);
} else {
Comment("[ Assert");
@@ -1368,6 +1368,7 @@ TNode<HeapObject> CodeStubAssembler::AllocateInNewSpace(
TNode<HeapObject> CodeStubAssembler::Allocate(TNode<IntPtrT> size_in_bytes,
AllocationFlags flags) {
Comment("Allocate");
+ if (FLAG_single_generation) flags |= kPretenured;
bool const new_space = !(flags & kPretenured);
bool const allow_large_objects = flags & kAllowLargeObjectAllocation;
// For optimized allocations, we don't allow the allocation to happen in a
@@ -1574,8 +1575,8 @@ TNode<RawPtrT> CodeStubAssembler::LoadExternalPointerFromObject(
TNode<UintPtrT> entry = Load<UintPtrT>(table, table_offset);
if (external_pointer_tag != 0) {
- TNode<UintPtrT> tag = UintPtrConstant(external_pointer_tag);
- entry = UncheckedCast<UintPtrT>(WordXor(entry, tag));
+ TNode<UintPtrT> tag = UintPtrConstant(~external_pointer_tag);
+ entry = UncheckedCast<UintPtrT>(WordAnd(entry, tag));
}
return UncheckedCast<RawPtrT>(UncheckedCast<WordT>(entry));
#else
@@ -1603,7 +1604,7 @@ void CodeStubAssembler::StoreExternalPointerToObject(
TNode<UintPtrT> value = UncheckedCast<UintPtrT>(pointer);
if (external_pointer_tag != 0) {
TNode<UintPtrT> tag = UintPtrConstant(external_pointer_tag);
- value = UncheckedCast<UintPtrT>(WordXor(pointer, tag));
+ value = UncheckedCast<UintPtrT>(WordOr(pointer, tag));
}
StoreNoWriteBarrier(MachineType::PointerRepresentation(), table, table_offset,
value);
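The LoadExternalPointerFromObject/StoreExternalPointerToObject hunks above switch the external-pointer tagging from XOR on both sides to OR-on-store and AND-with-inverted-tag-on-load. A minimal standalone sketch of that bit pattern, using an arbitrary illustrative tag rather than V8's real external pointer tags:

```cpp
#include <cassert>
#include <cstdint>

// Illustrative tag only; real tag values come from V8's external pointer
// table configuration and are not reproduced here.
constexpr uint64_t kExampleTag = 0xFFull << 56;

uint64_t StoreEntry(uint64_t pointer) { return pointer | kExampleTag; }  // WordOr(pointer, tag)
uint64_t LoadEntry(uint64_t entry) { return entry & ~kExampleTag; }      // WordAnd(entry, ~tag)

int main() {
  uint64_t raw = 0x0000123456789ABCull;
  uint64_t stored = StoreEntry(raw);
  assert(stored != raw);             // tag bits are set while in the table
  assert(LoadEntry(stored) == raw);  // the load strips them again
  return 0;
}
```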
@@ -1619,6 +1620,8 @@ TNode<Object> CodeStubAssembler::LoadFromParentFrame(int offset) {
TNode<IntPtrT> CodeStubAssembler::LoadAndUntagObjectField(
TNode<HeapObject> object, int offset) {
+ // Please use LoadMap(object) instead.
+ DCHECK_NE(offset, HeapObject::kMapOffset);
if (SmiValuesAre32Bits()) {
#if V8_TARGET_LITTLE_ENDIAN
offset += 4;
@@ -1631,6 +1634,8 @@ TNode<IntPtrT> CodeStubAssembler::LoadAndUntagObjectField(
TNode<Int32T> CodeStubAssembler::LoadAndUntagToWord32ObjectField(
TNode<HeapObject> object, int offset) {
+ // Please use LoadMap(object) instead.
+ DCHECK_NE(offset, HeapObject::kMapOffset);
if (SmiValuesAre32Bits()) {
#if V8_TARGET_LITTLE_ENDIAN
offset += 4;
@@ -1656,7 +1661,15 @@ TNode<Map> CodeStubAssembler::GetInstanceTypeMap(InstanceType instance_type) {
}
TNode<Map> CodeStubAssembler::LoadMap(TNode<HeapObject> object) {
- return LoadObjectField<Map>(object, HeapObject::kMapOffset);
+ TNode<Map> map = LoadObjectField<Map>(object, HeapObject::kMapOffset);
+#ifdef V8_MAP_PACKING
+ // Check that the loaded map is unpacked, i.e. the lowest two bits != 0b10
+ CSA_ASSERT(this,
+ WordNotEqual(WordAnd(BitcastTaggedToWord(map),
+ IntPtrConstant(Internals::kMapWordXorMask)),
+ IntPtrConstant(Internals::kMapWordSignature)));
+#endif
+ return map;
}
TNode<Uint16T> CodeStubAssembler::LoadInstanceType(TNode<HeapObject> object) {
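Under V8_MAP_PACKING the map word is stored in a packed (XOR-transformed) form, and the new CSA_ASSERT in LoadMap checks that the returned value no longer carries the packed signature in its low bits. A small sketch of the idea; the mask and signature values below are placeholders, the real constants are Internals::kMapWordXorMask and Internals::kMapWordSignature:

```cpp
#include <cassert>
#include <cstdint>

// Placeholder mask/signature; the real constants may differ.
constexpr uintptr_t kXorMask = 0b11;
constexpr uintptr_t kSignature = 0b10;

uintptr_t PackMapWord(uintptr_t map_ptr) { return map_ptr ^ kXorMask; }
uintptr_t UnpackMapWord(uintptr_t word) { return word ^ kXorMask; }

// Mirrors the assert: an unpacked map pointer must not carry the packed
// signature in its lowest two bits.
bool LooksUnpacked(uintptr_t word) { return (word & kXorMask) != kSignature; }

int main() {
  uintptr_t map_ptr = 0x1000 | 0b01;  // tagged HeapObject-style pointer
  uintptr_t packed = PackMapWord(map_ptr);
  assert(!LooksUnpacked(packed));               // the packed form would trip the assert
  assert(LooksUnpacked(UnpackMapWord(packed)));
  return 0;
}
```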
@@ -2033,6 +2046,13 @@ void CodeStubAssembler::DispatchMaybeObject(TNode<MaybeObject> maybe_object,
Goto(if_strong);
}
+void CodeStubAssembler::AssertHasValidMap(TNode<HeapObject> object) {
+#ifdef V8_MAP_PACKING
+ // Test if the map is an unpacked and valid map
+ CSA_ASSERT(this, IsMap(LoadMap(object)));
+#endif
+}
+
TNode<BoolT> CodeStubAssembler::IsStrong(TNode<MaybeObject> value) {
return Word32Equal(Word32And(TruncateIntPtrToInt32(
BitcastTaggedToWordForTagAndSmiBits(value)),
@@ -2943,12 +2963,14 @@ void CodeStubAssembler::StoreObjectField(TNode<HeapObject> object,
void CodeStubAssembler::UnsafeStoreObjectFieldNoWriteBarrier(
TNode<HeapObject> object, int offset, TNode<Object> value) {
+ DCHECK_NE(HeapObject::kMapOffset, offset); // Use StoreMap instead.
OptimizedStoreFieldUnsafeNoWriteBarrier(MachineRepresentation::kTagged,
object, offset, value);
}
void CodeStubAssembler::StoreMap(TNode<HeapObject> object, TNode<Map> map) {
OptimizedStoreMap(object, map);
+ AssertHasValidMap(object);
}
void CodeStubAssembler::StoreMapNoWriteBarrier(TNode<HeapObject> object,
@@ -2958,16 +2980,19 @@ void CodeStubAssembler::StoreMapNoWriteBarrier(TNode<HeapObject> object,
void CodeStubAssembler::StoreMapNoWriteBarrier(TNode<HeapObject> object,
TNode<Map> map) {
- OptimizedStoreFieldAssertNoWriteBarrier(MachineRepresentation::kTaggedPointer,
- object, HeapObject::kMapOffset, map);
+ OptimizedStoreMap(object, map);
+ AssertHasValidMap(object);
}
void CodeStubAssembler::StoreObjectFieldRoot(TNode<HeapObject> object,
int offset, RootIndex root_index) {
- if (RootsTable::IsImmortalImmovable(root_index)) {
- StoreObjectFieldNoWriteBarrier(object, offset, LoadRoot(root_index));
+ TNode<Object> root = LoadRoot(root_index);
+ if (offset == HeapObject::kMapOffset) {
+ StoreMap(object, CAST(root));
+ } else if (RootsTable::IsImmortalImmovable(root_index)) {
+ StoreObjectFieldNoWriteBarrier(object, offset, root);
} else {
- StoreObjectField(object, offset, LoadRoot(root_index));
+ StoreObjectField(object, offset, root);
}
}
@@ -4762,7 +4787,11 @@ void CodeStubAssembler::MoveElements(ElementsKind kind,
TNode<IntPtrT> length) {
Label finished(this);
Label needs_barrier(this);
+#ifdef V8_DISABLE_WRITE_BARRIERS
+ const bool needs_barrier_check = false;
+#else
const bool needs_barrier_check = !IsDoubleElementsKind(kind);
+#endif // V8_DISABLE_WRITE_BARRIERS
DCHECK(IsFastElementsKind(kind));
CSA_ASSERT(this, IsFixedArrayWithKind(elements, kind));
@@ -4847,7 +4876,11 @@ void CodeStubAssembler::CopyElements(ElementsKind kind,
WriteBarrierMode write_barrier) {
Label finished(this);
Label needs_barrier(this);
+#ifdef V8_DISABLE_WRITE_BARRIERS
+ const bool needs_barrier_check = false;
+#else
const bool needs_barrier_check = !IsDoubleElementsKind(kind);
+#endif // V8_DISABLE_WRITE_BARRIERS
DCHECK(IsFastElementsKind(kind));
CSA_ASSERT(this, IsFixedArrayWithKind(dst_elements, kind));
@@ -5294,6 +5327,10 @@ TNode<FixedArrayBase> CodeStubAssembler::GrowElementsCapacity(
return new_elements;
}
+template TNode<FixedArrayBase> CodeStubAssembler::GrowElementsCapacity<IntPtrT>(
+ TNode<HeapObject>, TNode<FixedArrayBase>, ElementsKind, ElementsKind,
+ TNode<IntPtrT>, TNode<IntPtrT>, compiler::CodeAssemblerLabel*);
+
void CodeStubAssembler::InitializeAllocationMemento(
TNode<HeapObject> base, TNode<IntPtrT> base_allocation_size,
TNode<AllocationSite> allocation_site) {
@@ -6036,6 +6073,13 @@ TNode<BoolT> CodeStubAssembler::IsNoElementsProtectorCellInvalid() {
return TaggedEqual(cell_value, invalid);
}
+TNode<BoolT> CodeStubAssembler::IsMegaDOMProtectorCellInvalid() {
+ TNode<Smi> invalid = SmiConstant(Protectors::kProtectorInvalid);
+ TNode<PropertyCell> cell = MegaDOMProtectorConstant();
+ TNode<Object> cell_value = LoadObjectField(cell, PropertyCell::kValueOffset);
+ return TaggedEqual(cell_value, invalid);
+}
+
TNode<BoolT> CodeStubAssembler::IsArrayIteratorProtectorCellInvalid() {
TNode<Smi> invalid = SmiConstant(Protectors::kProtectorInvalid);
TNode<PropertyCell> cell = ArrayIteratorProtectorConstant();
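The new IsMegaDOMProtectorCellInvalid follows the usual protector-cell idiom: a PropertyCell holds a Smi that flips from valid to invalid once an optimization assumption is broken, and fast paths guard on it. A hedged sketch with placeholder constants standing in for Protectors::kProtectorValid/kProtectorInvalid:

```cpp
#include <cassert>

// Placeholder protector constants; values are illustrative only.
constexpr int kProtectorValid = 1;
constexpr int kProtectorInvalid = 0;

struct PropertyCellLike { int value = kProtectorValid; };

bool IsProtectorCellInvalid(const PropertyCellLike& cell) {
  return cell.value == kProtectorInvalid;
}

int main() {
  PropertyCellLike mega_dom;
  assert(!IsProtectorCellInvalid(mega_dom));  // fast paths may rely on the protector
  mega_dom.value = kProtectorInvalid;         // the assumption was invalidated
  assert(IsProtectorCellInvalid(mega_dom));   // callers must take the slow path
  return 0;
}
```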
@@ -6285,14 +6329,27 @@ TNode<BoolT> CodeStubAssembler::IsJSObjectInstanceType(
Int32Constant(FIRST_JS_OBJECT_TYPE));
}
+TNode<BoolT> CodeStubAssembler::IsJSApiObjectInstanceType(
+ TNode<Int32T> instance_type) {
+ return InstanceTypeEqual(instance_type, JS_API_OBJECT_TYPE);
+}
+
TNode<BoolT> CodeStubAssembler::IsJSObjectMap(TNode<Map> map) {
return IsJSObjectInstanceType(LoadMapInstanceType(map));
}
+TNode<BoolT> CodeStubAssembler::IsJSApiObjectMap(TNode<Map> map) {
+ return IsJSApiObjectInstanceType(LoadMapInstanceType(map));
+}
+
TNode<BoolT> CodeStubAssembler::IsJSObject(TNode<HeapObject> object) {
return IsJSObjectMap(LoadMap(object));
}
+TNode<BoolT> CodeStubAssembler::IsJSApiObject(TNode<HeapObject> object) {
+ return IsJSApiObjectMap(LoadMap(object));
+}
+
TNode<BoolT> CodeStubAssembler::IsJSFinalizationRegistryMap(TNode<Map> map) {
return InstanceTypeEqual(LoadMapInstanceType(map),
JS_FINALIZATION_REGISTRY_TYPE);
@@ -7672,15 +7729,25 @@ TNode<Object> CodeStubAssembler::OrdinaryToPrimitive(
TNode<Uint32T> CodeStubAssembler::DecodeWord32(TNode<Word32T> word32,
uint32_t shift, uint32_t mask) {
DCHECK_EQ((mask >> shift) << shift, mask);
- return Unsigned(Word32And(Word32Shr(word32, static_cast<int>(shift)),
- Int32Constant(mask >> shift)));
+ if ((std::numeric_limits<uint32_t>::max() >> shift) ==
+ ((std::numeric_limits<uint32_t>::max() & mask) >> shift)) {
+ return Unsigned(Word32Shr(word32, static_cast<int>(shift)));
+ } else {
+ return Unsigned(Word32And(Word32Shr(word32, static_cast<int>(shift)),
+ Int32Constant(mask >> shift)));
+ }
}
TNode<UintPtrT> CodeStubAssembler::DecodeWord(TNode<WordT> word, uint32_t shift,
uintptr_t mask) {
DCHECK_EQ((mask >> shift) << shift, mask);
- return Unsigned(WordAnd(WordShr(word, static_cast<int>(shift)),
- IntPtrConstant(mask >> shift)));
+ if ((std::numeric_limits<uintptr_t>::max() >> shift) ==
+ ((std::numeric_limits<uintptr_t>::max() & mask) >> shift)) {
+ return Unsigned(WordShr(word, static_cast<int>(shift)));
+ } else {
+ return Unsigned(WordAnd(WordShr(word, static_cast<int>(shift)),
+ IntPtrConstant(mask >> shift)));
+ }
}
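The DecodeWord32/DecodeWord rewrite above drops the masking AND whenever the field extends to the top of the word, because the shift alone already isolates it. A standalone check of that condition:

```cpp
#include <cassert>
#include <cstdint>
#include <limits>

// Mirrors the new condition: when every bit above `shift` belongs to `mask`,
// the field reaches the top of the word and the AND is redundant.
uint32_t Decode(uint32_t word, uint32_t shift, uint32_t mask) {
  constexpr uint32_t kMax = std::numeric_limits<uint32_t>::max();
  if ((kMax >> shift) == ((kMax & mask) >> shift)) {
    return word >> shift;  // shortened path, no masking needed
  }
  return (word >> shift) & (mask >> shift);
}

int main() {
  // Field in bits 28..31: the mask extends to the top bit, the AND is skipped.
  assert(Decode(0xF0000000u, 28, 0xF0000000u) == 0xF);
  // Field in bits 4..7: bits exist above the mask, the AND is still needed.
  assert(Decode(0xFFF0u, 4, 0x00F0u) == 0xF);
  return 0;
}
```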
TNode<Word32T> CodeStubAssembler::UpdateWord32(TNode<Word32T> word,
@@ -8871,9 +8938,9 @@ void CodeStubAssembler::ForEachEnumerableOwnProperty(
{
Label slow_load(this, Label::kDeferred);
- var_value = CallGetterIfAccessor(var_value.value(), object,
- var_details.value(), context,
- object, &slow_load, kCallJSGetter);
+ var_value = CallGetterIfAccessor(
+ var_value.value(), object, var_details.value(), context, object,
+ next_key, &slow_load, kCallJSGetter);
Goto(&callback);
BIND(&slow_load);
@@ -9325,8 +9392,8 @@ template void CodeStubAssembler::LoadPropertyFromDictionary(
// result of the getter call.
TNode<Object> CodeStubAssembler::CallGetterIfAccessor(
TNode<Object> value, TNode<HeapObject> holder, TNode<Uint32T> details,
- TNode<Context> context, TNode<Object> receiver, Label* if_bailout,
- GetOwnPropertyMode mode) {
+ TNode<Context> context, TNode<Object> receiver, TNode<Object> name,
+ Label* if_bailout, GetOwnPropertyMode mode) {
TVARIABLE(Object, var_value, value);
Label done(this), if_accessor_info(this, Label::kDeferred);
@@ -9354,13 +9421,16 @@ TNode<Object> CodeStubAssembler::CallGetterIfAccessor(
BIND(&if_callable);
{
- // Call the accessor.
+ // Call the accessor. No need to check side-effect mode here, since it
+ // will be checked later in DebugOnFunctionCall.
var_value = Call(context, getter, receiver);
Goto(&done);
}
BIND(&if_function_template_info);
{
+ Label runtime(this, Label::kDeferred);
+ GotoIf(IsSideEffectFreeDebuggingActive(), &runtime);
TNode<HeapObject> cached_property_name = LoadObjectField<HeapObject>(
getter, FunctionTemplateInfo::kCachedPropertyNameOffset);
GotoIfNot(IsTheHole(cached_property_name), if_bailout);
@@ -9371,6 +9441,13 @@ TNode<Object> CodeStubAssembler::CallGetterIfAccessor(
Builtins::kCallFunctionTemplate_CheckAccessAndCompatibleReceiver,
creation_context, getter, IntPtrConstant(0), receiver);
Goto(&done);
+
+ BIND(&runtime);
+ {
+ var_value = CallRuntime(Runtime::kGetProperty, context, holder, name,
+ receiver);
+ Goto(&done);
+ }
}
} else {
Goto(&done);
@@ -9505,7 +9582,7 @@ void CodeStubAssembler::TryGetOwnProperty(
}
TNode<Object> value =
CallGetterIfAccessor(var_value->value(), object, var_details->value(),
- context, receiver, if_bailout, mode);
+ context, receiver, unique_name, if_bailout, mode);
*var_value = value;
Goto(if_found_value);
}
@@ -9554,6 +9631,7 @@ void CodeStubAssembler::TryLookupElement(
BIGUINT64_ELEMENTS,
BIGINT64_ELEMENTS,
};
+ // TODO(v8:11111): Support RAB / GSAB.
Label* labels[] = {
&if_isobjectorsmi, &if_isobjectorsmi, &if_isobjectorsmi,
&if_isobjectorsmi, &if_isobjectorsmi, &if_isobjectorsmi,
@@ -10811,6 +10889,12 @@ void CodeStubAssembler::EmitElementStore(
TNode<Context> context, TVariable<Object>* maybe_converted_value) {
CSA_ASSERT(this, Word32BinaryNot(IsJSProxy(object)));
+ // TODO(v8:11111): Fast path for RAB / GSAB backed TypedArrays.
+ if (IsRabGsabTypedArrayElementsKind(elements_kind)) {
+ GotoIf(Int32TrueConstant(), bailout);
+ return;
+ }
+
TNode<FixedArrayBase> elements = LoadElements(object);
if (!(IsSmiOrObjectElementsKind(elements_kind) ||
IsSealedElementsKind(elements_kind) ||
@@ -11057,6 +11141,8 @@ void CodeStubAssembler::TrapAllocationMemento(TNode<JSObject> object,
// Bail out if the object is not in new space.
TNode<IntPtrT> object_word = BitcastTaggedToWord(object);
+ // TODO(v8:11641): Skip TrapAllocationMemento when allocation-site
+ // tracking is disabled.
TNode<IntPtrT> object_page = PageFromAddress(object_word);
{
TNode<IntPtrT> page_flags =
@@ -11102,15 +11188,19 @@ void CodeStubAssembler::TrapAllocationMemento(TNode<JSObject> object,
// Memento map check.
BIND(&map_check);
{
- TNode<Object> memento_map = LoadObjectField(object, kMementoMapOffset);
- Branch(TaggedEqual(memento_map, AllocationMementoMapConstant()),
- memento_found, &no_memento_found);
+ TNode<AnyTaggedT> maybe_mapword =
+ LoadObjectField(object, kMementoMapOffset);
+ TNode<AnyTaggedT> memento_mapword =
+ LoadRootMapWord(RootIndex::kAllocationMementoMap);
+ Branch(TaggedEqual(maybe_mapword, memento_mapword), memento_found,
+ &no_memento_found);
}
BIND(&no_memento_found);
Comment("] TrapAllocationMemento");
}
TNode<IntPtrT> CodeStubAssembler::PageFromAddress(TNode<IntPtrT> address) {
+ if (FLAG_enable_third_party_heap) Unreachable();
return WordAnd(address, IntPtrConstant(~kPageAlignmentMask));
}
@@ -11323,7 +11413,12 @@ void CodeStubAssembler::InitializeFieldsWithRoot(TNode<HeapObject> object,
CSA_SLOW_ASSERT(this, TaggedIsNotSmi(object));
start_offset = IntPtrAdd(start_offset, IntPtrConstant(-kHeapObjectTag));
end_offset = IntPtrAdd(end_offset, IntPtrConstant(-kHeapObjectTag));
- TNode<Object> root_value = LoadRoot(root_index);
+ TNode<AnyTaggedT> root_value;
+ if (root_index == RootIndex::kOnePointerFillerMap) {
+ root_value = LoadRootMapWord(root_index);
+ } else {
+ root_value = LoadRoot(root_index);
+ }
BuildFastLoop<IntPtrT>(
end_offset, start_offset,
[=](TNode<IntPtrT> current) {
@@ -13597,6 +13692,149 @@ TNode<UintPtrT> CodeStubAssembler::LoadJSTypedArrayLength(
return LoadObjectField<UintPtrT>(typed_array, JSTypedArray::kLengthOffset);
}
+// ES #sec-integerindexedobjectlength
+TNode<UintPtrT> CodeStubAssembler::LoadVariableLengthJSTypedArrayLength(
+ TNode<JSTypedArray> array, TNode<JSArrayBuffer> buffer, Label* miss) {
+ Label is_gsab(this), is_rab(this), end(this);
+ TVARIABLE(UintPtrT, result);
+
+ Branch(IsSharedArrayBuffer(buffer), &is_gsab, &is_rab);
+ BIND(&is_gsab);
+ {
+ // Non-length-tracking GSAB-backed TypedArrays shouldn't end up here.
+ CSA_ASSERT(this, IsLengthTrackingTypedArray(array));
+ // Read the byte length from the BackingStore.
+ const TNode<ExternalReference> length_function = ExternalConstant(
+ ExternalReference::length_tracking_gsab_backed_typed_array_length());
+ TNode<ExternalReference> isolate_ptr =
+ ExternalConstant(ExternalReference::isolate_address(isolate()));
+ result = UncheckedCast<UintPtrT>(
+ CallCFunction(length_function, MachineType::UintPtr(),
+ std::make_pair(MachineType::Pointer(), isolate_ptr),
+ std::make_pair(MachineType::AnyTagged(), array)));
+ Goto(&end);
+ }
+
+ BIND(&is_rab);
+ {
+ GotoIf(IsDetachedBuffer(buffer), miss);
+
+ TNode<UintPtrT> buffer_byte_length = LoadJSArrayBufferByteLength(buffer);
+ TNode<UintPtrT> array_byte_offset = LoadJSArrayBufferViewByteOffset(array);
+
+ Label is_length_tracking(this), not_length_tracking(this);
+ Branch(IsLengthTrackingTypedArray(array), &is_length_tracking,
+ &not_length_tracking);
+
+ BIND(&is_length_tracking);
+ {
+ // The backing RAB might have been shrunk so that the start of the
+ // TypedArray is already out of bounds.
+ GotoIfNot(UintPtrLessThanOrEqual(array_byte_offset, buffer_byte_length),
+ miss);
+ // length = (buffer_byte_length - byte_offset) / element_size
+ // Conversion to signed is OK since buffer_byte_length <
+ // JSArrayBuffer::kMaxByteLength.
+ TNode<IntPtrT> element_size =
+ RabGsabElementsKindToElementByteSize(LoadElementsKind(array));
+ TNode<IntPtrT> length =
+ IntPtrDiv(Signed(UintPtrSub(buffer_byte_length, array_byte_offset)),
+ element_size);
+ result = Unsigned(length);
+ Goto(&end);
+ }
+
+ BIND(&not_length_tracking);
+ {
+ // Check if the backing RAB has shrunk so that the buffer is out of
+ // bounds.
+ TNode<UintPtrT> array_byte_length =
+ LoadJSArrayBufferViewByteLength(array);
+ GotoIfNot(UintPtrGreaterThanOrEqual(
+ buffer_byte_length,
+ UintPtrAdd(array_byte_offset, array_byte_length)),
+ miss);
+ result = LoadJSTypedArrayLength(array);
+ Goto(&end);
+ }
+ }
+ BIND(&end);
+ return result.value();
+}
+
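For a length-tracking view over a resizable buffer, the code above computes length = (buffer_byte_length - byte_offset) / element_size after verifying that the view's start is still in bounds. A rough standalone sketch of the same arithmetic, with plain sizes instead of CSA nodes:

```cpp
#include <cassert>
#include <cstddef>

// Returning false stands in for jumping to the miss label.
bool VariableLength(std::size_t buffer_byte_length, std::size_t byte_offset,
                    std::size_t element_size, std::size_t* out_length) {
  if (byte_offset > buffer_byte_length) return false;  // view start out of bounds
  *out_length = (buffer_byte_length - byte_offset) / element_size;
  return true;
}

int main() {
  std::size_t length = 0;
  // 40-byte buffer, view starts at byte 8, 8-byte (Float64) elements.
  assert(VariableLength(40, 8, 8, &length) && length == 4);
  // The buffer was shrunk to 4 bytes; offset 8 is now out of bounds.
  assert(!VariableLength(4, 8, 8, &length));
  return 0;
}
```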
+// ES #sec-integerindexedobjectbytelength
+TNode<UintPtrT> CodeStubAssembler::LoadVariableLengthJSTypedArrayByteLength(
+ TNode<Context> context, TNode<JSTypedArray> array,
+ TNode<JSArrayBuffer> buffer) {
+ Label miss(this), end(this);
+ TVARIABLE(UintPtrT, result);
+
+ TNode<UintPtrT> length =
+ LoadVariableLengthJSTypedArrayLength(array, buffer, &miss);
+ TNode<IntPtrT> element_size =
+ RabGsabElementsKindToElementByteSize(LoadElementsKind(array));
+ // Conversion to signed is OK since length < JSArrayBuffer::kMaxByteLength.
+ TNode<IntPtrT> byte_length = IntPtrMul(Signed(length), element_size);
+ result = Unsigned(byte_length);
+ Goto(&end);
+ BIND(&miss);
+ {
+ result = UintPtrConstant(0);
+ Goto(&end);
+ }
+ BIND(&end);
+ return result.value();
+}
+
+TNode<IntPtrT> CodeStubAssembler::RabGsabElementsKindToElementByteSize(
+ TNode<Int32T> elements_kind) {
+ TVARIABLE(IntPtrT, result);
+ Label elements_8(this), elements_16(this), elements_32(this),
+ elements_64(this), not_found(this), end(this);
+ int32_t elements_kinds[] = {
+ RAB_GSAB_UINT8_ELEMENTS, RAB_GSAB_UINT8_CLAMPED_ELEMENTS,
+ RAB_GSAB_INT8_ELEMENTS, RAB_GSAB_UINT16_ELEMENTS,
+ RAB_GSAB_INT16_ELEMENTS, RAB_GSAB_UINT32_ELEMENTS,
+ RAB_GSAB_INT32_ELEMENTS, RAB_GSAB_FLOAT32_ELEMENTS,
+ RAB_GSAB_FLOAT64_ELEMENTS, RAB_GSAB_BIGINT64_ELEMENTS,
+ RAB_GSAB_BIGUINT64_ELEMENTS};
+ Label* elements_kind_labels[] = {&elements_8, &elements_8, &elements_8,
+ &elements_16, &elements_16, &elements_32,
+ &elements_32, &elements_32, &elements_64,
+ &elements_64, &elements_64};
+ const size_t kTypedElementsKindCount =
+ LAST_RAB_GSAB_FIXED_TYPED_ARRAY_ELEMENTS_KIND -
+ FIRST_RAB_GSAB_FIXED_TYPED_ARRAY_ELEMENTS_KIND + 1;
+ DCHECK_EQ(kTypedElementsKindCount, arraysize(elements_kinds));
+ DCHECK_EQ(kTypedElementsKindCount, arraysize(elements_kind_labels));
+ Switch(elements_kind, &not_found, elements_kinds, elements_kind_labels,
+ kTypedElementsKindCount);
+ BIND(&elements_8);
+ {
+ result = IntPtrConstant(1);
+ Goto(&end);
+ }
+ BIND(&elements_16);
+ {
+ result = IntPtrConstant(2);
+ Goto(&end);
+ }
+ BIND(&elements_32);
+ {
+ result = IntPtrConstant(4);
+ Goto(&end);
+ }
+ BIND(&elements_64);
+ {
+ result = IntPtrConstant(8);
+ Goto(&end);
+ }
+ BIND(&not_found);
+ { Unreachable(); }
+ BIND(&end);
+ return result.value();
+}
+
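RabGsabElementsKindToElementByteSize above groups the RAB/GSAB element kinds into four byte-size buckets. The mapping, sketched with a placeholder enum rather than V8's ElementsKind:

```cpp
#include <cassert>

// Stand-in enum; it only mirrors the grouping of the Switch above.
enum class Lane { k8Bit, k16Bit, k32Bit, k64Bit };

int ElementByteSize(Lane lane) {
  switch (lane) {
    case Lane::k8Bit:  return 1;  // Uint8, Int8, Uint8Clamped
    case Lane::k16Bit: return 2;  // Uint16, Int16
    case Lane::k32Bit: return 4;  // Uint32, Int32, Float32
    case Lane::k64Bit: return 8;  // Float64, BigInt64, BigUint64
  }
  return 0;  // unreachable for valid lanes, mirrors the not_found label
}

int main() {
  assert(ElementByteSize(Lane::k32Bit) == 4);
  assert(ElementByteSize(Lane::k64Bit) == 8);
  return 0;
}
```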
TNode<JSArrayBuffer> CodeStubAssembler::GetTypedArrayBuffer(
TNode<Context> context, TNode<JSTypedArray> array) {
Label call_runtime(this), done(this);
@@ -13799,10 +14037,15 @@ TNode<BoolT> CodeStubAssembler::IsDebugActive() {
return Word32NotEqual(is_debug_active, Int32Constant(0));
}
-TNode<BoolT> CodeStubAssembler::IsPromiseHookEnabled() {
- const TNode<RawPtrT> promise_hook = Load<RawPtrT>(
- ExternalConstant(ExternalReference::promise_hook_address(isolate())));
- return WordNotEqual(promise_hook, IntPtrConstant(0));
+TNode<BoolT> CodeStubAssembler::IsSideEffectFreeDebuggingActive() {
+ TNode<Uint8T> debug_execution_mode = Load<Uint8T>(ExternalConstant(
+ ExternalReference::debug_execution_mode_address(isolate())));
+
+ TNode<BoolT> is_active =
+ Word32Equal(debug_execution_mode,
+ Int32Constant(DebugInfo::ExecutionMode::kSideEffects));
+
+ return is_active;
}
TNode<BoolT> CodeStubAssembler::HasAsyncEventDelegate() {
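IsSideEffectFreeDebuggingActive above reads the isolate's debug execution mode and compares it against DebugInfo::ExecutionMode::kSideEffects; CallGetterIfAccessor uses it to detour FunctionTemplateInfo getters to the runtime while side-effect-free evaluation is active. A trivial standalone sketch; the enum values here are illustrative:

```cpp
#include <cassert>
#include <cstdint>

// Stand-in for DebugInfo::ExecutionMode.
enum class ExecutionMode : uint8_t { kBreakpoints = 0, kSideEffects = 1 };

bool IsSideEffectFreeDebuggingActive(ExecutionMode mode) {
  return mode == ExecutionMode::kSideEffects;
}

int main() {
  // Normal execution: the inline path that calls the API getter stays valid.
  assert(!IsSideEffectFreeDebuggingActive(ExecutionMode::kBreakpoints));
  // Side-effect-free evaluation: the CSA code branches to the runtime call.
  assert(IsSideEffectFreeDebuggingActive(ExecutionMode::kSideEffects));
  return 0;
}
```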
@@ -13811,23 +14054,40 @@ TNode<BoolT> CodeStubAssembler::HasAsyncEventDelegate() {
return WordNotEqual(async_event_delegate, IntPtrConstant(0));
}
-TNode<BoolT> CodeStubAssembler::IsPromiseHookEnabledOrHasAsyncEventDelegate() {
- const TNode<Uint8T> promise_hook_or_async_event_delegate =
- Load<Uint8T>(ExternalConstant(
- ExternalReference::promise_hook_or_async_event_delegate_address(
- isolate())));
- return Word32NotEqual(promise_hook_or_async_event_delegate, Int32Constant(0));
+TNode<Uint32T> CodeStubAssembler::PromiseHookFlags() {
+ return Load<Uint32T>(ExternalConstant(
+ ExternalReference::promise_hook_flags_address(isolate())));
+}
+
+TNode<BoolT> CodeStubAssembler::IsAnyPromiseHookEnabled(TNode<Uint32T> flags) {
+ uint32_t mask = Isolate::PromiseHookFields::HasContextPromiseHook::kMask |
+ Isolate::PromiseHookFields::HasIsolatePromiseHook::kMask;
+ return IsSetWord32(flags, mask);
+}
+
+TNode<BoolT> CodeStubAssembler::IsContextPromiseHookEnabled(
+ TNode<Uint32T> flags) {
+ return IsSetWord32<Isolate::PromiseHookFields::HasContextPromiseHook>(flags);
}
TNode<BoolT> CodeStubAssembler::
- IsPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate() {
- const TNode<Uint8T> promise_hook_or_debug_is_active_or_async_event_delegate =
- Load<Uint8T>(ExternalConstant(
- ExternalReference::
- promise_hook_or_debug_is_active_or_async_event_delegate_address(
- isolate())));
- return Word32NotEqual(promise_hook_or_debug_is_active_or_async_event_delegate,
- Int32Constant(0));
+ IsIsolatePromiseHookEnabledOrHasAsyncEventDelegate(TNode<Uint32T> flags) {
+ uint32_t mask = Isolate::PromiseHookFields::HasIsolatePromiseHook::kMask |
+ Isolate::PromiseHookFields::HasAsyncEventDelegate::kMask;
+ return IsSetWord32(flags, mask);
+}
+
+TNode<BoolT> CodeStubAssembler::
+ IsIsolatePromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate(
+ TNode<Uint32T> flags) {
+ uint32_t mask = Isolate::PromiseHookFields::HasIsolatePromiseHook::kMask |
+ Isolate::PromiseHookFields::HasAsyncEventDelegate::kMask |
+ Isolate::PromiseHookFields::IsDebugActive::kMask;
+ return IsSetWord32(flags, mask);
+}
+
+TNode<BoolT> CodeStubAssembler::NeedsAnyPromiseHooks(TNode<Uint32T> flags) {
+ return Word32NotEqual(flags, Int32Constant(0));
}
TNode<Code> CodeStubAssembler::LoadBuiltin(TNode<Smi> builtin_id) {
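The promise-hook refactor above collapses several separately loaded booleans into one 32-bit flag word read from promise_hook_flags_address, with each predicate testing a mask. A hedged sketch of that layout; the bit positions below are placeholders for Isolate::PromiseHookFields, not the real encoding:

```cpp
#include <cassert>
#include <cstdint>

// Placeholder bit assignments; the real masks come from
// Isolate::PromiseHookFields and are not reproduced here.
constexpr uint32_t kHasContextPromiseHook = 1u << 0;
constexpr uint32_t kHasIsolatePromiseHook = 1u << 1;
constexpr uint32_t kHasAsyncEventDelegate = 1u << 2;
constexpr uint32_t kIsDebugActive = 1u << 3;

bool IsAnyPromiseHookEnabled(uint32_t flags) {
  return (flags & (kHasContextPromiseHook | kHasIsolatePromiseHook)) != 0;
}

bool IsIsolateHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate(uint32_t flags) {
  return (flags & (kHasIsolatePromiseHook | kHasAsyncEventDelegate |
                   kIsDebugActive)) != 0;
}

bool NeedsAnyPromiseHooks(uint32_t flags) { return flags != 0; }

int main() {
  uint32_t flags = 0;
  assert(!NeedsAnyPromiseHooks(flags));
  flags |= kHasAsyncEventDelegate;
  assert(NeedsAnyPromiseHooks(flags));  // any set bit forces the slow path
  assert(!IsAnyPromiseHookEnabled(flags));
  assert(IsIsolateHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate(flags));
  flags |= kHasContextPromiseHook;
  assert(IsAnyPromiseHookEnabled(flags));
  return 0;
}
```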
diff --git a/chromium/v8/src/codegen/code-stub-assembler.h b/chromium/v8/src/codegen/code-stub-assembler.h
index 72b8fbc8a80..9b54b5014e3 100644
--- a/chromium/v8/src/codegen/code-stub-assembler.h
+++ b/chromium/v8/src/codegen/code-stub-assembler.h
@@ -67,6 +67,7 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
AsyncIteratorValueUnwrapSharedFun) \
V(MapIteratorProtector, map_iterator_protector, MapIteratorProtector) \
V(NoElementsProtector, no_elements_protector, NoElementsProtector) \
+ V(MegaDOMProtector, mega_dom_protector, MegaDOMProtector) \
V(NumberStringCache, number_string_cache, NumberStringCache) \
V(PromiseAllResolveElementSharedFun, promise_all_resolve_element_shared_fun, \
PromiseAllResolveElementSharedFun) \
@@ -157,6 +158,7 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
V(ManyClosuresCellMap, many_closures_cell_map, ManyClosuresCellMap) \
V(match_symbol, match_symbol, MatchSymbol) \
V(megamorphic_symbol, megamorphic_symbol, MegamorphicSymbol) \
+ V(mega_dom_symbol, mega_dom_symbol, MegaDOMSymbol) \
V(message_string, message_string, MessageString) \
V(minus_Infinity_string, minus_Infinity_string, MinusInfinityString) \
V(MinusZeroValue, minus_zero_value, MinusZero) \
@@ -1088,7 +1090,19 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
}
// Load a field from an object on the heap.
template <class T, typename std::enable_if<
- std::is_convertible<TNode<T>, TNode<Object>>::value,
+ std::is_convertible<TNode<T>, TNode<Object>>::value &&
+ std::is_base_of<T, Map>::value,
+ int>::type = 0>
+ TNode<T> LoadObjectField(TNode<HeapObject> object, int offset) {
+ const MachineType machine_type = offset == HeapObject::kMapOffset
+ ? MachineType::MapInHeader()
+ : MachineTypeOf<T>::value;
+ return CAST(LoadFromObject(machine_type, object,
+ IntPtrConstant(offset - kHeapObjectTag)));
+ }
+ template <class T, typename std::enable_if<
+ std::is_convertible<TNode<T>, TNode<Object>>::value &&
+ !std::is_base_of<T, Map>::value,
int>::type = 0>
TNode<T> LoadObjectField(TNode<HeapObject> object, int offset) {
return CAST(LoadFromObject(MachineTypeOf<T>::value, object,
@@ -1163,6 +1177,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
std::is_convertible<TNode<T>, TNode<Object>>::value,
int>::type = 0>
TNode<T> LoadReference(Reference reference) {
+ if (IsMapOffsetConstant(reference.offset)) {
+ TNode<Map> map = LoadMap(CAST(reference.object));
+ DCHECK((std::is_base_of<T, Map>::value));
+ return ReinterpretCast<T>(map);
+ }
+
TNode<IntPtrT> offset =
IntPtrSub(reference.offset, IntPtrConstant(kHeapObjectTag));
CSA_ASSERT(this, TaggedIsNotSmi(reference.object));
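The LoadObjectField/LoadReference changes above split the tagged-load path with std::enable_if on std::is_base_of<T, Map>, so loads whose static type could legally name the map slot go through the map-aware machine type (MachineType::MapInHeader()). A compile-time sketch of that SFINAE split, using stand-in types rather than V8's object hierarchy:

```cpp
#include <cassert>
#include <cstring>
#include <type_traits>

// Stand-in hierarchy, not V8's real Object/HeapObject/Map classes.
struct Object {};
struct HeapObject : Object {};
struct Map : HeapObject {};
struct String : HeapObject {};

// Selected when T is Map or one of its bases: the field could be the map slot.
template <class T, typename std::enable_if<std::is_base_of<T, Map>::value,
                                           int>::type = 0>
const char* LoadKind() { return "map-aware"; }

// Selected for every other field type: a plain tagged load.
template <class T, typename std::enable_if<!std::is_base_of<T, Map>::value,
                                           int>::type = 0>
const char* LoadKind() { return "plain"; }

int main() {
  assert(std::strcmp(LoadKind<Map>(), "map-aware") == 0);
  assert(std::strcmp(LoadKind<String>(), "plain") == 0);
  return 0;
}
```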
@@ -1175,6 +1195,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
std::is_same<T, MaybeObject>::value,
int>::type = 0>
TNode<T> LoadReference(Reference reference) {
+ DCHECK(!IsMapOffsetConstant(reference.offset));
TNode<IntPtrT> offset =
IntPtrSub(reference.offset, IntPtrConstant(kHeapObjectTag));
return UncheckedCast<T>(
@@ -1185,6 +1206,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
std::is_same<T, MaybeObject>::value,
int>::type = 0>
void StoreReference(Reference reference, TNode<T> value) {
+ if (IsMapOffsetConstant(reference.offset)) {
+ DCHECK((std::is_base_of<T, Map>::value));
+ return StoreMap(CAST(reference.object), ReinterpretCast<Map>(value));
+ }
MachineRepresentation rep = MachineRepresentationOf<T>::value;
StoreToObjectWriteBarrier write_barrier = StoreToObjectWriteBarrier::kFull;
if (std::is_same<T, Smi>::value) {
@@ -1201,6 +1226,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
std::is_convertible<TNode<T>, TNode<UntaggedT>>::value,
int>::type = 0>
void StoreReference(Reference reference, TNode<T> value) {
+ DCHECK(!IsMapOffsetConstant(reference.offset));
TNode<IntPtrT> offset =
IntPtrSub(reference.offset, IntPtrConstant(kHeapObjectTag));
StoreToObject(MachineRepresentationOf<T>::value, reference.object, offset,
@@ -2346,6 +2372,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// JSProxy or an object with interceptors.
TNode<BoolT> InstanceTypeEqual(TNode<Int32T> instance_type, int type);
TNode<BoolT> IsNoElementsProtectorCellInvalid();
+ TNode<BoolT> IsMegaDOMProtectorCellInvalid();
TNode<BoolT> IsArrayIteratorProtectorCellInvalid();
TNode<BoolT> IsBigIntInstanceType(TNode<Int32T> instance_type);
TNode<BoolT> IsBigInt(TNode<HeapObject> object);
@@ -2395,6 +2422,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<BoolT> IsJSObjectInstanceType(TNode<Int32T> instance_type);
TNode<BoolT> IsJSObjectMap(TNode<Map> map);
TNode<BoolT> IsJSObject(TNode<HeapObject> object);
+ TNode<BoolT> IsJSApiObjectInstanceType(TNode<Int32T> instance_type);
+ TNode<BoolT> IsJSApiObjectMap(TNode<Map> map);
+ TNode<BoolT> IsJSApiObject(TNode<HeapObject> object);
TNode<BoolT> IsJSFinalizationRegistryMap(TNode<Map> map);
TNode<BoolT> IsJSFinalizationRegistry(TNode<HeapObject> object);
TNode<BoolT> IsJSPromiseMap(TNode<Map> map);
@@ -3451,6 +3481,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Debug helpers
TNode<BoolT> IsDebugActive();
+ TNode<BoolT> IsSideEffectFreeDebuggingActive();
// JSArrayBuffer helpers
TNode<RawPtrT> LoadJSArrayBufferBackingStorePtr(
@@ -3464,6 +3495,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<JSArrayBufferView> array_buffer_view);
TNode<UintPtrT> LoadJSArrayBufferViewByteLength(
TNode<JSArrayBufferView> array_buffer_view);
+
TNode<UintPtrT> LoadJSArrayBufferViewByteOffset(
TNode<JSArrayBufferView> array_buffer_view);
void ThrowIfArrayBufferViewBufferIsDetached(
@@ -3472,6 +3504,17 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// JSTypedArray helpers
TNode<UintPtrT> LoadJSTypedArrayLength(TNode<JSTypedArray> typed_array);
+ // Helper for length tracking JSTypedArrays and JSTypedArrays backed by
+ // ResizableArrayBuffer.
+ TNode<UintPtrT> LoadVariableLengthJSTypedArrayLength(
+ TNode<JSTypedArray> array, TNode<JSArrayBuffer> buffer, Label* miss);
+ // Helper for length tracking JSTypedArrays and JSTypedArrays backed by
+ // ResizableArrayBuffer.
+ TNode<UintPtrT> LoadVariableLengthJSTypedArrayByteLength(
+ TNode<Context> context, TNode<JSTypedArray> array,
+ TNode<JSArrayBuffer> buffer);
+ TNode<IntPtrT> RabGsabElementsKindToElementByteSize(
+ TNode<Int32T> elementsKind);
TNode<RawPtrT> LoadJSTypedArrayDataPtr(TNode<JSTypedArray> typed_array);
TNode<JSArrayBuffer> GetTypedArrayBuffer(TNode<Context> context,
TNode<JSTypedArray> array);
@@ -3504,10 +3547,35 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Context> context);
// Promise helpers
- TNode<BoolT> IsPromiseHookEnabled();
+ TNode<Uint32T> PromiseHookFlags();
TNode<BoolT> HasAsyncEventDelegate();
- TNode<BoolT> IsPromiseHookEnabledOrHasAsyncEventDelegate();
- TNode<BoolT> IsPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate();
+ TNode<BoolT> IsContextPromiseHookEnabled(TNode<Uint32T> flags);
+ TNode<BoolT> IsContextPromiseHookEnabled() {
+ return IsContextPromiseHookEnabled(PromiseHookFlags());
+ }
+ TNode<BoolT> IsAnyPromiseHookEnabled(TNode<Uint32T> flags);
+ TNode<BoolT> IsAnyPromiseHookEnabled() {
+ return IsAnyPromiseHookEnabled(PromiseHookFlags());
+ }
+ TNode<BoolT> IsIsolatePromiseHookEnabledOrHasAsyncEventDelegate(
+ TNode<Uint32T> flags);
+ TNode<BoolT> IsIsolatePromiseHookEnabledOrHasAsyncEventDelegate() {
+ return IsIsolatePromiseHookEnabledOrHasAsyncEventDelegate(
+ PromiseHookFlags());
+ }
+ TNode<BoolT>
+ IsIsolatePromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate(
+ TNode<Uint32T> flags);
+ TNode<BoolT>
+ IsIsolatePromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate() {
+ return IsIsolatePromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate(
+ PromiseHookFlags());
+ }
+
+ TNode<BoolT> NeedsAnyPromiseHooks(TNode<Uint32T> flags);
+ TNode<BoolT> NeedsAnyPromiseHooks() {
+ return NeedsAnyPromiseHooks(PromiseHookFlags());
+ }
// for..in helpers
void CheckPrototypeEnumCache(TNode<JSReceiver> receiver,
@@ -3568,6 +3636,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
}
int32_t ConstexprWord32Or(int32_t a, int32_t b) { return a | b; }
+ uint32_t ConstexprWord32Shl(uint32_t a, int32_t b) { return a << b; }
bool ConstexprUintPtrLessThan(uintptr_t a, uintptr_t b) { return a < b; }
@@ -3680,12 +3749,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
const ForEachKeyValueFunction& body,
Label* bailout);
- TNode<Object> CallGetterIfAccessor(TNode<Object> value,
- TNode<HeapObject> holder,
- TNode<Uint32T> details,
- TNode<Context> context,
- TNode<Object> receiver, Label* if_bailout,
- GetOwnPropertyMode mode = kCallJSGetter);
+ TNode<Object> CallGetterIfAccessor(
+ TNode<Object> value, TNode<HeapObject> holder, TNode<Uint32T> details,
+ TNode<Context> context, TNode<Object> receiver, TNode<Object> name,
+ Label* if_bailout, GetOwnPropertyMode mode = kCallJSGetter);
TNode<IntPtrT> TryToIntptr(TNode<Object> key, Label* if_not_intptr,
TVariable<Int32T>* var_instance_type = nullptr);
@@ -3884,6 +3951,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
return CodeAssembler::LoadRoot(root_index);
}
+ TNode<AnyTaggedT> LoadRootMapWord(RootIndex root_index) {
+ return CodeAssembler::LoadRootMapWord(root_index);
+ }
+
template <typename TIndex>
void StoreFixedArrayOrPropertyArrayElement(
TNode<UnionT<FixedArray, PropertyArray>> array, TNode<TIndex> index,
@@ -3923,6 +3994,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TVariable<Number>* var_result,
Label* if_bailout);
+ void AssertHasValidMap(TNode<HeapObject> object);
+
template <typename TValue>
void EmitElementStoreTypedArray(TNode<JSTypedArray> typed_array,
TNode<IntPtrT> key, TNode<Object> value,
diff --git a/chromium/v8/src/codegen/compilation-cache.cc b/chromium/v8/src/codegen/compilation-cache.cc
index 3941e56e6a6..ee50f8b0153 100644
--- a/chromium/v8/src/codegen/compilation-cache.cc
+++ b/chromium/v8/src/codegen/compilation-cache.cc
@@ -29,10 +29,9 @@ CompilationCache::CompilationCache(Isolate* isolate)
eval_global_(isolate),
eval_contextual_(isolate),
reg_exp_(isolate, kRegExpGenerations),
- code_(isolate),
enabled_script_and_eval_(true) {
CompilationSubCache* subcaches[kSubCacheCount] = {
- &script_, &eval_global_, &eval_contextual_, &reg_exp_, &code_};
+ &script_, &eval_global_, &eval_contextual_, &reg_exp_};
for (int i = 0; i < kSubCacheCount; ++i) {
subcaches_[i] = subcaches[i];
}
@@ -77,10 +76,6 @@ void CompilationCacheScript::Age() {
}
void CompilationCacheEval::Age() { AgeCustom(this); }
void CompilationCacheRegExp::Age() { AgeByGeneration(this); }
-void CompilationCacheCode::Age() {
- if (FLAG_trace_turbo_nci) CompilationCacheCode::TraceAgeing();
- AgeByGeneration(this);
-}
void CompilationSubCache::Iterate(RootVisitor* v) {
v->VisitRootPointers(Root::kCompilationCache, nullptr,
@@ -267,58 +262,6 @@ void CompilationCacheRegExp::Put(Handle<String> source, JSRegExp::Flags flags,
CompilationCacheTable::PutRegExp(isolate(), table, source, flags, data));
}
-MaybeHandle<Code> CompilationCacheCode::Lookup(Handle<SharedFunctionInfo> key) {
- // Make sure not to leak the table into the surrounding handle
- // scope. Otherwise, we risk keeping old tables around even after
- // having cleared the cache.
- HandleScope scope(isolate());
- MaybeHandle<Code> maybe_value;
- int generation = 0;
- for (; generation < generations(); generation++) {
- Handle<CompilationCacheTable> table = GetTable(generation);
- maybe_value = table->LookupCode(key);
- if (!maybe_value.is_null()) break;
- }
-
- if (maybe_value.is_null()) {
- isolate()->counters()->compilation_cache_misses()->Increment();
- return MaybeHandle<Code>();
- }
-
- Handle<Code> value = maybe_value.ToHandleChecked();
- if (generation != 0) Put(key, value); // Add to the first generation.
- isolate()->counters()->compilation_cache_hits()->Increment();
- return scope.CloseAndEscape(value);
-}
-
-void CompilationCacheCode::Put(Handle<SharedFunctionInfo> key,
- Handle<Code> value) {
- HandleScope scope(isolate());
- Handle<CompilationCacheTable> table = GetFirstTable();
- SetFirstTable(CompilationCacheTable::PutCode(isolate(), table, key, value));
-}
-
-void CompilationCacheCode::TraceAgeing() {
- DCHECK(FLAG_trace_turbo_nci);
- StdoutStream os;
- os << "NCI cache ageing: Removing oldest generation" << std::endl;
-}
-
-void CompilationCacheCode::TraceInsertion(Handle<SharedFunctionInfo> key,
- Handle<Code> value) {
- DCHECK(FLAG_trace_turbo_nci);
- StdoutStream os;
- os << "NCI cache insertion: " << Brief(*key) << ", " << Brief(*value)
- << std::endl;
-}
-
-void CompilationCacheCode::TraceHit(Handle<SharedFunctionInfo> key,
- Handle<Code> value) {
- DCHECK(FLAG_trace_turbo_nci);
- StdoutStream os;
- os << "NCI cache hit: " << Brief(*key) << ", " << Brief(*value) << std::endl;
-}
-
void CompilationCache::Remove(Handle<SharedFunctionInfo> function_info) {
if (!IsEnabledScriptAndEval()) return;
@@ -372,10 +315,6 @@ MaybeHandle<FixedArray> CompilationCache::LookupRegExp(Handle<String> source,
return reg_exp_.Lookup(source, flags);
}
-MaybeHandle<Code> CompilationCache::LookupCode(Handle<SharedFunctionInfo> sfi) {
- return code_.Lookup(sfi);
-}
-
void CompilationCache::PutScript(Handle<String> source,
LanguageMode language_mode,
Handle<SharedFunctionInfo> function_info) {
@@ -414,11 +353,6 @@ void CompilationCache::PutRegExp(Handle<String> source, JSRegExp::Flags flags,
reg_exp_.Put(source, flags, data);
}
-void CompilationCache::PutCode(Handle<SharedFunctionInfo> shared,
- Handle<Code> code) {
- code_.Put(shared, code);
-}
-
void CompilationCache::Clear() {
for (int i = 0; i < kSubCacheCount; i++) {
subcaches_[i]->Clear();
diff --git a/chromium/v8/src/codegen/compilation-cache.h b/chromium/v8/src/codegen/compilation-cache.h
index 0ed13e53b6d..d4f4ae52dca 100644
--- a/chromium/v8/src/codegen/compilation-cache.h
+++ b/chromium/v8/src/codegen/compilation-cache.h
@@ -150,32 +150,6 @@ class CompilationCacheRegExp : public CompilationSubCache {
DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheRegExp);
};
-// Sub-cache for Code objects. All code inserted into this cache must
-// be usable across different native contexts.
-class CompilationCacheCode : public CompilationSubCache {
- public:
- explicit CompilationCacheCode(Isolate* isolate)
- : CompilationSubCache(isolate, kGenerations) {}
-
- MaybeHandle<Code> Lookup(Handle<SharedFunctionInfo> key);
- void Put(Handle<SharedFunctionInfo> key, Handle<Code> value);
-
- void Age() override;
-
- // TODO(jgruber,v8:8888): For simplicity we use the generational
- // approach here, but could consider something else (or more
- // generations) in the future.
- static constexpr int kGenerations = 2;
-
- static void TraceAgeing();
- static void TraceInsertion(Handle<SharedFunctionInfo> key,
- Handle<Code> value);
- static void TraceHit(Handle<SharedFunctionInfo> key, Handle<Code> value);
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheCode);
-};
-
// The compilation cache keeps shared function infos for compiled
// scripts and evals. The shared function infos are looked up using
// the source string as the key. For regular expressions the
@@ -206,8 +180,6 @@ class V8_EXPORT_PRIVATE CompilationCache {
MaybeHandle<FixedArray> LookupRegExp(Handle<String> source,
JSRegExp::Flags flags);
- MaybeHandle<Code> LookupCode(Handle<SharedFunctionInfo> sfi);
-
// Associate the (source, kind) pair to the shared function
// info. This may overwrite an existing mapping.
void PutScript(Handle<String> source, LanguageMode language_mode,
@@ -225,8 +197,6 @@ class V8_EXPORT_PRIVATE CompilationCache {
void PutRegExp(Handle<String> source, JSRegExp::Flags flags,
Handle<FixedArray> data);
- void PutCode(Handle<SharedFunctionInfo> shared, Handle<Code> code);
-
// Clear the cache - also used to initialize the cache at startup.
void Clear();
@@ -269,9 +239,8 @@ class V8_EXPORT_PRIVATE CompilationCache {
CompilationCacheEval eval_global_;
CompilationCacheEval eval_contextual_;
CompilationCacheRegExp reg_exp_;
- CompilationCacheCode code_;
- static constexpr int kSubCacheCount = 5;
+ static constexpr int kSubCacheCount = 4;
CompilationSubCache* subcaches_[kSubCacheCount];
// Current enable state of the compilation cache for scripts and eval.
diff --git a/chromium/v8/src/codegen/compiler.cc b/chromium/v8/src/codegen/compiler.cc
index e46639d90a4..9de4ae24a3a 100644
--- a/chromium/v8/src/codegen/compiler.cc
+++ b/chromium/v8/src/codegen/compiler.cc
@@ -559,10 +559,10 @@ void InstallInterpreterTrampolineCopy(
script_name, line_num, column_num));
}
-template <typename LocalIsolate>
+template <typename IsolateT>
void InstallUnoptimizedCode(UnoptimizedCompilationInfo* compilation_info,
Handle<SharedFunctionInfo> shared_info,
- LocalIsolate* isolate) {
+ IsolateT* isolate) {
if (compilation_info->has_bytecode_array()) {
DCHECK(!shared_info->HasBytecodeArray()); // Only compiled once.
DCHECK(!compilation_info->has_asm_wasm_data());
@@ -585,7 +585,7 @@ void InstallUnoptimizedCode(UnoptimizedCompilationInfo* compilation_info,
#if V8_ENABLE_WEBASSEMBLY
DCHECK(compilation_info->has_asm_wasm_data());
// We should only have asm/wasm data when finalizing on the main thread.
- DCHECK((std::is_same<LocalIsolate, Isolate>::value));
+ DCHECK((std::is_same<IsolateT, Isolate>::value));
shared_info->set_asm_wasm_data(*compilation_info->asm_wasm_data());
shared_info->set_feedback_metadata(
ReadOnlyRoots(isolate).empty_feedback_metadata());
@@ -606,13 +606,15 @@ void LogUnoptimizedCompilation(Isolate* isolate,
RecordUnoptimizedCompilationStats(isolate, shared_info);
}
-template <typename LocalIsolate>
+template <typename IsolateT>
void EnsureSharedFunctionInfosArrayOnScript(Handle<Script> script,
ParseInfo* parse_info,
- LocalIsolate* isolate) {
+ IsolateT* isolate) {
DCHECK(parse_info->flags().is_toplevel());
- if (script->shared_function_infos().length() > 0) {
- DCHECK_EQ(script->shared_function_infos().length(),
+ if (script->shared_function_info_count() > 0) {
+ DCHECK_LE(script->shared_function_info_count(),
+ script->shared_function_infos().length());
+ DCHECK_EQ(script->shared_function_info_count(),
parse_info->max_function_literal_id() + 1);
return;
}
@@ -640,26 +642,6 @@ void UpdateSharedFunctionFlagsAfterCompilation(FunctionLiteral* literal,
shared_info.SetScopeInfo(*literal->scope()->scope_info());
}
-bool CanCompileWithBaseline(Isolate* isolate,
- Handle<SharedFunctionInfo> shared) {
- // Check if we actually have bytecode.
- if (!shared->HasBytecodeArray()) return false;
-
- // Do not optimize when debugger needs to hook into every call.
- if (isolate->debug()->needs_check_on_function_call()) return false;
-
- // Functions with breakpoints have to stay interpreted.
- if (shared->HasBreakInfo()) return false;
-
- // Do not baseline compile if sparkplug is disabled or function doesn't pass
- // sparkplug_filter.
- if (!FLAG_sparkplug || !shared->PassesFilter(FLAG_sparkplug_filter)) {
- return false;
- }
-
- return true;
-}
-
bool CompileSharedWithBaseline(Isolate* isolate,
Handle<SharedFunctionInfo> shared,
Compiler::ClearExceptionFlag flag,
@@ -671,7 +653,7 @@ bool CompileSharedWithBaseline(Isolate* isolate,
if (shared->HasBaselineData()) return true;
// Check if we actually can compile with baseline.
- if (!CanCompileWithBaseline(isolate, shared)) return false;
+ if (!CanCompileWithBaseline(isolate, *shared)) return false;
StackLimitCheck check(isolate);
if (check.JsHasOverflowed(kStackSpaceRequiredForCompilation * KB)) {
@@ -714,10 +696,10 @@ bool CompileSharedWithBaseline(Isolate* isolate,
// Finalize a single compilation job. This function can return
// RETRY_ON_MAIN_THREAD if the job cannot be finalized off-thread, in which case
// it should be safe to call it again on the main thread with the same job.
-template <typename LocalIsolate>
+template <typename IsolateT>
CompilationJob::Status FinalizeSingleUnoptimizedCompilationJob(
UnoptimizedCompilationJob* job, Handle<SharedFunctionInfo> shared_info,
- LocalIsolate* isolate,
+ IsolateT* isolate,
FinalizeUnoptimizedCompilationDataList*
finalize_unoptimized_compilation_data_list) {
UnoptimizedCompilationInfo* compilation_info = job->compilation_info();
@@ -736,9 +718,8 @@ CompilationJob::Status FinalizeSingleUnoptimizedCompilationJob(
isolate, shared_info, coverage_info, job->time_taken_to_execute(),
job->time_taken_to_finalize());
}
- DCHECK_IMPLIES(
- status == CompilationJob::RETRY_ON_MAIN_THREAD,
- (std::is_same<LocalIsolate, v8::internal::LocalIsolate>::value));
+ DCHECK_IMPLIES(status == CompilationJob::RETRY_ON_MAIN_THREAD,
+ (std::is_same<IsolateT, LocalIsolate>::value));
return status;
}
@@ -801,9 +782,9 @@ bool RecursivelyExecuteUnoptimizedCompilationJobs(
return true;
}
-template <typename LocalIsolate>
+template <typename IsolateT>
bool IterativelyExecuteAndFinalizeUnoptimizedCompilationJobs(
- LocalIsolate* isolate, Handle<SharedFunctionInfo> outer_shared_info,
+ IsolateT* isolate, Handle<SharedFunctionInfo> outer_shared_info,
Handle<Script> script, ParseInfo* parse_info,
AccountingAllocator* allocator, IsCompiledScope* is_compiled_scope,
FinalizeUnoptimizedCompilationDataList*
@@ -849,7 +830,7 @@ bool IterativelyExecuteAndFinalizeUnoptimizedCompilationJobs(
case CompilationJob::RETRY_ON_MAIN_THREAD:
// This should not happen on the main thread.
- DCHECK((!std::is_same<LocalIsolate, Isolate>::value));
+ DCHECK((!std::is_same<IsolateT, Isolate>::value));
DCHECK_NOT_NULL(jobs_to_retry_finalization_on_main_thread);
// Clear the literal and ParseInfo to prevent further attempts to
@@ -943,11 +924,9 @@ bool FinalizeDeferredUnoptimizedCompilationJobs(
V8_WARN_UNUSED_RESULT MaybeHandle<Code> GetCodeFromOptimizedCodeCache(
Handle<JSFunction> function, BytecodeOffset osr_offset,
CodeKind code_kind) {
- RuntimeCallTimerScope runtimeTimer(
- function->GetIsolate(),
- RuntimeCallCounterId::kCompileGetFromOptimizedCodeMap);
- Handle<SharedFunctionInfo> shared(function->shared(), function->GetIsolate());
Isolate* isolate = function->GetIsolate();
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kCompileGetFromOptimizedCodeMap);
+ Handle<SharedFunctionInfo> shared(function->shared(), isolate);
DisallowGarbageCollection no_gc;
Code code;
if (osr_offset.IsNone() && function->has_feedback_vector()) {
@@ -975,8 +954,6 @@ V8_WARN_UNUSED_RESULT MaybeHandle<Code> GetCodeFromOptimizedCodeCache(
}
void ClearOptimizedCodeCache(OptimizedCompilationInfo* compilation_info) {
- DCHECK(!CodeKindIsNativeContextIndependentJSFunction(
- compilation_info->code_kind()));
Handle<JSFunction> function = compilation_info->closure();
if (compilation_info->osr_offset().IsNone()) {
Handle<FeedbackVector> vector =
@@ -1015,31 +992,6 @@ void InsertCodeIntoOptimizedCodeCache(
}
}
-void InsertCodeIntoCompilationCache(Isolate* isolate,
- OptimizedCompilationInfo* info) {
- if (!CodeKindIsNativeContextIndependentJSFunction(info->code_kind())) return;
-
- DCHECK(info->osr_offset().IsNone());
-
- Handle<Code> code = info->code();
- DCHECK(!info->function_context_specializing());
-
- Handle<SharedFunctionInfo> sfi = info->shared_info();
- CompilationCache* cache = isolate->compilation_cache();
- cache->PutCode(sfi, code);
- DCHECK(!cache->LookupCode(sfi).is_null());
-
- sfi->set_may_have_cached_code(true);
-
- if (FLAG_trace_turbo_nci) CompilationCacheCode::TraceInsertion(sfi, code);
-}
-
-V8_WARN_UNUSED_RESULT MaybeHandle<Code> GetCodeFromCompilationCache(
- Isolate* isolate, Handle<SharedFunctionInfo> shared) {
- if (!shared->may_have_cached_code()) return {};
- return shared->TryGetCachedCode(isolate);
-}
-
// Runs PrepareJob in the proper compilation & canonical scopes. Handles will be
// allocated in a persistent handle scope that is detached and handed off to the
// {compilation_info} after PrepareJob.
@@ -1054,8 +1006,7 @@ bool PrepareJobWithHandleScope(OptimizedCompilationJob* job, Isolate* isolate,
bool GetOptimizedCodeNow(OptimizedCompilationJob* job, Isolate* isolate,
OptimizedCompilationInfo* compilation_info) {
TimerEventScope<TimerEventRecompileSynchronous> timer(isolate);
- RuntimeCallTimerScope runtimeTimer(
- isolate, RuntimeCallCounterId::kOptimizeNonConcurrent);
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kOptimizeNonConcurrent);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.OptimizeNonConcurrent");
@@ -1111,8 +1062,7 @@ bool GetOptimizedCodeLater(std::unique_ptr<OptimizedCompilationJob> job,
}
TimerEventScope<TimerEventRecompileSynchronous> timer(isolate);
- RuntimeCallTimerScope runtimeTimer(
- isolate, RuntimeCallCounterId::kOptimizeConcurrentPrepare);
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kOptimizeConcurrentPrepare);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.OptimizeConcurrentPrepare");
@@ -1134,11 +1084,10 @@ bool GetOptimizedCodeLater(std::unique_ptr<OptimizedCompilationJob> job,
function->SetOptimizationMarker(OptimizationMarker::kInOptimizationQueue);
}
- // Note: Usually the active tier is expected to be Ignition or NCI at this
- // point (in other words we don't expect to optimize if the function is
- // already TF-optimized). There is a special case for OSR though, for which
- // we *can* reach this point even if we've already generated non-OSR'd TF
- // code.
+ // Note: Usually the active tier is expected to be Ignition at this point (in
+ // other words we don't expect to optimize if the function is already
+ // TF-optimized). There is a special case for OSR though, for which we *can*
+ // reach this point even if we've already generated non-OSR'd TF code.
DCHECK(function->shared().HasBytecodeArray());
return true;
}
@@ -1148,13 +1097,7 @@ bool GetOptimizedCodeLater(std::unique_ptr<OptimizedCompilationJob> job,
Handle<Code> ContinuationForConcurrentOptimization(
Isolate* isolate, Handle<JSFunction> function) {
Handle<Code> cached_code;
- if (FLAG_turbo_nci && function->NextTier() == CodeKindForTopTier() &&
- GetCodeFromCompilationCache(isolate, handle(function->shared(), isolate))
- .ToHandle(&cached_code)) {
- // Tiering up to Turbofan and cached optimized code exists. Continue
- // execution there until TF optimization has finished.
- return cached_code;
- } else if (FLAG_turboprop && function->HasAvailableOptimizedCode()) {
+ if (FLAG_turboprop && function->HasAvailableOptimizedCode()) {
DCHECK(!FLAG_turboprop_as_toptier);
DCHECK(function->NextTier() == CodeKind::TURBOFAN);
// It is possible that we have marked a closure for TurboFan optimization
@@ -1179,23 +1122,26 @@ Handle<Code> ContinuationForConcurrentOptimization(
return BUILTIN_CODE(isolate, InterpreterEntryTrampoline);
}
+enum class GetOptimizedCodeResultHandling {
+ // Default behavior, i.e. install the result, insert into caches, etc.
+ kDefault,
+ // Used only for stress testing. The compilation result should be discarded.
+ kDiscardForTesting,
+};
+
MaybeHandle<Code> GetOptimizedCode(
- Handle<JSFunction> function, ConcurrencyMode mode, CodeKind code_kind,
- BytecodeOffset osr_offset = BytecodeOffset::None(),
- JavaScriptFrame* osr_frame = nullptr) {
+ Isolate* isolate, Handle<JSFunction> function, ConcurrencyMode mode,
+ CodeKind code_kind, BytecodeOffset osr_offset = BytecodeOffset::None(),
+ JavaScriptFrame* osr_frame = nullptr,
+ GetOptimizedCodeResultHandling result_handling =
+ GetOptimizedCodeResultHandling::kDefault) {
DCHECK(CodeKindIsOptimizedJSFunction(code_kind));
- Isolate* isolate = function->GetIsolate();
Handle<SharedFunctionInfo> shared(function->shared(), isolate);
// Make sure we clear the optimization marker on the function so that we
// don't try to re-optimize.
- // If compiling for NCI (which does not use the optimization marker), don't
- // touch the marker to avoid interfering with Turbofan compilation.
- if (!CodeKindIsNativeContextIndependentJSFunction(code_kind) &&
- function->HasOptimizationMarker()) {
- function->ClearOptimizationMarker();
- }
+ if (function->HasOptimizationMarker()) function->ClearOptimizationMarker();
if (shared->optimization_disabled() &&
shared->disable_optimization_reason() == BailoutReason::kNeverOptimize) {
@@ -1233,25 +1179,9 @@ MaybeHandle<Code> GetOptimizedCode(
DCHECK(shared->is_compiled());
function->feedback_vector().set_profiler_ticks(0);
- // Check the compilation cache (stored on the Isolate, shared between native
- // contexts).
- if (CodeKindIsNativeContextIndependentJSFunction(code_kind)) {
- DCHECK(osr_offset.IsNone());
-
- Handle<Code> cached_code;
- if (GetCodeFromCompilationCache(isolate, shared).ToHandle(&cached_code)) {
- CHECK_EQ(cached_code->kind(), CodeKind::NATIVE_CONTEXT_INDEPENDENT);
- if (FLAG_trace_turbo_nci) {
- CompilationCacheCode::TraceHit(shared, cached_code);
- }
- return cached_code;
- }
- }
-
VMState<COMPILER> state(isolate);
TimerEventScope<TimerEventOptimizeCode> optimize_code_timer(isolate);
- RuntimeCallTimerScope runtimeTimer(isolate,
- RuntimeCallCounterId::kOptimizeCode);
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kOptimizeCode);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.OptimizeCode");
DCHECK(!isolate->has_pending_exception());
@@ -1265,6 +1195,10 @@ MaybeHandle<Code> GetOptimizedCode(
has_script, osr_offset, osr_frame));
OptimizedCompilationInfo* compilation_info = job->compilation_info();
+ if (result_handling == GetOptimizedCodeResultHandling::kDiscardForTesting) {
+ compilation_info->set_discard_result_for_testing();
+ }
+
// Prepare the job and launch concurrent compilation, or compile now.
if (mode == ConcurrencyMode::kConcurrent) {
if (GetOptimizedCodeLater(std::move(job), isolate, compilation_info,
@@ -1274,7 +1208,6 @@ MaybeHandle<Code> GetOptimizedCode(
} else {
DCHECK_EQ(mode, ConcurrencyMode::kNotConcurrent);
if (GetOptimizedCodeNow(job.get(), isolate, compilation_info)) {
- InsertCodeIntoCompilationCache(isolate, compilation_info);
return compilation_info->code();
}
}
@@ -1283,13 +1216,30 @@ MaybeHandle<Code> GetOptimizedCode(
return {};
}
+// When --stress-concurrent-inlining is enabled, spawn concurrent jobs in
+// addition to non-concurrent compiles to increase coverage in mjsunit tests
+// (where most interesting compiles are non-concurrent). The result of the
+// compilation is thrown out.
+void SpawnDuplicateConcurrentJobForStressTesting(Isolate* isolate,
+ Handle<JSFunction> function,
+ ConcurrencyMode mode,
+ CodeKind code_kind) {
+ DCHECK(FLAG_stress_concurrent_inlining &&
+ isolate->concurrent_recompilation_enabled() &&
+ mode == ConcurrencyMode::kNotConcurrent &&
+ isolate->node_observer() == nullptr);
+ USE(GetOptimizedCode(isolate, function, ConcurrencyMode::kConcurrent,
+ code_kind, BytecodeOffset::None(), nullptr,
+ GetOptimizedCodeResultHandling::kDiscardForTesting));
+}
+
bool FailAndClearPendingException(Isolate* isolate) {
isolate->clear_pending_exception();
return false;
}
-template <typename LocalIsolate>
-bool PreparePendingException(LocalIsolate* isolate, ParseInfo* parse_info) {
+template <typename IsolateT>
+bool PreparePendingException(IsolateT* isolate, ParseInfo* parse_info) {
if (parse_info->pending_error_handler()->has_pending_error()) {
parse_info->pending_error_handler()->PrepareErrors(
isolate, parse_info->ast_value_factory());
@@ -1389,8 +1339,7 @@ void FinalizeUnoptimizedScriptCompilation(
FunctionLiteral* literal = it.first;
CompilerDispatcher::JobId job_id = it.second;
MaybeHandle<SharedFunctionInfo> maybe_shared_for_task =
- script->FindSharedFunctionInfo(isolate,
- literal->function_literal_id());
+ Script::FindSharedFunctionInfo(script, isolate, literal);
Handle<SharedFunctionInfo> shared_for_task;
if (maybe_shared_for_task.ToHandle(&shared_for_task)) {
dispatcher->RegisterSharedFunctionInfo(job_id, *shared_for_task);
@@ -1412,7 +1361,7 @@ void CompileAllWithBaseline(Isolate* isolate,
Handle<SharedFunctionInfo> shared_info = finalize_data.function_handle();
IsCompiledScope is_compiled_scope(*shared_info, isolate);
if (!is_compiled_scope.is_compiled()) continue;
- if (!CanCompileWithBaseline(isolate, shared_info)) continue;
+ if (!CanCompileWithBaseline(isolate, *shared_info)) continue;
CompileSharedWithBaseline(isolate, shared_info, Compiler::CLEAR_EXCEPTION,
&is_compiled_scope);
}
@@ -1420,9 +1369,9 @@ void CompileAllWithBaseline(Isolate* isolate,
// Create shared function info for top level and shared function infos array for
// inner functions.
-template <typename LocalIsolate>
+template <typename IsolateT>
Handle<SharedFunctionInfo> CreateTopLevelSharedFunctionInfo(
- ParseInfo* parse_info, Handle<Script> script, LocalIsolate* isolate) {
+ ParseInfo* parse_info, Handle<Script> script, IsolateT* isolate) {
EnsureSharedFunctionInfosArrayOnScript(script, parse_info, isolate);
DCHECK_EQ(kNoSourcePosition,
parse_info->literal()->function_token_position());
@@ -1440,10 +1389,9 @@ MaybeHandle<SharedFunctionInfo> CompileToplevel(
PostponeInterruptsScope postpone(isolate);
DCHECK(!isolate->native_context().is_null());
- RuntimeCallTimerScope runtimeTimer(
- isolate, parse_info->flags().is_eval()
- ? RuntimeCallCounterId::kCompileEval
- : RuntimeCallCounterId::kCompileScript);
+ RCS_SCOPE(isolate, parse_info->flags().is_eval()
+ ? RuntimeCallCounterId::kCompileEval
+ : RuntimeCallCounterId::kCompileScript);
VMState<BYTECODE_COMPILER> state(isolate);
if (parse_info->literal() == nullptr &&
!parsing::ParseProgram(parse_info, script, maybe_outer_scope_info,
@@ -1494,6 +1442,7 @@ MaybeHandle<SharedFunctionInfo> CompileToplevel(
return shared_info;
}
+#ifdef V8_RUNTIME_CALL_STATS
RuntimeCallCounterId RuntimeCallCounterIdForCompileBackground(
ParseInfo* parse_info) {
if (parse_info->flags().is_toplevel()) {
@@ -1504,6 +1453,7 @@ RuntimeCallCounterId RuntimeCallCounterIdForCompileBackground(
}
return RuntimeCallCounterId::kCompileBackgroundFunction;
}
+#endif // V8_RUNTIME_CALL_STATS
MaybeHandle<SharedFunctionInfo> CompileAndFinalizeOnBackgroundThread(
ParseInfo* parse_info, AccountingAllocator* allocator,
@@ -1514,9 +1464,8 @@ MaybeHandle<SharedFunctionInfo> CompileAndFinalizeOnBackgroundThread(
IsCompiledScope* is_compiled_scope) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.CompileCodeBackground");
- RuntimeCallTimerScope runtimeTimer(
- parse_info->runtime_call_stats(),
- RuntimeCallCounterIdForCompileBackground(parse_info));
+ RCS_SCOPE(parse_info->runtime_call_stats(),
+ RuntimeCallCounterIdForCompileBackground(parse_info));
Handle<SharedFunctionInfo> shared_info =
CreateTopLevelSharedFunctionInfo(parse_info, script, isolate);
@@ -1541,9 +1490,8 @@ void CompileOnBackgroundThread(ParseInfo* parse_info,
DisallowHeapAccess no_heap_access;
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.CompileCodeBackground");
- RuntimeCallTimerScope runtimeTimer(
- parse_info->runtime_call_stats(),
- RuntimeCallCounterIdForCompileBackground(parse_info));
+ RCS_SCOPE(parse_info->runtime_call_stats(),
+ RuntimeCallCounterIdForCompileBackground(parse_info));
// Generate the unoptimized bytecode or asm-js data.
DCHECK(jobs->empty());
@@ -1557,6 +1505,7 @@ void CompileOnBackgroundThread(ParseInfo* parse_info,
// Character stream shouldn't be used again.
parse_info->ResetCharacterStream();
}
+
} // namespace
CompilationHandleScope::~CompilationHandleScope() {
@@ -1660,8 +1609,8 @@ class V8_NODISCARD OffThreadParseInfoScope {
ParseInfo* parse_info,
WorkerThreadRuntimeCallStats* worker_thread_runtime_stats, int stack_size)
: parse_info_(parse_info),
- original_runtime_call_stats_(parse_info_->runtime_call_stats()),
original_stack_limit_(parse_info_->stack_limit()),
+ original_runtime_call_stats_(parse_info_->runtime_call_stats()),
worker_thread_scope_(worker_thread_runtime_stats) {
parse_info_->SetPerThreadState(GetCurrentStackPosition() - stack_size * KB,
worker_thread_scope_.Get());
@@ -1678,8 +1627,8 @@ class V8_NODISCARD OffThreadParseInfoScope {
private:
ParseInfo* parse_info_;
- RuntimeCallStats* original_runtime_call_stats_;
uintptr_t original_stack_limit_;
+ RuntimeCallStats* original_runtime_call_stats_;
WorkerThreadRuntimeCallStatsScope worker_thread_scope_;
};
@@ -1692,9 +1641,8 @@ void BackgroundCompileTask::Run() {
stack_size_);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"BackgroundCompileTask::Run");
- RuntimeCallTimerScope runtimeTimer(
- info_->runtime_call_stats(),
- RuntimeCallCounterId::kCompileBackgroundCompileTask);
+ RCS_SCOPE(info_->runtime_call_stats(),
+ RuntimeCallCounterId::kCompileBackgroundCompileTask);
// Update the character stream's runtime call stats.
info_->character_stream()->set_runtime_call_stats(
@@ -1817,8 +1765,7 @@ bool Compiler::CollectSourcePositions(Isolate* isolate,
DCHECK(!isolate->has_pending_exception());
VMState<BYTECODE_COMPILER> state(isolate);
PostponeInterruptsScope postpone(isolate);
- RuntimeCallTimerScope runtimeTimer(
- isolate, RuntimeCallCounterId::kCompileCollectSourcePositions);
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kCompileCollectSourcePositions);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.CollectSourcePositions");
HistogramTimerScope timer(isolate->counters()->collect_source_positions());
@@ -1894,8 +1841,7 @@ bool Compiler::Compile(Isolate* isolate, Handle<SharedFunctionInfo> shared_info,
VMState<BYTECODE_COMPILER> state(isolate);
PostponeInterruptsScope postpone(isolate);
TimerEventScope<TimerEventCompileCode> compile_timer(isolate);
- RuntimeCallTimerScope runtimeTimer(isolate,
- RuntimeCallCounterId::kCompileFunction);
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kCompileFunction);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.CompileCode");
AggregatedHistogramTimerScope timer(isolate->counters()->compile_lazy());
@@ -2000,9 +1946,19 @@ bool Compiler::Compile(Isolate* isolate, Handle<JSFunction> function,
CompilerTracer::TraceOptimizeForAlwaysOpt(isolate, function,
CodeKindForTopTier());
+ const CodeKind code_kind = CodeKindForTopTier();
+ const ConcurrencyMode concurrency_mode = ConcurrencyMode::kNotConcurrent;
+
+ if (FLAG_stress_concurrent_inlining &&
+ isolate->concurrent_recompilation_enabled() &&
+ concurrency_mode == ConcurrencyMode::kNotConcurrent &&
+ isolate->node_observer() == nullptr) {
+ SpawnDuplicateConcurrentJobForStressTesting(isolate, function,
+ concurrency_mode, code_kind);
+ }
+
Handle<Code> maybe_code;
- if (GetOptimizedCode(function, ConcurrencyMode::kNotConcurrent,
- CodeKindForTopTier())
+ if (GetOptimizedCode(isolate, function, concurrency_mode, code_kind)
.ToHandle(&maybe_code)) {
code = maybe_code;
}
@@ -2058,8 +2014,8 @@ bool Compiler::FinalizeBackgroundCompileTask(
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.FinalizeBackgroundCompileTask");
- RuntimeCallTimerScope runtimeTimer(
- isolate, RuntimeCallCounterId::kCompileFinalizeBackgroundCompileTask);
+ RCS_SCOPE(isolate,
+ RuntimeCallCounterId::kCompileFinalizeBackgroundCompileTask);
HandleScope scope(isolate);
ParseInfo* parse_info = task->info();
DCHECK(!parse_info->flags().is_toplevel());
@@ -2099,8 +2055,16 @@ bool Compiler::CompileOptimized(Isolate* isolate, Handle<JSFunction> function,
DCHECK(CodeKindIsOptimizedJSFunction(code_kind));
DCHECK(AllowCompilation::IsAllowed(isolate));
+ if (FLAG_stress_concurrent_inlining &&
+ isolate->concurrent_recompilation_enabled() &&
+ mode == ConcurrencyMode::kNotConcurrent &&
+ isolate->node_observer() == nullptr) {
+ SpawnDuplicateConcurrentJobForStressTesting(isolate, function, mode,
+ code_kind);
+ }
+
Handle<Code> code;
- if (!GetOptimizedCode(function, mode, code_kind).ToHandle(&code)) {
+ if (!GetOptimizedCode(isolate, function, mode, code_kind).ToHandle(&code)) {
// Optimization failed, get the existing code. We could have optimized code
// from a lower tier here. Unoptimized code must exist already if we are
// optimizing.
@@ -2110,23 +2074,18 @@ bool Compiler::CompileOptimized(Isolate* isolate, Handle<JSFunction> function,
code = ContinuationForConcurrentOptimization(isolate, function);
}
- if (!CodeKindIsNativeContextIndependentJSFunction(code_kind)) {
- function->set_code(*code, kReleaseStore);
- }
+ function->set_code(*code, kReleaseStore);
// Check postconditions on success.
DCHECK(!isolate->has_pending_exception());
DCHECK(function->shared().is_compiled());
- DCHECK(CodeKindIsNativeContextIndependentJSFunction(code_kind) ||
- function->is_compiled());
- if (!CodeKindIsNativeContextIndependentJSFunction(code_kind)) {
- DCHECK_IMPLIES(function->HasOptimizationMarker(),
- function->IsInOptimizationQueue());
- DCHECK_IMPLIES(function->HasOptimizationMarker(),
- function->ChecksOptimizationMarker());
- DCHECK_IMPLIES(function->IsInOptimizationQueue(),
- mode == ConcurrencyMode::kConcurrent);
- }
+ DCHECK(function->is_compiled());
+ DCHECK_IMPLIES(function->HasOptimizationMarker(),
+ function->IsInOptimizationQueue());
+ DCHECK_IMPLIES(function->HasOptimizationMarker(),
+ function->ChecksOptimizationMarker());
+ DCHECK_IMPLIES(function->IsInOptimizationQueue(),
+ mode == ConcurrencyMode::kConcurrent);
return true;
}
@@ -2281,8 +2240,7 @@ bool CodeGenerationFromStringsAllowed(Isolate* isolate, Handle<Context> context,
// Callback set. Let it decide if code generation is allowed.
VMState<EXTERNAL> state(isolate);
- RuntimeCallTimerScope timer(
- isolate, RuntimeCallCounterId::kCodeGenerationFromStringsCallbacks);
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kCodeGenerationFromStringsCallbacks);
AllowCodeGenerationFromStringsCallback callback =
isolate->allow_code_gen_callback();
return callback(v8::Utils::ToLocal(context), v8::Utils::ToLocal(source));
@@ -2301,8 +2259,7 @@ bool ModifyCodeGenerationFromStrings(Isolate* isolate, Handle<Context> context,
// Callback set. Run it, and use the return value as source, or block
// execution if it's not set.
VMState<EXTERNAL> state(isolate);
- RuntimeCallTimerScope timer(
- isolate, RuntimeCallCounterId::kCodeGenerationFromStringsCallbacks);
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kCodeGenerationFromStringsCallbacks);
ModifyCodeGenerationFromStringsResult result =
isolate->modify_code_gen_callback()
? isolate->modify_code_gen_callback()(v8::Utils::ToLocal(context),
@@ -2881,8 +2838,7 @@ MaybeHandle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
compile_timer.set_consuming_code_cache();
// Then check cached code provided by embedder.
HistogramTimerScope timer(isolate->counters()->compile_deserialize());
- RuntimeCallTimerScope runtimeTimer(
- isolate, RuntimeCallCounterId::kCompileDeserialize);
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kCompileDeserialize);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.CompileDeserialize");
Handle<SharedFunctionInfo> inner_result;
@@ -2894,7 +2850,6 @@ MaybeHandle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
is_compiled_scope = inner_result->is_compiled_scope(isolate);
DCHECK(is_compiled_scope.is_compiled());
compilation_cache->PutScript(source, language_mode, inner_result);
- Handle<Script> script(Script::cast(inner_result->script()), isolate);
maybe_result = inner_result;
} else {
// Deserializer failed. Fall through to compile.
@@ -2970,8 +2925,7 @@ MaybeHandle<JSFunction> Compiler::GetWrappedFunction(
compile_timer.set_consuming_code_cache();
// Then check cached code provided by embedder.
HistogramTimerScope timer(isolate->counters()->compile_deserialize());
- RuntimeCallTimerScope runtimeTimer(
- isolate, RuntimeCallCounterId::kCompileDeserialize);
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kCompileDeserialize);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.CompileDeserialize");
maybe_result = CodeSerializer::Deserialize(isolate, cached_data, source,
@@ -3074,8 +3028,8 @@ Compiler::GetSharedFunctionInfoForStreamedScript(
Handle<Script> script;
if (FLAG_finalize_streaming_on_background && !origin_options.IsModule()) {
- RuntimeCallTimerScope runtimeTimerScope(
- isolate, RuntimeCallCounterId::kCompilePublishBackgroundFinalization);
+ RCS_SCOPE(isolate,
+ RuntimeCallCounterId::kCompilePublishBackgroundFinalization);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.OffThreadFinalization.Publish");
@@ -3163,15 +3117,14 @@ Compiler::GetSharedFunctionInfoForStreamedScript(
}
// static
-template <typename LocalIsolate>
+template <typename IsolateT>
Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfo(
- FunctionLiteral* literal, Handle<Script> script, LocalIsolate* isolate) {
+ FunctionLiteral* literal, Handle<Script> script, IsolateT* isolate) {
// Precondition: code has been parsed and scopes have been analyzed.
MaybeHandle<SharedFunctionInfo> maybe_existing;
// Find any previously allocated shared function info for the given literal.
- maybe_existing =
- script->FindSharedFunctionInfo(isolate, literal->function_literal_id());
+ maybe_existing = Script::FindSharedFunctionInfo(script, isolate, literal);
// If we found an existing shared function info, return it.
Handle<SharedFunctionInfo> existing;
@@ -3215,12 +3168,13 @@ template Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfo(
FunctionLiteral* literal, Handle<Script> script, LocalIsolate* isolate);
// static
-MaybeHandle<Code> Compiler::GetOptimizedCodeForOSR(Handle<JSFunction> function,
+MaybeHandle<Code> Compiler::GetOptimizedCodeForOSR(Isolate* isolate,
+ Handle<JSFunction> function,
BytecodeOffset osr_offset,
JavaScriptFrame* osr_frame) {
DCHECK(!osr_offset.IsNone());
DCHECK_NOT_NULL(osr_frame);
- return GetOptimizedCode(function, ConcurrencyMode::kNotConcurrent,
+ return GetOptimizedCode(isolate, function, ConcurrencyMode::kNotConcurrent,
CodeKindForOSR(), osr_offset, osr_frame);
}
@@ -3233,17 +3187,14 @@ bool Compiler::FinalizeOptimizedCompilationJob(OptimizedCompilationJob* job,
OptimizedCompilationInfo* compilation_info = job->compilation_info();
TimerEventScope<TimerEventRecompileSynchronous> timer(isolate);
- RuntimeCallTimerScope runtimeTimer(
- isolate, RuntimeCallCounterId::kOptimizeConcurrentFinalize);
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kOptimizeConcurrentFinalize);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.OptimizeConcurrentFinalize");
Handle<SharedFunctionInfo> shared = compilation_info->shared_info();
- CodeKind code_kind = compilation_info->code_kind();
- const bool should_install_code_on_function =
- !CodeKindIsNativeContextIndependentJSFunction(code_kind);
- if (should_install_code_on_function) {
+ const bool use_result = !compilation_info->discard_result_for_testing();
+ if (V8_LIKELY(use_result)) {
// Reset profiler ticks, function is no longer considered hot.
compilation_info->closure()->feedback_vector().set_profiler_ticks(0);
}
@@ -3263,10 +3214,9 @@ bool Compiler::FinalizeOptimizedCompilationJob(OptimizedCompilationJob* job,
isolate);
job->RecordFunctionCompilation(CodeEventListener::LAZY_COMPILE_TAG,
isolate);
- InsertCodeIntoOptimizedCodeCache(compilation_info);
- InsertCodeIntoCompilationCache(isolate, compilation_info);
- CompilerTracer::TraceCompletedJob(isolate, compilation_info);
- if (should_install_code_on_function) {
+ if (V8_LIKELY(use_result)) {
+ InsertCodeIntoOptimizedCodeCache(compilation_info);
+ CompilerTracer::TraceCompletedJob(isolate, compilation_info);
compilation_info->closure()->set_code(*compilation_info->code(),
kReleaseStore);
}
@@ -3276,11 +3226,12 @@ bool Compiler::FinalizeOptimizedCompilationJob(OptimizedCompilationJob* job,
DCHECK_EQ(job->state(), CompilationJob::State::kFailed);
CompilerTracer::TraceAbortedJob(isolate, compilation_info);
- compilation_info->closure()->set_code(shared->GetCode(), kReleaseStore);
- // Clear the InOptimizationQueue marker, if it exists.
- if (!CodeKindIsNativeContextIndependentJSFunction(code_kind) &&
- compilation_info->closure()->IsInOptimizationQueue()) {
- compilation_info->closure()->ClearOptimizationMarker();
+ if (V8_LIKELY(use_result)) {
+ compilation_info->closure()->set_code(shared->GetCode(), kReleaseStore);
+ // Clear the InOptimizationQueue marker, if it exists.
+ if (compilation_info->closure()->IsInOptimizationQueue()) {
+ compilation_info->closure()->ClearOptimizationMarker();
+ }
}
return CompilationJob::FAILED;
}
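
Throughout these compiler.cc hunks, explicit `RuntimeCallTimerScope runtimeTimer(...)` declarations become the `RCS_SCOPE(...)` macro, and helpers that only feed it (such as `RuntimeCallCounterIdForCompileBackground`) are fenced by `V8_RUNTIME_CALL_STATS`, so the timing scopes can drop out of builds without runtime call stats. A minimal self-contained sketch of that pattern follows; it is simplified (the real macro is variadic and accepts an isolate or a RuntimeCallStats pointer plus a counter id):

```cpp
// Simplified sketch: expand to a scoped timer only when runtime call stats
// are compiled in, otherwise compile to nothing.
#include <cstdio>

struct RuntimeCallTimerScope {
  explicit RuntimeCallTimerScope(const char* counter) : counter_(counter) {
    std::printf("enter %s\n", counter_);
  }
  ~RuntimeCallTimerScope() { std::printf("leave %s\n", counter_); }
  const char* counter_;
};

#ifdef V8_RUNTIME_CALL_STATS
#define RCS_SCOPE(counter) RuntimeCallTimerScope rcs_timer_scope(counter)
#else
#define RCS_SCOPE(counter) ((void)0)  // no-op when stats are disabled
#endif

void CompileScript() {
  RCS_SCOPE("V8.CompileScript");  // times the enclosing scope when enabled
  // ... compilation work ...
}
```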
diff --git a/chromium/v8/src/codegen/compiler.h b/chromium/v8/src/codegen/compiler.h
index 7ff1b5eecd4..e7d05b3ba3b 100644
--- a/chromium/v8/src/codegen/compiler.h
+++ b/chromium/v8/src/codegen/compiler.h
@@ -190,9 +190,10 @@ class V8_EXPORT_PRIVATE Compiler : public AllStatic {
// Create a shared function info object for the given function literal
// node (the code may be lazily compiled).
- template <typename LocalIsolate>
- static Handle<SharedFunctionInfo> GetSharedFunctionInfo(
- FunctionLiteral* node, Handle<Script> script, LocalIsolate* isolate);
+ template <typename IsolateT>
+ static Handle<SharedFunctionInfo> GetSharedFunctionInfo(FunctionLiteral* node,
+ Handle<Script> script,
+ IsolateT* isolate);
// ===========================================================================
// The following family of methods provides support for OSR. Code generated
@@ -205,7 +206,7 @@ class V8_EXPORT_PRIVATE Compiler : public AllStatic {
// Generate and return optimized code for OSR, or empty handle on failure.
V8_WARN_UNUSED_RESULT static MaybeHandle<Code> GetOptimizedCodeForOSR(
- Handle<JSFunction> function, BytecodeOffset osr_offset,
+ Isolate* isolate, Handle<JSFunction> function, BytecodeOffset osr_offset,
JavaScriptFrame* osr_frame);
};
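
Compiler::GetSharedFunctionInfo is now templated on `IsolateT` rather than `LocalIsolate`, and GetOptimizedCodeForOSR threads the `Isolate*` through explicitly. The template keeps one definition that works for both the main-thread Isolate and the background LocalIsolate, with explicit instantiations for each (see the instantiation in the compiler.cc hunk above). A stand-alone sketch of that shape, using stand-in structs rather than the V8 classes:

```cpp
// Sketch of the IsolateT pattern: one templated helper, explicitly
// instantiated for both isolate flavours (stand-in types below).
struct Isolate      { bool is_main_thread() const { return true;  } };
struct LocalIsolate { bool is_main_thread() const { return false; } };

template <typename IsolateT>
bool RunsOnMainThread(IsolateT* isolate) { return isolate->is_main_thread(); }

// Explicit instantiations, mirroring those emitted for
// Compiler::GetSharedFunctionInfo.
template bool RunsOnMainThread(Isolate*);
template bool RunsOnMainThread(LocalIsolate*);
```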
diff --git a/chromium/v8/src/codegen/constants-arch.h b/chromium/v8/src/codegen/constants-arch.h
index cea8dc068fe..2417be5d4dc 100644
--- a/chromium/v8/src/codegen/constants-arch.h
+++ b/chromium/v8/src/codegen/constants-arch.h
@@ -6,23 +6,23 @@
#define V8_CODEGEN_CONSTANTS_ARCH_H_
#if V8_TARGET_ARCH_ARM
-#include "src/codegen/arm/constants-arm.h" // NOLINT
+#include "src/codegen/arm/constants-arm.h"
#elif V8_TARGET_ARCH_ARM64
-#include "src/codegen/arm64/constants-arm64.h" // NOLINT
+#include "src/codegen/arm64/constants-arm64.h"
#elif V8_TARGET_ARCH_IA32
-#include "src/codegen/ia32/constants-ia32.h" // NOLINT
+#include "src/codegen/ia32/constants-ia32.h"
#elif V8_TARGET_ARCH_MIPS
-#include "src/codegen/mips/constants-mips.h" // NOLINT
+#include "src/codegen/mips/constants-mips.h"
#elif V8_TARGET_ARCH_MIPS64
-#include "src/codegen/mips64/constants-mips64.h" // NOLINT
+#include "src/codegen/mips64/constants-mips64.h"
#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
-#include "src/codegen/ppc/constants-ppc.h" // NOLINT
+#include "src/codegen/ppc/constants-ppc.h"
#elif V8_TARGET_ARCH_S390
-#include "src/codegen/s390/constants-s390.h" // NOLINT
+#include "src/codegen/s390/constants-s390.h"
#elif V8_TARGET_ARCH_X64
-#include "src/codegen/x64/constants-x64.h" // NOLINT
+#include "src/codegen/x64/constants-x64.h"
#elif V8_TARGET_ARCH_RISCV64
-#include "src/codegen/riscv64/constants-riscv64.h" // NOLINT
+#include "src/codegen/riscv64/constants-riscv64.h"
#else
#error Unsupported target architecture.
#endif
diff --git a/chromium/v8/src/codegen/cpu-features.h b/chromium/v8/src/codegen/cpu-features.h
index b9a450ea3a0..6833ee60d08 100644
--- a/chromium/v8/src/codegen/cpu-features.h
+++ b/chromium/v8/src/codegen/cpu-features.h
@@ -109,6 +109,9 @@ class V8_EXPORT_PRIVATE CpuFeatures : public AllStatic {
return (supported_ & (1u << f)) != 0;
}
+ static void SetSupported(CpuFeature f) { supported_ |= 1u << f; }
+ static void SetUnsupported(CpuFeature f) { supported_ &= ~(1u << f); }
+
static bool SupportsWasmSimd128();
static inline bool SupportsOptimizer();
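
`SetSupported`/`SetUnsupported` enable a two-pass probing style: first record every feature the CPU reports, then strip features whose flag is off or whose prerequisite is missing (see the rewritten ia32 `ProbeImpl` further down). A self-contained sketch of the same idea, with stand-in flags and only a few features:

```cpp
// Two-pass feature probing sketch: record raw CPU capabilities, then clear
// anything disabled by a flag or missing a prerequisite (AVX2 needs AVX,
// AVX needs SSE4_2, and so on). All names here are stand-ins.
#include <cstdint>

enum CpuFeature { SSE3, SSSE3, SSE4_1, SSE4_2, AVX, AVX2 };

static uint32_t supported_ = 0;
static void SetSupported(CpuFeature f) { supported_ |= 1u << f; }
static void SetUnsupported(CpuFeature f) { supported_ &= ~(1u << f); }
static bool IsSupported(CpuFeature f) { return (supported_ & (1u << f)) != 0; }

void Probe(bool cpu_has_avx2, bool flag_enable_avx2) {
  // Pass 1: what the hardware reports.
  SetSupported(SSE3); SetSupported(SSSE3); SetSupported(SSE4_1);
  SetSupported(SSE4_2); SetSupported(AVX);
  if (cpu_has_avx2) SetSupported(AVX2);
  // Pass 2: respect flags and feature dependencies.
  if (!flag_enable_avx2 || !IsSupported(AVX)) SetUnsupported(AVX2);
}
```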
diff --git a/chromium/v8/src/codegen/external-reference-table.cc b/chromium/v8/src/codegen/external-reference-table.cc
index 2741bd8ec2a..6c109861a2c 100644
--- a/chromium/v8/src/codegen/external-reference-table.cc
+++ b/chromium/v8/src/codegen/external-reference-table.cc
@@ -33,20 +33,24 @@ namespace internal {
// clang-format off
const char* const
ExternalReferenceTable::ref_name_[ExternalReferenceTable::kSize] = {
+ // === Isolate independent ===
// Special references:
"nullptr",
- // External references:
+ // External references (without isolate):
EXTERNAL_REFERENCE_LIST(ADD_EXT_REF_NAME)
- EXTERNAL_REFERENCE_LIST_WITH_ISOLATE(ADD_EXT_REF_NAME)
// Builtins:
BUILTIN_LIST_C(ADD_BUILTIN_NAME)
// Runtime functions:
FOR_EACH_INTRINSIC(ADD_RUNTIME_FUNCTION)
- // Isolate addresses:
- FOR_EACH_ISOLATE_ADDRESS_NAME(ADD_ISOLATE_ADDR)
// Accessors:
ACCESSOR_INFO_LIST_GENERATOR(ADD_ACCESSOR_INFO_NAME, /* not used */)
ACCESSOR_SETTER_LIST(ADD_ACCESSOR_SETTER_NAME)
+
+ // === Isolate dependent ===
+ // External references (with isolate):
+ EXTERNAL_REFERENCE_LIST_WITH_ISOLATE(ADD_EXT_REF_NAME)
+ // Isolate addresses:
+ FOR_EACH_ISOLATE_ADDRESS_NAME(ADD_ISOLATE_ADDR)
// Stub cache:
"Load StubCache::primary_->key",
"Load StubCache::primary_->value",
@@ -72,6 +76,11 @@ const char* const
#undef ADD_ACCESSOR_SETTER_NAME
#undef ADD_STATS_COUNTER_NAME
+namespace {
+static Address ref_addr_isolate_independent_
+ [ExternalReferenceTable::kSizeIsolateIndependent] = {0};
+} // namespace
+
// Forward declarations for C++ builtins.
#define FORWARD_DECLARE(Name) \
Address Builtin_##Name(int argc, Address* args, Isolate* isolate);
@@ -81,13 +90,10 @@ BUILTIN_LIST_C(FORWARD_DECLARE)
void ExternalReferenceTable::Init(Isolate* isolate) {
int index = 0;
- // kNullAddress is preserved through serialization/deserialization.
- Add(kNullAddress, &index);
- AddReferences(isolate, &index);
- AddBuiltins(&index);
- AddRuntimeFunctions(&index);
+ CopyIsolateIndependentReferences(&index);
+
+ AddIsolateDependentReferences(isolate, &index);
AddIsolateAddresses(isolate, &index);
- AddAccessors(&index);
AddStubCache(isolate, &index);
AddNativeCodeStatsCounters(isolate, &index);
is_initialized_ = static_cast<uint32_t>(true);
@@ -108,28 +114,66 @@ const char* ExternalReferenceTable::ResolveSymbol(void* address) {
#endif // SYMBOLIZE_FUNCTION
}
+void ExternalReferenceTable::InitializeOncePerProcess() {
+ int index = 0;
+
+ // kNullAddress is preserved through serialization/deserialization.
+ AddIsolateIndependent(kNullAddress, &index);
+ AddIsolateIndependentReferences(&index);
+ AddBuiltins(&index);
+ AddRuntimeFunctions(&index);
+ AddAccessors(&index);
+
+ CHECK_EQ(kSizeIsolateIndependent, index);
+}
+
+const char* ExternalReferenceTable::NameOfIsolateIndependentAddress(
+ Address address) {
+ for (int i = 0; i < kSizeIsolateIndependent; i++) {
+ if (ref_addr_isolate_independent_[i] == address) {
+ return ref_name_[i];
+ }
+ }
+ return "<unknown>";
+}
+
void ExternalReferenceTable::Add(Address address, int* index) {
ref_addr_[(*index)++] = address;
}
-void ExternalReferenceTable::AddReferences(Isolate* isolate, int* index) {
+void ExternalReferenceTable::AddIsolateIndependent(Address address,
+ int* index) {
+ ref_addr_isolate_independent_[(*index)++] = address;
+}
+
+void ExternalReferenceTable::AddIsolateIndependentReferences(int* index) {
CHECK_EQ(kSpecialReferenceCount, *index);
#define ADD_EXTERNAL_REFERENCE(name, desc) \
- Add(ExternalReference::name().address(), index);
+ AddIsolateIndependent(ExternalReference::name().address(), index);
EXTERNAL_REFERENCE_LIST(ADD_EXTERNAL_REFERENCE)
#undef ADD_EXTERNAL_REFERENCE
+ CHECK_EQ(kSpecialReferenceCount + kExternalReferenceCountIsolateIndependent,
+ *index);
+}
+
+void ExternalReferenceTable::AddIsolateDependentReferences(Isolate* isolate,
+ int* index) {
+ CHECK_EQ(kSizeIsolateIndependent, *index);
+
#define ADD_EXTERNAL_REFERENCE(name, desc) \
Add(ExternalReference::name(isolate).address(), index);
EXTERNAL_REFERENCE_LIST_WITH_ISOLATE(ADD_EXTERNAL_REFERENCE)
#undef ADD_EXTERNAL_REFERENCE
- CHECK_EQ(kSpecialReferenceCount + kExternalReferenceCount, *index);
+ CHECK_EQ(kSizeIsolateIndependent + kExternalReferenceCountIsolateDependent,
+ *index);
}
void ExternalReferenceTable::AddBuiltins(int* index) {
- CHECK_EQ(kSpecialReferenceCount + kExternalReferenceCount, *index);
+ CHECK_EQ(kSpecialReferenceCount + kExternalReferenceCountIsolateIndependent,
+ *index);
static const Address c_builtins[] = {
#define DEF_ENTRY(Name, ...) FUNCTION_ADDR(&Builtin_##Name),
@@ -137,16 +181,16 @@ void ExternalReferenceTable::AddBuiltins(int* index) {
#undef DEF_ENTRY
};
for (Address addr : c_builtins) {
- Add(ExternalReference::Create(addr).address(), index);
+ AddIsolateIndependent(ExternalReference::Create(addr).address(), index);
}
- CHECK_EQ(kSpecialReferenceCount + kExternalReferenceCount +
+ CHECK_EQ(kSpecialReferenceCount + kExternalReferenceCountIsolateIndependent +
kBuiltinsReferenceCount,
*index);
}
void ExternalReferenceTable::AddRuntimeFunctions(int* index) {
- CHECK_EQ(kSpecialReferenceCount + kExternalReferenceCount +
+ CHECK_EQ(kSpecialReferenceCount + kExternalReferenceCountIsolateIndependent +
kBuiltinsReferenceCount,
*index);
@@ -157,33 +201,38 @@ void ExternalReferenceTable::AddRuntimeFunctions(int* index) {
};
for (Runtime::FunctionId fId : runtime_functions) {
- Add(ExternalReference::Create(fId).address(), index);
+ AddIsolateIndependent(ExternalReference::Create(fId).address(), index);
}
- CHECK_EQ(kSpecialReferenceCount + kExternalReferenceCount +
+ CHECK_EQ(kSpecialReferenceCount + kExternalReferenceCountIsolateIndependent +
kBuiltinsReferenceCount + kRuntimeReferenceCount,
*index);
}
+void ExternalReferenceTable::CopyIsolateIndependentReferences(int* index) {
+ CHECK_EQ(0, *index);
+
+ std::copy(ref_addr_isolate_independent_,
+ ref_addr_isolate_independent_ + kSizeIsolateIndependent, ref_addr_);
+ *index += kSizeIsolateIndependent;
+}
+
void ExternalReferenceTable::AddIsolateAddresses(Isolate* isolate, int* index) {
- CHECK_EQ(kSpecialReferenceCount + kExternalReferenceCount +
- kBuiltinsReferenceCount + kRuntimeReferenceCount,
+ CHECK_EQ(kSizeIsolateIndependent + kExternalReferenceCountIsolateDependent,
*index);
for (int i = 0; i < IsolateAddressId::kIsolateAddressCount; ++i) {
Add(isolate->get_address_from_id(static_cast<IsolateAddressId>(i)), index);
}
- CHECK_EQ(kSpecialReferenceCount + kExternalReferenceCount +
- kBuiltinsReferenceCount + kRuntimeReferenceCount +
+ CHECK_EQ(kSizeIsolateIndependent + kExternalReferenceCountIsolateDependent +
kIsolateAddressReferenceCount,
*index);
}
void ExternalReferenceTable::AddAccessors(int* index) {
- CHECK_EQ(kSpecialReferenceCount + kExternalReferenceCount +
- kBuiltinsReferenceCount + kRuntimeReferenceCount +
- kIsolateAddressReferenceCount,
+ CHECK_EQ(kSpecialReferenceCount + kExternalReferenceCountIsolateIndependent +
+ kBuiltinsReferenceCount + kRuntimeReferenceCount,
*index);
static const Address accessors[] = {
@@ -199,19 +248,18 @@ void ExternalReferenceTable::AddAccessors(int* index) {
};
for (Address addr : accessors) {
- Add(addr, index);
+ AddIsolateIndependent(addr, index);
}
- CHECK_EQ(kSpecialReferenceCount + kExternalReferenceCount +
+ CHECK_EQ(kSpecialReferenceCount + kExternalReferenceCountIsolateIndependent +
kBuiltinsReferenceCount + kRuntimeReferenceCount +
- kIsolateAddressReferenceCount + kAccessorReferenceCount,
+ kAccessorReferenceCount,
*index);
}
void ExternalReferenceTable::AddStubCache(Isolate* isolate, int* index) {
- CHECK_EQ(kSpecialReferenceCount + kExternalReferenceCount +
- kBuiltinsReferenceCount + kRuntimeReferenceCount +
- kIsolateAddressReferenceCount + kAccessorReferenceCount,
+ CHECK_EQ(kSizeIsolateIndependent + kExternalReferenceCountIsolateDependent +
+ kIsolateAddressReferenceCount,
*index);
StubCache* load_stub_cache = isolate->load_stub_cache();
@@ -235,10 +283,8 @@ void ExternalReferenceTable::AddStubCache(Isolate* isolate, int* index) {
index);
Add(store_stub_cache->map_reference(StubCache::kSecondary).address(), index);
- CHECK_EQ(kSpecialReferenceCount + kExternalReferenceCount +
- kBuiltinsReferenceCount + kRuntimeReferenceCount +
- kIsolateAddressReferenceCount + kAccessorReferenceCount +
- kStubCacheReferenceCount,
+ CHECK_EQ(kSizeIsolateIndependent + kExternalReferenceCountIsolateDependent +
+ kIsolateAddressReferenceCount + kStubCacheReferenceCount,
*index);
}
@@ -251,10 +297,8 @@ Address ExternalReferenceTable::GetStatsCounterAddress(StatsCounter* counter) {
void ExternalReferenceTable::AddNativeCodeStatsCounters(Isolate* isolate,
int* index) {
- CHECK_EQ(kSpecialReferenceCount + kExternalReferenceCount +
- kBuiltinsReferenceCount + kRuntimeReferenceCount +
- kIsolateAddressReferenceCount + kAccessorReferenceCount +
- kStubCacheReferenceCount,
+ CHECK_EQ(kSizeIsolateIndependent + kExternalReferenceCountIsolateDependent +
+ kIsolateAddressReferenceCount + kStubCacheReferenceCount,
*index);
Counters* counters = isolate->counters();
@@ -263,10 +307,9 @@ void ExternalReferenceTable::AddNativeCodeStatsCounters(Isolate* isolate,
STATS_COUNTER_NATIVE_CODE_LIST(SC)
#undef SC
- CHECK_EQ(kSpecialReferenceCount + kExternalReferenceCount +
- kBuiltinsReferenceCount + kRuntimeReferenceCount +
- kIsolateAddressReferenceCount + kAccessorReferenceCount +
- kStubCacheReferenceCount + kStatsCountersReferenceCount,
+ CHECK_EQ(kSizeIsolateIndependent + kExternalReferenceCountIsolateDependent +
+ kIsolateAddressReferenceCount + kStubCacheReferenceCount +
+ kStatsCountersReferenceCount,
*index);
CHECK_EQ(kSize, *index);
}
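
The table is now laid out as an isolate-independent prefix (the special nullptr slot, plain external references, builtins, runtime functions, accessors) followed by an isolate-dependent suffix (isolate-bound references, isolate addresses, stub cache entries, counters). `InitializeOncePerProcess` fills a process-wide copy of the prefix exactly once, and each isolate's `Init` copies that prefix and appends its own suffix. A reduced sketch of the layout idea, with illustrative sizes and addresses:

```cpp
// Sketch of the split table: a process-wide prefix computed once and copied
// into every per-isolate table, followed by per-isolate entries.
#include <algorithm>
#include <cstdint>

using Address = uintptr_t;
constexpr int kIndependent = 3;  // illustrative sizes only
constexpr int kDependent = 2;
constexpr int kSize = kIndependent + kDependent;

static Address process_prefix[kIndependent];  // filled once per process

void InitializeOncePerProcess() {
  int index = 0;
  process_prefix[index++] = 0;       // kNullAddress
  process_prefix[index++] = 0x1000;  // e.g. a builtin entry point
  process_prefix[index++] = 0x2000;  // e.g. a runtime function
}

void InitPerIsolate(Address (&table)[kSize], Address isolate_address) {
  std::copy(process_prefix, process_prefix + kIndependent, table);
  int index = kIndependent;
  table[index++] = isolate_address;      // isolate-dependent entries follow
  table[index++] = isolate_address + 8;  // e.g. a stub cache slot
}
```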
diff --git a/chromium/v8/src/codegen/external-reference-table.h b/chromium/v8/src/codegen/external-reference-table.h
index 9f75d0fa741..0bf42477ae9 100644
--- a/chromium/v8/src/codegen/external-reference-table.h
+++ b/chromium/v8/src/codegen/external-reference-table.h
@@ -24,8 +24,10 @@ class ExternalReferenceTable {
public:
// For the nullptr ref, see the constructor.
static constexpr int kSpecialReferenceCount = 1;
- static constexpr int kExternalReferenceCount =
- ExternalReference::kExternalReferenceCount;
+ static constexpr int kExternalReferenceCountIsolateIndependent =
+ ExternalReference::kExternalReferenceCountIsolateIndependent;
+ static constexpr int kExternalReferenceCountIsolateDependent =
+ ExternalReference::kExternalReferenceCountIsolateDependent;
static constexpr int kBuiltinsReferenceCount =
#define COUNT_C_BUILTIN(...) +1
BUILTIN_LIST_C(COUNT_C_BUILTIN);
@@ -42,11 +44,14 @@ class ExternalReferenceTable {
#define SC(...) +1
STATS_COUNTER_NATIVE_CODE_LIST(SC);
#undef SC
- static constexpr int kSize =
- kSpecialReferenceCount + kExternalReferenceCount +
+ static constexpr int kSizeIsolateIndependent =
+ kSpecialReferenceCount + kExternalReferenceCountIsolateIndependent +
kBuiltinsReferenceCount + kRuntimeReferenceCount +
- kIsolateAddressReferenceCount + kAccessorReferenceCount +
- kStubCacheReferenceCount + kStatsCountersReferenceCount;
+ kAccessorReferenceCount;
+ static constexpr int kSize =
+ kSizeIsolateIndependent + kExternalReferenceCountIsolateDependent +
+ kIsolateAddressReferenceCount + kStubCacheReferenceCount +
+ kStatsCountersReferenceCount;
static constexpr uint32_t kEntrySize =
static_cast<uint32_t>(kSystemPointerSize);
static constexpr uint32_t kSizeInBytes = kSize * kEntrySize + 2 * kUInt32Size;
@@ -63,6 +68,9 @@ class ExternalReferenceTable {
return i * kEntrySize;
}
+ static void InitializeOncePerProcess();
+ static const char* NameOfIsolateIndependentAddress(Address address);
+
const char* NameFromOffset(uint32_t offset) {
DCHECK_EQ(offset % kEntrySize, 0);
DCHECK_LT(offset, kSizeInBytes);
@@ -76,13 +84,18 @@ class ExternalReferenceTable {
void Init(Isolate* isolate);
private:
+ static void AddIsolateIndependent(Address address, int* index);
+
+ static void AddIsolateIndependentReferences(int* index);
+ static void AddBuiltins(int* index);
+ static void AddRuntimeFunctions(int* index);
+ static void AddAccessors(int* index);
+
void Add(Address address, int* index);
- void AddReferences(Isolate* isolate, int* index);
- void AddBuiltins(int* index);
- void AddRuntimeFunctions(int* index);
+ void CopyIsolateIndependentReferences(int* index);
+ void AddIsolateDependentReferences(Isolate* isolate, int* index);
void AddIsolateAddresses(Isolate* isolate, int* index);
- void AddAccessors(int* index);
void AddStubCache(Isolate* isolate, int* index);
Address GetStatsCounterAddress(StatsCounter* counter);
diff --git a/chromium/v8/src/codegen/external-reference.cc b/chromium/v8/src/codegen/external-reference.cc
index 454b04e8935..3e91306b158 100644
--- a/chromium/v8/src/codegen/external-reference.cc
+++ b/chromium/v8/src/codegen/external-reference.cc
@@ -819,6 +819,9 @@ ExternalReference ExternalReference::search_string_raw() {
FUNCTION_REFERENCE(jsarray_array_join_concat_to_sequential_string,
JSArray::ArrayJoinConcatToSequentialString)
+FUNCTION_REFERENCE(length_tracking_gsab_backed_typed_array_length,
+ JSTypedArray::LengthTrackingGsabBackedTypedArrayLength)
+
ExternalReference ExternalReference::search_string_raw_one_one() {
return search_string_raw<const uint8_t, const uint8_t>();
}
@@ -965,6 +968,11 @@ ExternalReference ExternalReference::cpu_features() {
return ExternalReference(&CpuFeatures::supported_);
}
+ExternalReference ExternalReference::promise_hook_flags_address(
+ Isolate* isolate) {
+ return ExternalReference(isolate->promise_hook_flags_address());
+}
+
ExternalReference ExternalReference::promise_hook_address(Isolate* isolate) {
return ExternalReference(isolate->promise_hook_address());
}
@@ -974,21 +982,6 @@ ExternalReference ExternalReference::async_event_delegate_address(
return ExternalReference(isolate->async_event_delegate_address());
}
-ExternalReference
-ExternalReference::promise_hook_or_async_event_delegate_address(
- Isolate* isolate) {
- return ExternalReference(
- isolate->promise_hook_or_async_event_delegate_address());
-}
-
-ExternalReference ExternalReference::
- promise_hook_or_debug_is_active_or_async_event_delegate_address(
- Isolate* isolate) {
- return ExternalReference(
- isolate
- ->promise_hook_or_debug_is_active_or_async_event_delegate_address());
-}
-
ExternalReference ExternalReference::debug_execution_mode_address(
Isolate* isolate) {
return ExternalReference(isolate->debug_execution_mode_address());
@@ -1027,11 +1020,6 @@ ExternalReference ExternalReference::debug_suspended_generator_address(
return ExternalReference(isolate->debug()->suspended_generator_address());
}
-ExternalReference ExternalReference::debug_restart_fp_address(
- Isolate* isolate) {
- return ExternalReference(isolate->debug()->restart_fp_address());
-}
-
ExternalReference ExternalReference::fast_c_call_caller_fp_address(
Isolate* isolate) {
return ExternalReference(
diff --git a/chromium/v8/src/codegen/external-reference.h b/chromium/v8/src/codegen/external-reference.h
index f53db401c9e..f75a5c694ad 100644
--- a/chromium/v8/src/codegen/external-reference.h
+++ b/chromium/v8/src/codegen/external-reference.h
@@ -50,13 +50,9 @@ class StatsCounter;
V(handle_scope_limit_address, "HandleScope::limit") \
V(scheduled_exception_address, "Isolate::scheduled_exception") \
V(address_of_pending_message_obj, "address_of_pending_message_obj") \
+ V(promise_hook_flags_address, "Isolate::promise_hook_flags_address()") \
V(promise_hook_address, "Isolate::promise_hook_address()") \
V(async_event_delegate_address, "Isolate::async_event_delegate_address()") \
- V(promise_hook_or_async_event_delegate_address, \
- "Isolate::promise_hook_or_async_event_delegate_address()") \
- V(promise_hook_or_debug_is_active_or_async_event_delegate_address, \
- "Isolate::promise_hook_or_debug_is_active_or_async_event_delegate_" \
- "address()") \
V(debug_execution_mode_address, "Isolate::debug_execution_mode_address()") \
V(debug_is_active_address, "Debug::is_active_address()") \
V(debug_hook_on_function_call_address, \
@@ -66,7 +62,6 @@ class StatsCounter;
V(is_profiling_address, "Isolate::is_profiling") \
V(debug_suspended_generator_address, \
"Debug::step_suspended_generator_address()") \
- V(debug_restart_fp_address, "Debug::restart_fp_address()") \
V(fast_c_call_caller_fp_address, \
"IsolateData::fast_c_call_caller_fp_address") \
V(fast_c_call_caller_pc_address, \
@@ -174,6 +169,8 @@ class StatsCounter;
V(jsarray_array_join_concat_to_sequential_string, \
"jsarray_array_join_concat_to_sequential_string") \
V(jsreceiver_create_identity_hash, "jsreceiver_create_identity_hash") \
+ V(length_tracking_gsab_backed_typed_array_length, \
+ "LengthTrackingGsabBackedTypedArrayLength") \
V(libc_memchr_function, "libc_memchr") \
V(libc_memcpy_function, "libc_memcpy") \
V(libc_memmove_function, "libc_memmove") \
@@ -342,10 +339,11 @@ class ExternalReference {
PROFILING_GETTER_CALL
};
- static constexpr int kExternalReferenceCount =
#define COUNT_EXTERNAL_REFERENCE(name, desc) +1
- EXTERNAL_REFERENCE_LIST(COUNT_EXTERNAL_REFERENCE)
- EXTERNAL_REFERENCE_LIST_WITH_ISOLATE(COUNT_EXTERNAL_REFERENCE);
+ static constexpr int kExternalReferenceCountIsolateIndependent =
+ EXTERNAL_REFERENCE_LIST(COUNT_EXTERNAL_REFERENCE);
+ static constexpr int kExternalReferenceCountIsolateDependent =
+ EXTERNAL_REFERENCE_LIST_WITH_ISOLATE(COUNT_EXTERNAL_REFERENCE);
#undef COUNT_EXTERNAL_REFERENCE
ExternalReference() : address_(kNullAddress) {}
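
Splitting `kExternalReferenceCount` into isolate-independent and isolate-dependent constants reuses the counting idiom already used by these lists: expanding the list macro with an argument that turns every entry into `+1` yields the entry count as a constant expression. A self-contained sketch with a hypothetical list:

```cpp
// Sketch of the list-counting macro trick behind the two
// kExternalReferenceCount* constants: each entry expands to "+1".
#define MY_REF_LIST(V) V(alpha, "alpha") V(beta, "beta") V(gamma, "gamma")

#define COUNT_ENTRY(name, desc) +1
constexpr int kMyRefCount = 0 MY_REF_LIST(COUNT_ENTRY);
#undef COUNT_ENTRY

static_assert(kMyRefCount == 3, "three entries in MY_REF_LIST");
```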
diff --git a/chromium/v8/src/codegen/handler-table.h b/chromium/v8/src/codegen/handler-table.h
index e1626e2be50..5b83bf4a696 100644
--- a/chromium/v8/src/codegen/handler-table.h
+++ b/chromium/v8/src/codegen/handler-table.h
@@ -97,8 +97,8 @@ class V8_EXPORT_PRIVATE HandlerTable {
int NumberOfReturnEntries() const;
#ifdef ENABLE_DISASSEMBLER
- void HandlerTableRangePrint(std::ostream& os); // NOLINT
- void HandlerTableReturnPrint(std::ostream& os); // NOLINT
+ void HandlerTableRangePrint(std::ostream& os);
+ void HandlerTableReturnPrint(std::ostream& os);
#endif
private:
diff --git a/chromium/v8/src/codegen/ia32/assembler-ia32.cc b/chromium/v8/src/codegen/ia32/assembler-ia32.cc
index 809df1daef0..688b038e917 100644
--- a/chromium/v8/src/codegen/ia32/assembler-ia32.cc
+++ b/chromium/v8/src/codegen/ia32/assembler-ia32.cc
@@ -138,39 +138,38 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
// Only use statically determined features for cross compile (snapshot).
if (cross_compile) return;
- // To deal with any combination of flags (e.g. --no-enable-sse4-1
- // --enable-sse-4-2), we start checking from the "highest" supported
- // extension, for each extension, enable if newer extension is supported.
- if (cpu.has_avx2() && FLAG_enable_avx2 && IsSupported(AVX)) {
- supported_ |= 1u << AVX2;
+ if (cpu.has_sse42()) SetSupported(SSE4_2);
+ if (cpu.has_sse41()) SetSupported(SSE4_1);
+ if (cpu.has_ssse3()) SetSupported(SSSE3);
+ if (cpu.has_sse3()) SetSupported(SSE3);
+ if (cpu.has_avx() && cpu.has_osxsave() && OSHasAVXSupport()) {
+ SetSupported(AVX);
+ if (cpu.has_avx2()) SetSupported(AVX2);
+ if (cpu.has_fma3()) SetSupported(FMA3);
}
- if (cpu.has_fma3() && FLAG_enable_fma3 && cpu.has_osxsave() &&
- OSHasAVXSupport()) {
- supported_ |= 1u << FMA3;
- }
- if ((cpu.has_avx() && FLAG_enable_avx && cpu.has_osxsave() &&
- OSHasAVXSupport()) ||
- IsSupported(AVX2) || IsSupported(FMA3)) {
- supported_ |= 1u << AVX;
- }
- if ((cpu.has_sse42() && FLAG_enable_sse4_2) || IsSupported(AVX))
- supported_ |= 1u << SSE4_2;
- if ((cpu.has_sse41() && FLAG_enable_sse4_1) || IsSupported(SSE4_2))
- supported_ |= 1u << SSE4_1;
- if ((cpu.has_ssse3() && FLAG_enable_ssse3) || IsSupported(SSE4_1))
- supported_ |= 1u << SSSE3;
- if ((cpu.has_sse3() && FLAG_enable_sse3) || IsSupported(SSSE3))
- supported_ |= 1u << SSE3;
- if (cpu.has_bmi1() && FLAG_enable_bmi1) supported_ |= 1u << BMI1;
- if (cpu.has_bmi2() && FLAG_enable_bmi2) supported_ |= 1u << BMI2;
- if (cpu.has_lzcnt() && FLAG_enable_lzcnt) supported_ |= 1u << LZCNT;
- if (cpu.has_popcnt() && FLAG_enable_popcnt) supported_ |= 1u << POPCNT;
+
+ if (cpu.has_bmi1() && FLAG_enable_bmi1) SetSupported(BMI1);
+ if (cpu.has_bmi2() && FLAG_enable_bmi2) SetSupported(BMI2);
+ if (cpu.has_lzcnt() && FLAG_enable_lzcnt) SetSupported(LZCNT);
+ if (cpu.has_popcnt() && FLAG_enable_popcnt) SetSupported(POPCNT);
if (strcmp(FLAG_mcpu, "auto") == 0) {
- if (cpu.is_atom()) supported_ |= 1u << ATOM;
+ if (cpu.is_atom()) SetSupported(ATOM);
} else if (strcmp(FLAG_mcpu, "atom") == 0) {
- supported_ |= 1u << ATOM;
+ SetSupported(ATOM);
}
+ // Ensure that supported cpu features make sense. E.g. it is wrong to
+ // support AVX but not SSE4_2; if we have --enable-avx and
+ // --no-enable-sse4-2, the code above would set AVX to supported and
+ // SSE4_2 to unsupported, and the checks below would then clear AVX again.
+ if (!FLAG_enable_sse3) SetUnsupported(SSE3);
+ if (!FLAG_enable_ssse3 || !IsSupported(SSE3)) SetUnsupported(SSSE3);
+ if (!FLAG_enable_sse4_1 || !IsSupported(SSSE3)) SetUnsupported(SSE4_1);
+ if (!FLAG_enable_sse4_2 || !IsSupported(SSE4_1)) SetUnsupported(SSE4_2);
+ if (!FLAG_enable_avx || !IsSupported(SSE4_2)) SetUnsupported(AVX);
+ if (!FLAG_enable_avx2 || !IsSupported(AVX)) SetUnsupported(AVX2);
+ if (!FLAG_enable_fma3 || !IsSupported(AVX)) SetUnsupported(FMA3);
+
// Set a static value on whether Simd is supported.
// This variable is only used for certain archs to query SupportWasmSimd128()
// at runtime in builtins using an extern ref. Other callers should use
@@ -2489,6 +2488,13 @@ void Assembler::movhlps(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
+void Assembler::movlhps(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0x16);
+ emit_sse_operand(dst, src);
+}
+
void Assembler::movlps(XMMRegister dst, Operand src) {
EnsureSpace ensure_space(this);
EMIT(0x0F);
@@ -2980,6 +2986,10 @@ void Assembler::vmovhlps(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
vinstr(0x12, dst, src1, src2, kNone, k0F, kWIG);
}
+void Assembler::vmovlhps(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+ vinstr(0x16, dst, src1, src2, kNone, k0F, kWIG);
+}
+
void Assembler::vmovlps(XMMRegister dst, XMMRegister src1, Operand src2) {
vinstr(0x12, dst, src1, src2, kNone, k0F, kWIG);
}
@@ -3276,9 +3286,9 @@ void Assembler::sse4_instr(XMMRegister dst, Operand src, byte prefix,
}
void Assembler::vinstr(byte op, XMMRegister dst, XMMRegister src1,
- XMMRegister src2, SIMDPrefix pp, LeadingOpcode m,
- VexW w) {
- DCHECK(IsEnabled(AVX));
+ XMMRegister src2, SIMDPrefix pp, LeadingOpcode m, VexW w,
+ CpuFeature feature) {
+ DCHECK(IsEnabled(feature));
EnsureSpace ensure_space(this);
emit_vex_prefix(src1, kL128, pp, m, w);
EMIT(op);
@@ -3286,8 +3296,9 @@ void Assembler::vinstr(byte op, XMMRegister dst, XMMRegister src1,
}
void Assembler::vinstr(byte op, XMMRegister dst, XMMRegister src1, Operand src2,
- SIMDPrefix pp, LeadingOpcode m, VexW w) {
- DCHECK(IsEnabled(AVX));
+ SIMDPrefix pp, LeadingOpcode m, VexW w,
+ CpuFeature feature) {
+ DCHECK(IsEnabled(feature));
EnsureSpace ensure_space(this);
emit_vex_prefix(src1, kL128, pp, m, w);
EMIT(op);
diff --git a/chromium/v8/src/codegen/ia32/assembler-ia32.h b/chromium/v8/src/codegen/ia32/assembler-ia32.h
index 2a8fd3ee28b..806d17a2d4d 100644
--- a/chromium/v8/src/codegen/ia32/assembler-ia32.h
+++ b/chromium/v8/src/codegen/ia32/assembler-ia32.h
@@ -868,6 +868,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void shufpd(XMMRegister dst, XMMRegister src, byte imm8);
void movhlps(XMMRegister dst, XMMRegister src);
+ void movlhps(XMMRegister dst, XMMRegister src);
void movlps(XMMRegister dst, Operand src);
void movlps(Operand dst, XMMRegister src);
void movhps(XMMRegister dst, Operand src);
@@ -1398,6 +1399,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void vshufpd(XMMRegister dst, XMMRegister src1, Operand src2, byte imm8);
void vmovhlps(XMMRegister dst, XMMRegister src1, XMMRegister src2);
+ void vmovlhps(XMMRegister dst, XMMRegister src1, XMMRegister src2);
void vmovlps(XMMRegister dst, XMMRegister src1, Operand src2);
void vmovlps(Operand dst, XMMRegister src);
void vmovhps(XMMRegister dst, XMMRegister src1, Operand src2);
@@ -1516,6 +1518,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void vmovshdup(XMMRegister dst, XMMRegister src) {
vinstr(0x16, dst, xmm0, src, kF3, k0F, kWIG);
}
+ void vbroadcastss(XMMRegister dst, XMMRegister src) {
+ vinstr(0x18, dst, xmm0, src, k66, k0F38, kW0, AVX2);
+ }
void vbroadcastss(XMMRegister dst, Operand src) {
vinstr(0x18, dst, xmm0, src, k66, k0F38, kW0);
}
@@ -1892,9 +1897,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void sse4_instr(XMMRegister dst, Operand src, byte prefix, byte escape1,
byte escape2, byte opcode);
void vinstr(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2,
- SIMDPrefix pp, LeadingOpcode m, VexW w);
+ SIMDPrefix pp, LeadingOpcode m, VexW w, CpuFeature = AVX);
void vinstr(byte op, XMMRegister dst, XMMRegister src1, Operand src2,
- SIMDPrefix pp, LeadingOpcode m, VexW w);
+ SIMDPrefix pp, LeadingOpcode m, VexW w, CpuFeature = AVX);
// Most BMI instructions are similar.
void bmi1(byte op, Register reg, Register vreg, Operand rm);
void bmi2(SIMDPrefix pp, byte op, Register reg, Register vreg, Operand rm);
@@ -1933,10 +1938,10 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// checks that we did not generate too much.
class EnsureSpace {
public:
- explicit EnsureSpace(Assembler* assembler) : assembler_(assembler) {
- if (assembler_->buffer_overflow()) assembler_->GrowBuffer();
+ explicit V8_INLINE EnsureSpace(Assembler* assembler) : assembler_(assembler) {
+ if (V8_UNLIKELY(assembler_->buffer_overflow())) assembler_->GrowBuffer();
#ifdef DEBUG
- space_before_ = assembler_->available_space();
+ space_before_ = assembler->available_space();
#endif
}
@@ -1948,7 +1953,7 @@ class EnsureSpace {
#endif
private:
- Assembler* assembler_;
+ Assembler* const assembler_;
#ifdef DEBUG
int space_before_;
#endif
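
`vinstr` now carries the `CpuFeature` it requires (defaulting to AVX), so encodings that need AVX2, like the new register-source `vbroadcastss`, assert the right feature rather than only AVX; callers are still expected to enable the feature (typically via a CpuFeatureScope) before emitting. A stand-alone sketch of parameterizing an emit helper on its required feature:

```cpp
// Sketch: pass the required CpuFeature to the emit helper so an assertion
// catches instructions emitted without that feature enabled.
#include <cassert>
#include <cstdint>

enum CpuFeature { AVX, AVX2 };

static uint32_t enabled_ = 1u << AVX;  // stand-in for Assembler::IsEnabled()
static bool IsEnabled(CpuFeature f) { return (enabled_ & (1u << f)) != 0; }

void vinstr(uint8_t op, CpuFeature feature = AVX) {
  assert(IsEnabled(feature));  // e.g. vbroadcastss xmm,xmm requires AVX2
  (void)op;  // ... emit VEX prefix and opcode here ...
}

void EmitBroadcast() {
  enabled_ |= 1u << AVX2;  // enable AVX2 before emitting (cf. CpuFeatureScope)
  vinstr(0x18, AVX2);      // register-source vbroadcastss form
}
```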
diff --git a/chromium/v8/src/codegen/ia32/interface-descriptors-ia32-inl.h b/chromium/v8/src/codegen/ia32/interface-descriptors-ia32-inl.h
new file mode 100644
index 00000000000..d079dfd7256
--- /dev/null
+++ b/chromium/v8/src/codegen/ia32/interface-descriptors-ia32-inl.h
@@ -0,0 +1,267 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CODEGEN_IA32_INTERFACE_DESCRIPTORS_IA32_INL_H_
+#define V8_CODEGEN_IA32_INTERFACE_DESCRIPTORS_IA32_INL_H_
+
+#if V8_TARGET_ARCH_IA32
+
+#include "src/codegen/interface-descriptors.h"
+
+namespace v8 {
+namespace internal {
+
+constexpr auto CallInterfaceDescriptor::DefaultRegisterArray() {
+ auto registers = RegisterArray(eax, ecx, edx, edi);
+ STATIC_ASSERT(registers.size() == kMaxBuiltinRegisterParams);
+ return registers;
+}
+
+// static
+constexpr auto RecordWriteDescriptor::registers() {
+ return RegisterArray(ecx, edx, esi, edi, kReturnRegister0);
+}
+
+// static
+constexpr auto DynamicCheckMapsDescriptor::registers() {
+ return RegisterArray(eax, ecx, edx, edi, esi);
+}
+
+// static
+constexpr auto EphemeronKeyBarrierDescriptor::registers() {
+ return RegisterArray(ecx, edx, esi, edi, kReturnRegister0);
+}
+
+// static
+constexpr Register LoadDescriptor::ReceiverRegister() { return edx; }
+// static
+constexpr Register LoadDescriptor::NameRegister() { return ecx; }
+// static
+constexpr Register LoadDescriptor::SlotRegister() { return eax; }
+
+// static
+constexpr Register LoadWithVectorDescriptor::VectorRegister() { return no_reg; }
+
+// static
+constexpr Register
+LoadWithReceiverAndVectorDescriptor::LookupStartObjectRegister() {
+ return edi;
+}
+
+// static
+constexpr Register StoreDescriptor::ReceiverRegister() { return edx; }
+// static
+constexpr Register StoreDescriptor::NameRegister() { return ecx; }
+// static
+constexpr Register StoreDescriptor::ValueRegister() { return no_reg; }
+// static
+constexpr Register StoreDescriptor::SlotRegister() { return no_reg; }
+
+// static
+constexpr Register StoreWithVectorDescriptor::VectorRegister() {
+ return no_reg;
+}
+
+// static
+constexpr Register StoreTransitionDescriptor::MapRegister() { return edi; }
+
+// static
+constexpr Register ApiGetterDescriptor::HolderRegister() { return ecx; }
+// static
+constexpr Register ApiGetterDescriptor::CallbackRegister() { return eax; }
+
+// static
+constexpr Register GrowArrayElementsDescriptor::ObjectRegister() { return eax; }
+// static
+constexpr Register GrowArrayElementsDescriptor::KeyRegister() { return ecx; }
+
+// static
+constexpr Register BaselineLeaveFrameDescriptor::ParamsSizeRegister() {
+ return esi;
+}
+// static
+constexpr Register BaselineLeaveFrameDescriptor::WeightRegister() {
+ return edi;
+}
+
+// static
+constexpr Register TypeConversionDescriptor::ArgumentRegister() { return eax; }
+
+// static
+constexpr auto TypeofDescriptor::registers() { return RegisterArray(ecx); }
+
+// static
+constexpr auto CallTrampolineDescriptor::registers() {
+ // eax : number of arguments
+ // edi : the target to call
+ return RegisterArray(edi, eax);
+}
+
+// static
+constexpr auto CallVarargsDescriptor::registers() {
+ // eax : number of arguments (on the stack, not including receiver)
+ // edi : the target to call
+ // ecx : arguments list length (untagged)
+ // On the stack : arguments list (FixedArray)
+ return RegisterArray(edi, eax, ecx);
+}
+
+// static
+constexpr auto CallForwardVarargsDescriptor::registers() {
+ // eax : number of arguments
+ // ecx : start index (to support rest parameters)
+ // edi : the target to call
+ return RegisterArray(edi, eax, ecx);
+}
+
+// static
+constexpr auto CallFunctionTemplateDescriptor::registers() {
+ // edx : function template info
+ // ecx : number of arguments (on the stack, not including receiver)
+ return RegisterArray(edx, ecx);
+}
+
+// static
+constexpr auto CallWithSpreadDescriptor::registers() {
+ // eax : number of arguments (on the stack, not including receiver)
+ // edi : the target to call
+ // ecx : the object to spread
+ return RegisterArray(edi, eax, ecx);
+}
+
+// static
+constexpr auto CallWithArrayLikeDescriptor::registers() {
+ // edi : the target to call
+ // edx : the arguments list
+ return RegisterArray(edi, edx);
+}
+
+// static
+constexpr auto ConstructVarargsDescriptor::registers() {
+ // eax : number of arguments (on the stack, not including receiver)
+ // edi : the target to call
+ // edx : the new target
+ // ecx : arguments list length (untagged)
+ // On the stack : arguments list (FixedArray)
+ return RegisterArray(edi, edx, eax, ecx);
+}
+
+// static
+constexpr auto ConstructForwardVarargsDescriptor::registers() {
+ // eax : number of arguments
+ // edx : the new target
+ // ecx : start index (to support rest parameters)
+ // edi : the target to call
+ return RegisterArray(edi, edx, eax, ecx);
+}
+
+// static
+constexpr auto ConstructWithSpreadDescriptor::registers() {
+ // eax : number of arguments (on the stack, not including receiver)
+ // edi : the target to call
+ // edx : the new target
+ // ecx : the object to spread
+ return RegisterArray(edi, edx, eax, ecx);
+}
+
+// static
+constexpr auto ConstructWithArrayLikeDescriptor::registers() {
+ // edi : the target to call
+ // edx : the new target
+ // ecx : the arguments list
+ return RegisterArray(edi, edx, ecx);
+}
+
+// static
+constexpr auto ConstructStubDescriptor::registers() {
+ // eax : number of arguments
+ // edx : the new target
+ // edi : the target to call
+ // ecx : allocation site or undefined
+ // TODO(jgruber): Remove the unused allocation site parameter.
+ return RegisterArray(edi, edx, eax, ecx);
+}
+
+// static
+constexpr auto AbortDescriptor::registers() { return RegisterArray(edx); }
+
+// static
+constexpr auto CompareDescriptor::registers() {
+ return RegisterArray(edx, eax);
+}
+
+// static
+constexpr auto Compare_BaselineDescriptor::registers() {
+ return RegisterArray(edx, eax, ecx);
+}
+
+// static
+constexpr auto BinaryOpDescriptor::registers() {
+ return RegisterArray(edx, eax);
+}
+
+// static
+constexpr auto BinaryOp_BaselineDescriptor::registers() {
+ return RegisterArray(edx, eax, ecx);
+}
+
+// static
+constexpr auto ApiCallbackDescriptor::registers() {
+ return RegisterArray(edx, // kApiFunctionAddress
+ ecx, // kArgc
+ eax, // kCallData
+ edi); // kHolder
+}
+
+// static
+constexpr auto InterpreterDispatchDescriptor::registers() {
+ return RegisterArray(
+ kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister,
+ kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister);
+}
+
+// static
+constexpr auto InterpreterPushArgsThenCallDescriptor::registers() {
+ return RegisterArray(eax, // argument count (not including receiver)
+ ecx, // address of first argument
+ edi); // the target callable to be called
+}
+
+// static
+constexpr auto InterpreterPushArgsThenConstructDescriptor::registers() {
+ return RegisterArray(eax, // argument count (not including receiver)
+ ecx); // address of first argument
+}
+
+// static
+constexpr auto ResumeGeneratorDescriptor::registers() {
+ return RegisterArray(eax, // the value to pass to the generator
+ edx); // the JSGeneratorObject to resume
+}
+
+// static
+constexpr auto RunMicrotasksEntryDescriptor::registers() {
+ return RegisterArray();
+}
+
+// static
+constexpr auto WasmFloat32ToNumberDescriptor::registers() {
+ // Work around using eax, whose register code is 0, and leads to the FP
+ // parameter being passed via xmm0, which is not allocatable on ia32.
+ return RegisterArray(ecx);
+}
+
+// static
+constexpr auto WasmFloat64ToNumberDescriptor::registers() {
+ // Work around using eax, whose register code is 0, and leads to the FP
+ // parameter being passed via xmm0, which is not allocatable on ia32.
+ return RegisterArray(ecx);
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TARGET_ARCH_IA32
+
+#endif // V8_CODEGEN_IA32_INTERFACE_DESCRIPTORS_IA32_INL_H_
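
This new header moves the ia32 calling-convention data out of the runtime `InitializePlatformSpecific` bodies (the file deleted below) and into `constexpr` accessors, so each descriptor's register assignment is compile-time data. A reduced sketch of the `RegisterArray` idea, using stand-in types rather than V8's Register and descriptor machinery:

```cpp
// Sketch: constexpr register lists instead of runtime initialization.
#include <array>

enum Register { eax, ecx, edx, edi };

template <typename... Regs>
constexpr std::array<Register, sizeof...(Regs)> RegisterArray(Regs... regs) {
  return {{regs...}};
}

struct CallTrampolineDescriptor {
  // edi: the target to call, eax: number of arguments (mirrors the
  // ia32 assignment above).
  static constexpr auto registers() { return RegisterArray(edi, eax); }
};

static_assert(CallTrampolineDescriptor::registers().size() == 2,
              "target and argument count");
```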
diff --git a/chromium/v8/src/codegen/ia32/interface-descriptors-ia32.cc b/chromium/v8/src/codegen/ia32/interface-descriptors-ia32.cc
deleted file mode 100644
index fd76e01590b..00000000000
--- a/chromium/v8/src/codegen/ia32/interface-descriptors-ia32.cc
+++ /dev/null
@@ -1,318 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_IA32
-
-#include "src/codegen/interface-descriptors.h"
-
-#include "src/execution/frames.h"
-
-namespace v8 {
-namespace internal {
-
-const Register CallInterfaceDescriptor::ContextRegister() { return esi; }
-
-void CallInterfaceDescriptor::DefaultInitializePlatformSpecific(
- CallInterfaceDescriptorData* data, int register_parameter_count) {
- constexpr Register default_stub_registers[] = {eax, ecx, edx, edi};
- STATIC_ASSERT(arraysize(default_stub_registers) == kMaxBuiltinRegisterParams);
- CHECK_LE(static_cast<size_t>(register_parameter_count),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(register_parameter_count,
- default_stub_registers);
-}
-
-void RecordWriteDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- static const Register default_stub_registers[] = {ecx, edx, esi, edi,
- kReturnRegister0};
-
- data->RestrictAllocatableRegisters(default_stub_registers,
- arraysize(default_stub_registers));
-
- CHECK_LE(static_cast<size_t>(kParameterCount),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
-}
-
-void DynamicCheckMapsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register default_stub_registers[] = {eax, ecx, edx, edi, esi};
-
- data->RestrictAllocatableRegisters(default_stub_registers,
- arraysize(default_stub_registers));
-
- CHECK_LE(static_cast<size_t>(kParameterCount),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
-}
-
-void EphemeronKeyBarrierDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- static const Register default_stub_registers[] = {ecx, edx, esi, edi,
- kReturnRegister0};
-
- data->RestrictAllocatableRegisters(default_stub_registers,
- arraysize(default_stub_registers));
-
- CHECK_LE(static_cast<size_t>(kParameterCount),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
-}
-
-const Register LoadDescriptor::ReceiverRegister() { return edx; }
-const Register LoadDescriptor::NameRegister() { return ecx; }
-const Register LoadDescriptor::SlotRegister() { return eax; }
-
-const Register LoadWithVectorDescriptor::VectorRegister() { return no_reg; }
-
-const Register
-LoadWithReceiverAndVectorDescriptor::LookupStartObjectRegister() {
- return edi;
-}
-
-const Register StoreDescriptor::ReceiverRegister() { return edx; }
-const Register StoreDescriptor::NameRegister() { return ecx; }
-const Register StoreDescriptor::ValueRegister() { return no_reg; }
-const Register StoreDescriptor::SlotRegister() { return no_reg; }
-
-const Register StoreWithVectorDescriptor::VectorRegister() { return no_reg; }
-
-const Register StoreTransitionDescriptor::SlotRegister() { return no_reg; }
-const Register StoreTransitionDescriptor::VectorRegister() { return no_reg; }
-const Register StoreTransitionDescriptor::MapRegister() { return edi; }
-
-const Register ApiGetterDescriptor::HolderRegister() { return ecx; }
-const Register ApiGetterDescriptor::CallbackRegister() { return eax; }
-
-const Register GrowArrayElementsDescriptor::ObjectRegister() { return eax; }
-const Register GrowArrayElementsDescriptor::KeyRegister() { return ecx; }
-
-const Register BaselineLeaveFrameDescriptor::ParamsSizeRegister() {
- return esi;
-}
-const Register BaselineLeaveFrameDescriptor::WeightRegister() { return edi; }
-
-// static
-const Register TypeConversionDescriptor::ArgumentRegister() { return eax; }
-
-void TypeofDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {ecx};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallTrampolineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // eax : number of arguments
- // edi : the target to call
- Register registers[] = {edi, eax};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallVarargsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // eax : number of arguments (on the stack, not including receiver)
- // edi : the target to call
- // ecx : arguments list length (untagged)
- // On the stack : arguments list (FixedArray)
- Register registers[] = {edi, eax, ecx};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallForwardVarargsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // eax : number of arguments
- // ecx : start index (to support rest parameters)
- // edi : the target to call
- Register registers[] = {edi, eax, ecx};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallFunctionTemplateDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // edx : function template info
- // ecx : number of arguments (on the stack, not including receiver)
- Register registers[] = {edx, ecx};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallWithSpreadDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // eax : number of arguments (on the stack, not including receiver)
- // edi : the target to call
- // ecx : the object to spread
- Register registers[] = {edi, eax, ecx};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallWithArrayLikeDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // edi : the target to call
- // edx : the arguments list
- Register registers[] = {edi, edx};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructVarargsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // eax : number of arguments (on the stack, not including receiver)
- // edi : the target to call
- // edx : the new target
- // ecx : arguments list length (untagged)
- // On the stack : arguments list (FixedArray)
- Register registers[] = {edi, edx, eax, ecx};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructForwardVarargsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // eax : number of arguments
- // edx : the new target
- // ecx : start index (to support rest parameters)
- // edi : the target to call
- Register registers[] = {edi, edx, eax, ecx};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructWithSpreadDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // eax : number of arguments (on the stack, not including receiver)
- // edi : the target to call
- // edx : the new target
- // ecx : the object to spread
- Register registers[] = {edi, edx, eax, ecx};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructWithArrayLikeDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // edi : the target to call
- // edx : the new target
- // ecx : the arguments list
- Register registers[] = {edi, edx, ecx};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructStubDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // eax : number of arguments
- // edx : the new target
- // edi : the target to call
- // ecx : allocation site or undefined
- // TODO(jgruber): Remove the unused allocation site parameter.
- Register registers[] = {edi, edx, eax, ecx};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void AbortDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {edx};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CompareDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {edx, eax};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void Compare_BaselineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {edx, eax, ecx};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void BinaryOpDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {edx, eax};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void BinaryOp_BaselineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {edx, eax, ecx};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ApiCallbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- edx, // kApiFunctionAddress
- ecx, // kArgc
- eax, // kCallData
- edi, // kHolder
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void InterpreterDispatchDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister,
- kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void InterpreterPushArgsThenCallDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- eax, // argument count (not including receiver)
- ecx, // address of first argument
- edi // the target callable to be call
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void InterpreterPushArgsThenConstructDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- eax, // argument count (not including receiver)
- ecx, // address of first argument
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ResumeGeneratorDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- eax, // the value to pass to the generator
- edx // the JSGeneratorObject to resume
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void FrameDropperTrampolineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- eax, // loaded new FP
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void RunMicrotasksEntryDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- data->InitializePlatformSpecific(0, nullptr);
-}
-
-void WasmFloat32ToNumberDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // Work around using eax, whose register code is 0, and leads to the FP
- // parameter being passed via xmm0, which is not allocatable on ia32.
- Register registers[] = {ecx};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void WasmFloat64ToNumberDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // Work around using eax, whose register code is 0, and leads to the FP
- // parameter being passed via xmm0, which is not allocatable on ia32.
- Register registers[] = {ecx};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_IA32
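Note on the deletion above: the ia32-specific descriptor plumbing removed here reappears later in this patch as compile-time register arrays (see the new interface-descriptors-inl.h and the per-architecture *-inl.h headers it includes). A hedged sketch of the shape change, using CallTrampolineDescriptor as the example; the ia32 body shown as "new" is an assumption based on the pattern in interface-descriptors-inl.h, since interface-descriptors-ia32-inl.h itself is not part of this excerpt:

// Old form (removed above): registers handed to the descriptor at runtime.
//
//   void CallTrampolineDescriptor::InitializePlatformSpecific(
//       CallInterfaceDescriptorData* data) {
//     // eax : number of arguments, edi : the target to call
//     Register registers[] = {edi, eax};
//     data->InitializePlatformSpecific(arraysize(registers), registers);
//   }
//
// New form (pattern taken from interface-descriptors-inl.h later in this
// diff):
//
//   // static
//   constexpr auto CallTrampolineDescriptor::registers() {
//     return RegisterArray(edi, eax);
//   }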
diff --git a/chromium/v8/src/codegen/ia32/macro-assembler-ia32.cc b/chromium/v8/src/codegen/ia32/macro-assembler-ia32.cc
index 9892eb9470e..7c8af3fde00 100644
--- a/chromium/v8/src/codegen/ia32/macro-assembler-ia32.cc
+++ b/chromium/v8/src/codegen/ia32/macro-assembler-ia32.cc
@@ -19,7 +19,7 @@
#include "src/codegen/external-reference.h"
#include "src/codegen/ia32/assembler-ia32.h"
#include "src/codegen/ia32/register-ia32.h"
-#include "src/codegen/interface-descriptors.h"
+#include "src/codegen/interface-descriptors-inl.h"
#include "src/codegen/label.h"
#include "src/codegen/macro-assembler.h"
#include "src/codegen/register.h"
@@ -294,7 +294,7 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
}
}
- if (fp_mode == kSaveFPRegs) {
+ if (fp_mode == SaveFPRegsMode::kSave) {
// Count all XMM registers except XMM0.
bytes += kDoubleSize * (XMMRegister::kNumRegisters - 1);
}
@@ -316,7 +316,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
}
}
- if (fp_mode == kSaveFPRegs) {
+ if (fp_mode == SaveFPRegsMode::kSave) {
// Save all XMM registers except XMM0.
int delta = kDoubleSize * (XMMRegister::kNumRegisters - 1);
AllocateStackSpace(delta);
@@ -333,7 +333,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
Register exclusion2, Register exclusion3) {
int bytes = 0;
- if (fp_mode == kSaveFPRegs) {
+ if (fp_mode == SaveFPRegsMode::kSave) {
// Restore all XMM registers except XMM0.
int delta = kDoubleSize * (XMMRegister::kNumRegisters - 1);
for (int i = XMMRegister::kNumRegisters - 1; i > 0; i--) {
@@ -365,7 +365,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
Label done;
// Skip barrier if writing a smi.
- if (smi_check == INLINE_SMI_CHECK) {
+ if (smi_check == SmiCheck::kInline) {
JumpIfSmi(value, &done);
}
@@ -374,7 +374,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
DCHECK(IsAligned(offset, kTaggedSize));
lea(dst, FieldOperand(object, offset));
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
Label ok;
test_b(dst, Immediate(kTaggedSize - 1));
j(zero, &ok, Label::kNear);
@@ -383,13 +383,13 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
}
RecordWrite(object, dst, value, save_fp, remembered_set_action,
- OMIT_SMI_CHECK);
+ SmiCheck::kOmit);
bind(&done);
// Clobber clobbered input registers when running with the debug-code flag
// turned on to provoke errors.
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
mov(value, Immediate(bit_cast<int32_t>(kZapValue)));
mov(dst, Immediate(bit_cast<int32_t>(kZapValue)));
}
@@ -511,13 +511,13 @@ void MacroAssembler::RecordWrite(Register object, Register address,
DCHECK(value != address);
AssertNotSmi(object);
- if ((remembered_set_action == OMIT_REMEMBERED_SET &&
+ if ((remembered_set_action == RememberedSetAction::kOmit &&
!FLAG_incremental_marking) ||
FLAG_disable_write_barriers) {
return;
}
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
Label ok;
cmp(value, Operand(address, 0));
j(equal, &ok, Label::kNear);
@@ -529,7 +529,7 @@ void MacroAssembler::RecordWrite(Register object, Register address,
// catch stores of Smis and stores into young gen.
Label done;
- if (smi_check == INLINE_SMI_CHECK) {
+ if (smi_check == SmiCheck::kInline) {
// Skip barrier if writing a smi.
JumpIfSmi(value, &done, Label::kNear);
}
@@ -549,25 +549,12 @@ void MacroAssembler::RecordWrite(Register object, Register address,
// Clobber clobbered registers when running with the debug-code flag
// turned on to provoke errors.
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
mov(address, Immediate(bit_cast<int32_t>(kZapValue)));
mov(value, Immediate(bit_cast<int32_t>(kZapValue)));
}
}
-void MacroAssembler::MaybeDropFrames() {
- // Check whether we need to drop frames to restart a function on the stack.
- Label dont_drop;
- ExternalReference restart_fp =
- ExternalReference::debug_restart_fp_address(isolate());
- mov(eax, ExternalReferenceAsOperand(restart_fp, eax));
- test(eax, eax);
- j(zero, &dont_drop, Label::kNear);
-
- Jump(BUILTIN_CODE(isolate(), FrameDropperTrampoline), RelocInfo::CODE_TARGET);
- bind(&dont_drop);
-}
-
void TurboAssembler::Cvtsi2ss(XMMRegister dst, Operand src) {
xorps(dst, dst);
cvtsi2ss(dst, src);
@@ -1029,14 +1016,14 @@ void MacroAssembler::CmpInstanceTypeRange(Register map, Register scratch,
}
void MacroAssembler::AssertSmi(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
test(object, Immediate(kSmiTagMask));
Check(equal, AbortReason::kOperandIsNotASmi);
}
}
void MacroAssembler::AssertConstructor(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
test(object, Immediate(kSmiTagMask));
Check(not_equal, AbortReason::kOperandIsASmiAndNotAConstructor);
Push(object);
@@ -1049,7 +1036,7 @@ void MacroAssembler::AssertConstructor(Register object) {
}
void MacroAssembler::AssertFunction(Register object, Register scratch) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
test(object, Immediate(kSmiTagMask));
Check(not_equal, AbortReason::kOperandIsASmiAndNotAFunction);
Push(object);
@@ -1062,7 +1049,7 @@ void MacroAssembler::AssertFunction(Register object, Register scratch) {
}
void MacroAssembler::AssertBoundFunction(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
test(object, Immediate(kSmiTagMask));
Check(not_equal, AbortReason::kOperandIsASmiAndNotABoundFunction);
Push(object);
@@ -1073,7 +1060,7 @@ void MacroAssembler::AssertBoundFunction(Register object) {
}
void MacroAssembler::AssertGeneratorObject(Register object) {
- if (!emit_debug_code()) return;
+ if (!FLAG_debug_code) return;
test(object, Immediate(kSmiTagMask));
Check(not_equal, AbortReason::kOperandIsASmiAndNotAGeneratorObject);
@@ -1105,7 +1092,7 @@ void MacroAssembler::AssertGeneratorObject(Register object) {
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
Register scratch) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
Label done_checking;
AssertNotSmi(object);
CompareRoot(object, scratch, RootIndex::kUndefinedValue);
@@ -1118,7 +1105,7 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
}
void MacroAssembler::AssertNotSmi(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
test(object, Immediate(kSmiTagMask));
Check(not_equal, AbortReason::kOperandIsASmi);
}
@@ -1147,7 +1134,7 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
}
void TurboAssembler::LeaveFrame(StackFrame::Type type) {
- if (emit_debug_code() && !StackFrame::IsJavaScript(type)) {
+ if (FLAG_debug_code && !StackFrame::IsJavaScript(type)) {
cmp(Operand(ebp, CommonFrameConstants::kContextOrFrameTypeOffset),
Immediate(StackFrame::TypeToMarker(type)));
Check(equal, AbortReason::kStackFrameTypesMustMatch);
@@ -1389,8 +1376,8 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
bool builtin_exit_frame) {
// Set the entry point and jump to the C entry runtime stub.
Move(kRuntimeCallFunctionRegister, Immediate(ext));
- Handle<Code> code = CodeFactory::CEntry(isolate(), 1, kDontSaveFPRegs,
- kArgvOnStack, builtin_exit_frame);
+ Handle<Code> code = CodeFactory::CEntry(isolate(), 1, SaveFPRegsMode::kIgnore,
+ ArgvMode::kStack, builtin_exit_frame);
Jump(code, RelocInfo::CODE_TARGET);
}
@@ -1494,7 +1481,7 @@ void MacroAssembler::StackOverflowCheck(Register num_args, Register scratch,
void MacroAssembler::InvokePrologue(Register expected_parameter_count,
Register actual_parameter_count,
- Label* done, InvokeFlag flag) {
+ Label* done, InvokeType type) {
if (expected_parameter_count != actual_parameter_count) {
DCHECK_EQ(actual_parameter_count, eax);
DCHECK_EQ(expected_parameter_count, ecx);
@@ -1531,9 +1518,9 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
Operand(expected_parameter_count, times_system_pointer_size, 0));
AllocateStackSpace(scratch);
// Extra words are the receiver and the return address (if a jump).
- int extra_words = flag == CALL_FUNCTION ? 1 : 2;
+ int extra_words = type == InvokeType::kCall ? 1 : 2;
lea(num, Operand(eax, extra_words)); // Number of words to copy.
- Set(current, 0);
+ Move(current, 0);
// Fall-through to the loop body because there are non-zero words to copy.
bind(&copy);
mov(scratch, Operand(src, current, times_system_pointer_size, 0));
@@ -1610,9 +1597,9 @@ void MacroAssembler::CallDebugOnFunctionCall(Register fun, Register new_target,
void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
Register expected_parameter_count,
Register actual_parameter_count,
- InvokeFlag flag) {
+ InvokeType type) {
// You can't call a function without a valid frame.
- DCHECK_IMPLIES(flag == CALL_FUNCTION, has_frame());
+ DCHECK_IMPLIES(type == InvokeType::kCall, has_frame());
DCHECK_EQ(function, edi);
DCHECK_IMPLIES(new_target.is_valid(), new_target == edx);
DCHECK(expected_parameter_count == ecx || expected_parameter_count == eax);
@@ -1636,17 +1623,19 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
}
Label done;
- InvokePrologue(expected_parameter_count, actual_parameter_count, &done, flag);
+ InvokePrologue(expected_parameter_count, actual_parameter_count, &done, type);
// We call indirectly through the code field in the function to
// allow recompilation to take effect without changing any of the
// call sites.
static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
mov(ecx, FieldOperand(function, JSFunction::kCodeOffset));
- if (flag == CALL_FUNCTION) {
- CallCodeObject(ecx);
- } else {
- DCHECK(flag == JUMP_FUNCTION);
- JumpCodeObject(ecx);
+ switch (type) {
+ case InvokeType::kCall:
+ CallCodeObject(ecx);
+ break;
+ case InvokeType::kJump:
+ JumpCodeObject(ecx);
+ break;
}
jmp(&done, Label::kNear);
@@ -1661,9 +1650,9 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
void MacroAssembler::InvokeFunction(Register fun, Register new_target,
Register actual_parameter_count,
- InvokeFlag flag) {
+ InvokeType type) {
// You can't call a function without a valid frame.
- DCHECK(flag == JUMP_FUNCTION || has_frame());
+ DCHECK(type == InvokeType::kJump || has_frame());
DCHECK(fun == edi);
mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
@@ -1671,7 +1660,7 @@ void MacroAssembler::InvokeFunction(Register fun, Register new_target,
movzx_w(ecx,
FieldOperand(ecx, SharedFunctionInfo::kFormalParameterCountOffset));
- InvokeFunctionCode(edi, new_target, ecx, actual_parameter_count, flag);
+ InvokeFunctionCode(edi, new_target, ecx, actual_parameter_count, type);
}
void MacroAssembler::LoadGlobalProxy(Register dst) {
@@ -1852,34 +1841,6 @@ void TurboAssembler::Pshufb(XMMRegister dst, XMMRegister src, Operand mask) {
pshufb(dst, mask);
}
-void TurboAssembler::Pblendw(XMMRegister dst, Operand src, uint8_t imm8) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpblendw(dst, dst, src, imm8);
- return;
- }
- if (CpuFeatures::IsSupported(SSE4_1)) {
- CpuFeatureScope sse_scope(this, SSE4_1);
- pblendw(dst, src, imm8);
- return;
- }
- FATAL("no AVX or SSE4.1 support");
-}
-
-void TurboAssembler::Palignr(XMMRegister dst, Operand src, uint8_t imm8) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpalignr(dst, dst, src, imm8);
- return;
- }
- if (CpuFeatures::IsSupported(SSSE3)) {
- CpuFeatureScope sse_scope(this, SSSE3);
- palignr(dst, src, imm8);
- return;
- }
- FATAL("no AVX or SSE3 support");
-}
-
void TurboAssembler::Pextrd(Register dst, XMMRegister src, uint8_t imm8) {
if (imm8 == 0) {
Movd(dst, src);
@@ -1994,19 +1955,6 @@ void TurboAssembler::Vbroadcastss(XMMRegister dst, Operand src) {
shufps(dst, dst, static_cast<byte>(0));
}
-void TurboAssembler::Shufps(XMMRegister dst, XMMRegister src1, XMMRegister src2,
- uint8_t imm8) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vshufps(dst, src1, src2, imm8);
- } else {
- if (dst != src1) {
- movaps(dst, src1);
- }
- shufps(dst, src2, imm8);
- }
-}
-
void TurboAssembler::Lzcnt(Register dst, Operand src) {
if (CpuFeatures::IsSupported(LZCNT)) {
CpuFeatureScope scope(this, LZCNT);
@@ -2079,11 +2027,11 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
}
void TurboAssembler::Assert(Condition cc, AbortReason reason) {
- if (emit_debug_code()) Check(cc, reason);
+ if (FLAG_debug_code) Check(cc, reason);
}
void TurboAssembler::AssertUnreachable(AbortReason reason) {
- if (emit_debug_code()) Abort(reason);
+ if (FLAG_debug_code) Abort(reason);
}
void TurboAssembler::Check(Condition cc, AbortReason reason) {
@@ -2109,11 +2057,11 @@ void TurboAssembler::CheckStackAlignment() {
}
void TurboAssembler::Abort(AbortReason reason) {
-#ifdef DEBUG
- const char* msg = GetAbortReason(reason);
- RecordComment("Abort message: ");
- RecordComment(msg);
-#endif
+ if (FLAG_code_comments) {
+ const char* msg = GetAbortReason(reason);
+ RecordComment("Abort message: ");
+ RecordComment(msg);
+ }
// Avoid emitting call to builtin if requested.
if (trap_on_abort()) {
@@ -2171,7 +2119,7 @@ void TurboAssembler::CallCFunction(Register function, int num_arguments) {
DCHECK_LE(num_arguments, kMaxCParameters);
DCHECK(has_frame());
// Check stack alignment.
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
CheckStackAlignment();
}
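Most of the churn in macro-assembler-ia32.cc above is a mechanical rename from unscoped enums and the emit_debug_code() helper to scoped enum classes and the FLAG_debug_code flag. A minimal, self-contained sketch of the mapping; only the enumerator names that appear in the diff are grounded, while the enumerator order and the simplified call site are illustrative, not copied from the V8 headers:

#include <cstdio>

// Illustrative stand-ins for the scoped enums this patch switches to.
enum class SaveFPRegsMode { kIgnore, kSave };       // was kDontSaveFPRegs / kSaveFPRegs
enum class RememberedSetAction { kOmit, kEmit };    // was OMIT_REMEMBERED_SET / EMIT_REMEMBERED_SET
enum class SmiCheck { kOmit, kInline };             // was OMIT_SMI_CHECK / INLINE_SMI_CHECK
enum class InvokeType { kCall, kJump };             // was CALL_FUNCTION / JUMP_FUNCTION

// A toy RecordWrite-style function showing how call sites read after the
// rename (compare the kSaveFPRegs / INLINE_SMI_CHECK versions removed above).
void RecordWriteLike(SaveFPRegsMode fp_mode, RememberedSetAction rs, SmiCheck smi) {
  if (fp_mode == SaveFPRegsMode::kSave) std::puts("save XMM registers");
  if (rs == RememberedSetAction::kEmit) std::puts("emit remembered set entry");
  if (smi == SmiCheck::kInline) std::puts("inline smi check");
}

int main() {
  RecordWriteLike(SaveFPRegsMode::kSave, RememberedSetAction::kEmit,
                  SmiCheck::kInline);
}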
diff --git a/chromium/v8/src/codegen/ia32/macro-assembler-ia32.h b/chromium/v8/src/codegen/ia32/macro-assembler-ia32.h
index 4c5c3ade021..a21a3555682 100644
--- a/chromium/v8/src/codegen/ia32/macro-assembler-ia32.h
+++ b/chromium/v8/src/codegen/ia32/macro-assembler-ia32.h
@@ -44,9 +44,6 @@ class StatsCounter;
// distinguish memory operands from other operands on ia32.
using MemOperand = Operand;
-enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
-enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
-
// TODO(victorgomes): Move definition to macro-assembler.h, once all other
// platforms are updated.
enum class StackLimitKind { kInterruptStackLimit, kRealStackLimit };
@@ -122,6 +119,13 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
void CheckStackAlignment();
// Move a constant into a destination using the most efficient encoding.
+ void Move(Register dst, int32_t x) {
+ if (x == 0) {
+ xor_(dst, dst);
+ } else {
+ mov(dst, Immediate(x));
+ }
+ }
void Move(Register dst, const Immediate& src);
void Move(Register dst, Smi src) { Move(dst, Immediate(src)); }
void Move(Register dst, Handle<HeapObject> src);
@@ -301,152 +305,11 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
// may be bigger than 2^16 - 1. Requires a scratch register.
void Ret(int bytes_dropped, Register scratch);
-// Only use these macros when non-destructive source of AVX version is not
-// needed.
-#define AVX_OP3_WITH_TYPE(macro_name, name, dst_type, src_type) \
- void macro_name(dst_type dst, src_type src) { \
- if (CpuFeatures::IsSupported(AVX)) { \
- CpuFeatureScope scope(this, AVX); \
- v##name(dst, dst, src); \
- } else { \
- name(dst, src); \
- } \
- }
-#define AVX_OP3_XO(macro_name, name) \
- AVX_OP3_WITH_TYPE(macro_name, name, XMMRegister, XMMRegister) \
- AVX_OP3_WITH_TYPE(macro_name, name, XMMRegister, Operand)
-
- AVX_OP3_XO(Packsswb, packsswb)
- AVX_OP3_XO(Packuswb, packuswb)
- AVX_OP3_XO(Paddusb, paddusb)
- AVX_OP3_XO(Pand, pand)
- AVX_OP3_XO(Pcmpeqb, pcmpeqb)
- AVX_OP3_XO(Pcmpeqw, pcmpeqw)
- AVX_OP3_XO(Pcmpeqd, pcmpeqd)
- AVX_OP3_XO(Por, por)
- AVX_OP3_XO(Psubb, psubb)
- AVX_OP3_XO(Psubw, psubw)
- AVX_OP3_XO(Psubd, psubd)
- AVX_OP3_XO(Psubq, psubq)
- AVX_OP3_XO(Punpcklbw, punpcklbw)
- AVX_OP3_XO(Punpckhbw, punpckhbw)
- AVX_OP3_XO(Punpckldq, punpckldq)
- AVX_OP3_XO(Punpcklqdq, punpcklqdq)
- AVX_OP3_XO(Pxor, pxor)
- AVX_OP3_XO(Andps, andps)
- AVX_OP3_XO(Andpd, andpd)
- AVX_OP3_XO(Xorps, xorps)
- AVX_OP3_XO(Xorpd, xorpd)
- AVX_OP3_XO(Sqrtss, sqrtss)
- AVX_OP3_XO(Sqrtsd, sqrtsd)
- AVX_OP3_XO(Orps, orps)
- AVX_OP3_XO(Orpd, orpd)
- AVX_OP3_XO(Andnpd, andnpd)
- AVX_OP3_WITH_TYPE(Movhlps, movhlps, XMMRegister, XMMRegister)
- AVX_OP3_WITH_TYPE(Psraw, psraw, XMMRegister, uint8_t)
- AVX_OP3_WITH_TYPE(Psrlq, psrlq, XMMRegister, uint8_t)
-
-#undef AVX_OP3_XO
-#undef AVX_OP3_WITH_TYPE
-
-// Same as AVX_OP3_WITH_TYPE but supports a CpuFeatureScope
-#define AVX_OP2_WITH_TYPE_SCOPE(macro_name, name, dst_type, src_type, \
- sse_scope) \
- void macro_name(dst_type dst, src_type src) { \
- if (CpuFeatures::IsSupported(AVX)) { \
- CpuFeatureScope scope(this, AVX); \
- v##name(dst, dst, src); \
- } else if (CpuFeatures::IsSupported(sse_scope)) { \
- CpuFeatureScope scope(this, sse_scope); \
- name(dst, src); \
- } \
- }
-#define AVX_OP2_XO(macro_name, name, sse_scope) \
- AVX_OP2_WITH_TYPE_SCOPE(macro_name, name, XMMRegister, XMMRegister, \
- sse_scope) \
- AVX_OP2_WITH_TYPE_SCOPE(macro_name, name, XMMRegister, Operand, sse_scope)
- AVX_OP2_XO(Psignb, psignb, SSSE3)
- AVX_OP2_XO(Psignw, psignw, SSSE3)
- AVX_OP2_XO(Psignd, psignd, SSSE3)
- AVX_OP2_XO(Pcmpeqq, pcmpeqq, SSE4_1)
-#undef AVX_OP2_XO
-#undef AVX_OP2_WITH_TYPE_SCOPE
-
-// Only use this macro when dst and src1 is the same in SSE case.
-#define AVX_PACKED_OP3_WITH_TYPE(macro_name, name, dst_type, src_type) \
- void macro_name(dst_type dst, dst_type src1, src_type src2) { \
- if (CpuFeatures::IsSupported(AVX)) { \
- CpuFeatureScope scope(this, AVX); \
- v##name(dst, src1, src2); \
- } else { \
- DCHECK_EQ(dst, src1); \
- name(dst, src2); \
- } \
- }
-#define AVX_PACKED_OP3(macro_name, name) \
- AVX_PACKED_OP3_WITH_TYPE(macro_name, name, XMMRegister, XMMRegister) \
- AVX_PACKED_OP3_WITH_TYPE(macro_name, name, XMMRegister, Operand)
-
- AVX_PACKED_OP3(Unpcklps, unpcklps)
- AVX_PACKED_OP3(Andnps, andnps)
- AVX_PACKED_OP3(Addps, addps)
- AVX_PACKED_OP3(Addpd, addpd)
- AVX_PACKED_OP3(Subps, subps)
- AVX_PACKED_OP3(Subpd, subpd)
- AVX_PACKED_OP3(Mulps, mulps)
- AVX_PACKED_OP3(Mulpd, mulpd)
- AVX_PACKED_OP3(Divps, divps)
- AVX_PACKED_OP3(Divpd, divpd)
- AVX_PACKED_OP3(Cmpeqpd, cmpeqpd)
- AVX_PACKED_OP3(Cmpneqpd, cmpneqpd)
- AVX_PACKED_OP3(Cmpltpd, cmpltpd)
- AVX_PACKED_OP3(Cmpleps, cmpleps)
- AVX_PACKED_OP3(Cmplepd, cmplepd)
- AVX_PACKED_OP3(Minps, minps)
- AVX_PACKED_OP3(Minpd, minpd)
- AVX_PACKED_OP3(Maxps, maxps)
- AVX_PACKED_OP3(Maxpd, maxpd)
- AVX_PACKED_OP3(Cmpunordps, cmpunordps)
- AVX_PACKED_OP3(Cmpunordpd, cmpunordpd)
- AVX_PACKED_OP3(Psllw, psllw)
- AVX_PACKED_OP3(Pslld, pslld)
- AVX_PACKED_OP3(Psllq, psllq)
- AVX_PACKED_OP3(Psrlw, psrlw)
- AVX_PACKED_OP3(Psrld, psrld)
- AVX_PACKED_OP3(Psrlq, psrlq)
- AVX_PACKED_OP3(Psraw, psraw)
- AVX_PACKED_OP3(Psrad, psrad)
- AVX_PACKED_OP3(Paddd, paddd)
- AVX_PACKED_OP3(Paddq, paddq)
- AVX_PACKED_OP3(Psubd, psubd)
- AVX_PACKED_OP3(Psubq, psubq)
- AVX_PACKED_OP3(Pmuludq, pmuludq)
- AVX_PACKED_OP3(Pavgb, pavgb)
- AVX_PACKED_OP3(Pavgw, pavgw)
- AVX_PACKED_OP3(Pand, pand)
- AVX_PACKED_OP3(Pminub, pminub)
- AVX_PACKED_OP3(Pmaxub, pmaxub)
- AVX_PACKED_OP3(Paddusb, paddusb)
- AVX_PACKED_OP3(Psubusb, psubusb)
- AVX_PACKED_OP3(Pcmpgtb, pcmpgtb)
- AVX_PACKED_OP3(Pcmpeqb, pcmpeqb)
- AVX_PACKED_OP3(Paddb, paddb)
- AVX_PACKED_OP3(Paddsb, paddsb)
- AVX_PACKED_OP3(Psubb, psubb)
- AVX_PACKED_OP3(Psubsb, psubsb)
-
-#undef AVX_PACKED_OP3
-
- AVX_PACKED_OP3_WITH_TYPE(Psllw, psllw, XMMRegister, uint8_t)
- AVX_PACKED_OP3_WITH_TYPE(Pslld, pslld, XMMRegister, uint8_t)
- AVX_PACKED_OP3_WITH_TYPE(Psllq, psllq, XMMRegister, uint8_t)
- AVX_PACKED_OP3_WITH_TYPE(Psrlw, psrlw, XMMRegister, uint8_t)
- AVX_PACKED_OP3_WITH_TYPE(Psrld, psrld, XMMRegister, uint8_t)
- AVX_PACKED_OP3_WITH_TYPE(Psrlq, psrlq, XMMRegister, uint8_t)
- AVX_PACKED_OP3_WITH_TYPE(Psraw, psraw, XMMRegister, uint8_t)
- AVX_PACKED_OP3_WITH_TYPE(Psrad, psrad, XMMRegister, uint8_t)
-
-#undef AVX_PACKED_OP3_WITH_TYPE
+ // Defined here because some callers take a pointer to member functions.
+ AVX_OP(Pcmpeqb, pcmpeqb)
+ AVX_OP(Pcmpeqw, pcmpeqw)
+ AVX_OP(Pcmpeqd, pcmpeqd)
+ AVX_OP_SSE4_1(Pcmpeqq, pcmpeqq)
// Macro for instructions that have 2 operands for AVX version and 1 operand for
// SSE version. Will move src1 to dst if dst != src1.
@@ -468,35 +331,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
AVX_OP3_WITH_MOVE(Pmaddwd, pmaddwd, XMMRegister, Operand)
#undef AVX_OP3_WITH_MOVE
-#define AVX_OP3_WITH_TYPE_SCOPE(macro_name, name, dst_type, src_type, \
- sse_scope) \
- void macro_name(dst_type dst, dst_type src1, src_type src2) { \
- if (CpuFeatures::IsSupported(AVX)) { \
- CpuFeatureScope scope(this, AVX); \
- v##name(dst, src1, src2); \
- return; \
- } \
- if (CpuFeatures::IsSupported(sse_scope)) { \
- CpuFeatureScope scope(this, sse_scope); \
- DCHECK_EQ(dst, src1); \
- name(dst, src2); \
- return; \
- } \
- UNREACHABLE(); \
- }
-#define AVX_OP3_XO_SSE4(macro_name, name) \
- AVX_OP3_WITH_TYPE_SCOPE(macro_name, name, XMMRegister, XMMRegister, SSE4_1) \
- AVX_OP3_WITH_TYPE_SCOPE(macro_name, name, XMMRegister, Operand, SSE4_1)
-
- AVX_OP3_WITH_TYPE_SCOPE(Haddps, haddps, XMMRegister, Operand, SSE3)
- AVX_OP3_XO_SSE4(Pmaxsd, pmaxsd)
- AVX_OP3_XO_SSE4(Pminsb, pminsb)
- AVX_OP3_XO_SSE4(Pmaxsb, pmaxsb)
- AVX_OP3_XO_SSE4(Pcmpeqq, pcmpeqq)
-
-#undef AVX_OP3_XO_SSE4
-#undef AVX_OP3_WITH_TYPE_SCOPE
-
// TODO(zhin): Remove after moving more definitions into SharedTurboAssembler.
void Movlps(Operand dst, XMMRegister src) {
SharedTurboAssembler::Movlps(dst, src);
@@ -513,16 +347,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
}
void Pshufb(XMMRegister dst, XMMRegister src, Operand mask);
- void Pblendw(XMMRegister dst, XMMRegister src, uint8_t imm8) {
- Pblendw(dst, Operand(src), imm8);
- }
- void Pblendw(XMMRegister dst, Operand src, uint8_t imm8);
-
- void Palignr(XMMRegister dst, XMMRegister src, uint8_t imm8) {
- Palignr(dst, Operand(src), imm8);
- }
- void Palignr(XMMRegister dst, Operand src, uint8_t imm8);
-
void Pextrd(Register dst, XMMRegister src, uint8_t imm8);
void Pinsrb(XMMRegister dst, Register src, int8_t imm8) {
Pinsrb(dst, Operand(src), imm8);
@@ -544,10 +368,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
void Pinsrw(XMMRegister dst, XMMRegister src1, Operand src2, int8_t imm8);
void Vbroadcastss(XMMRegister dst, Operand src);
- // Shufps that will mov src1 into dst if AVX is not supported.
- void Shufps(XMMRegister dst, XMMRegister src1, XMMRegister src2,
- uint8_t imm8);
-
// Expression support
// cvtsi2sd instruction only writes to the low 64-bit of dst register, which
// hinders register renaming and makes dependence chains longer. So we use
@@ -680,15 +500,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
public:
using TurboAssembler::TurboAssembler;
- // Load a register with a long value as efficiently as possible.
- void Set(Register dst, int32_t x) {
- if (x == 0) {
- xor_(dst, dst);
- } else {
- mov(dst, Immediate(x));
- }
- }
-
void PushRoot(RootIndex index);
// Compare the object in a register to a value and jump if they are equal.
@@ -722,8 +533,8 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void RecordWriteField(
Register object, int offset, Register value, Register scratch,
SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
+ RememberedSetAction remembered_set_action = RememberedSetAction::kEmit,
+ SmiCheck smi_check = SmiCheck::kInline);
// For page containing |object| mark region covering |address|
// dirty. |object| is the object being stored into, |value| is the
@@ -732,11 +543,8 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// write barrier if the value is a smi.
void RecordWrite(
Register object, Register address, Register value, SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
-
- // Frame restart support
- void MaybeDropFrames();
+ RememberedSetAction remembered_set_action = RememberedSetAction::kEmit,
+ SmiCheck smi_check = SmiCheck::kInline);
// Enter specific kind of exit frame. Expects the number of
// arguments in register eax and sets up the number of arguments in
@@ -768,7 +576,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void InvokeFunctionCode(Register function, Register new_target,
Register expected_parameter_count,
- Register actual_parameter_count, InvokeFlag flag);
+ Register actual_parameter_count, InvokeType type);
// On function call, call into the debugger.
// This may clobber ecx.
@@ -779,7 +587,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Invoke the JavaScript function in the given register. Changes the
// current context to the context in the function before invoking.
void InvokeFunction(Register function, Register new_target,
- Register actual_parameter_count, InvokeFlag flag);
+ Register actual_parameter_count, InvokeType type);
// Compare object type for heap object.
// Incoming register is heap_object and outgoing register is map.
@@ -865,18 +673,18 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Call a runtime routine.
void CallRuntime(const Runtime::Function* f, int num_arguments,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs);
+ SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore);
// Convenience function: Same as above, but takes the fid instead.
void CallRuntime(Runtime::FunctionId fid,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+ SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) {
const Runtime::Function* function = Runtime::FunctionForId(fid);
CallRuntime(function, function->nargs, save_doubles);
}
// Convenience function: Same as above, but takes the fid instead.
void CallRuntime(Runtime::FunctionId fid, int num_arguments,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+ SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) {
CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
}
@@ -921,7 +729,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Helper functions for generating invokes.
void InvokePrologue(Register expected_parameter_count,
Register actual_parameter_count, Label* done,
- InvokeFlag flag);
+ InvokeType type);
void EnterExitFramePrologue(StackFrame::Type frame_type, Register scratch);
void EnterExitFrameEpilogue(int argc, bool save_doubles);
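The header change above folds the former MacroAssembler::Set into a TurboAssembler::Move(Register, int32_t) overload, keeping the usual ia32 trick of materializing zero with xor instead of mov. A toy emitter sketch of that selection logic; the ToyAssembler type and its byte counts are hypothetical and only illustrate why the zero case is special-cased:

#include <cstdint>
#include <cstdio>

struct ToyAssembler {
  void xor_(const char* dst) {
    std::printf("xor %s, %s   ; short encoding, no immediate\n", dst, dst);
  }
  void mov(const char* dst, int32_t imm) {
    std::printf("mov %s, %d   ; longer encoding with imm32\n", dst, imm);
  }
  // Mirrors the selection logic of the new Move(Register, int32_t) overload.
  void Move(const char* dst, int32_t x) {
    if (x == 0) {
      xor_(dst);  // zero: clear the register with xor
    } else {
      mov(dst, x);
    }
  }
};

int main() {
  ToyAssembler masm;
  masm.Move("eax", 0);     // -> xor eax, eax
  masm.Move("ecx", 1234);  // -> mov ecx, 1234
}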
diff --git a/chromium/v8/src/codegen/interface-descriptors-inl.h b/chromium/v8/src/codegen/interface-descriptors-inl.h
new file mode 100644
index 00000000000..273e9d3e8e0
--- /dev/null
+++ b/chromium/v8/src/codegen/interface-descriptors-inl.h
@@ -0,0 +1,484 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CODEGEN_INTERFACE_DESCRIPTORS_INL_H_
+#define V8_CODEGEN_INTERFACE_DESCRIPTORS_INL_H_
+
+#include <utility>
+
+#include "src/base/logging.h"
+#include "src/codegen/interface-descriptors.h"
+#include "src/codegen/register-arch.h"
+
+#if V8_TARGET_ARCH_X64
+#include "src/codegen/x64/interface-descriptors-x64-inl.h"
+#elif V8_TARGET_ARCH_ARM64
+#include "src/codegen/arm64/interface-descriptors-arm64-inl.h"
+#elif V8_TARGET_ARCH_IA32
+#include "src/codegen/ia32/interface-descriptors-ia32-inl.h"
+#elif V8_TARGET_ARCH_ARM
+#include "src/codegen/arm/interface-descriptors-arm-inl.h"
+#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
+#include "src/codegen/ppc/interface-descriptors-ppc-inl.h"
+#elif V8_TARGET_ARCH_S390
+#include "src/codegen/s390/interface-descriptors-s390-inl.h"
+#elif V8_TARGET_ARCH_MIPS64
+#include "src/codegen/mips64/interface-descriptors-mips64-inl.h"
+#elif V8_TARGET_ARCH_MIPS
+#include "src/codegen/mips/interface-descriptors-mips-inl.h"
+#elif V8_TARGET_ARCH_RISCV64
+#include "src/codegen/riscv64/interface-descriptors-riscv64-inl.h"
+#else
+#error Unsupported target architecture.
+#endif
+
+namespace v8 {
+namespace internal {
+
+// static
+constexpr std::array<Register, kJSBuiltinRegisterParams>
+CallInterfaceDescriptor::DefaultJSRegisterArray() {
+ return RegisterArray(
+ kJavaScriptCallTargetRegister, kJavaScriptCallNewTargetRegister,
+ kJavaScriptCallArgCountRegister, kJavaScriptCallExtraArg1Register);
+}
+
+// static
+template <typename DerivedDescriptor>
+constexpr auto StaticCallInterfaceDescriptor<DerivedDescriptor>::registers() {
+ return CallInterfaceDescriptor::DefaultRegisterArray();
+}
+
+// static
+template <typename DerivedDescriptor>
+constexpr auto StaticJSCallInterfaceDescriptor<DerivedDescriptor>::registers() {
+ return CallInterfaceDescriptor::DefaultJSRegisterArray();
+}
+
+template <typename DerivedDescriptor>
+void StaticCallInterfaceDescriptor<DerivedDescriptor>::Initialize(
+ CallInterfaceDescriptorData* data) {
+ // Static local copy of the Registers array, for platform-specific
+ // initialization
+ static auto registers = DerivedDescriptor::registers();
+
+ // The passed pointer should be a modifiable pointer to our own data.
+ DCHECK_EQ(data, this->data());
+ DCHECK(!data->IsInitialized());
+
+ if (DerivedDescriptor::kRestrictAllocatableRegisters) {
+ data->RestrictAllocatableRegisters(registers.data(), registers.size());
+ }
+
+ data->InitializeRegisters(
+ DerivedDescriptor::flags(), DerivedDescriptor::kReturnCount,
+ DerivedDescriptor::GetParameterCount(),
+ DerivedDescriptor::kStackArgumentOrder,
+ DerivedDescriptor::GetRegisterParameterCount(), registers.data());
+
+ // InitializeTypes is customizable by the DerivedDescriptor subclass.
+ DerivedDescriptor::InitializeTypes(data);
+
+ DCHECK(data->IsInitialized());
+ DCHECK(this->CheckFloatingPointParameters(data));
+}
+
+// static
+template <typename DerivedDescriptor>
+constexpr int
+StaticCallInterfaceDescriptor<DerivedDescriptor>::GetReturnCount() {
+ static_assert(
+ DerivedDescriptor::kReturnCount >= 0,
+ "DerivedDescriptor subclass should override return count with a value "
+ "that is greater than 0");
+
+ return DerivedDescriptor::kReturnCount;
+}
+
+// static
+template <typename DerivedDescriptor>
+constexpr int
+StaticCallInterfaceDescriptor<DerivedDescriptor>::GetParameterCount() {
+ static_assert(
+ DerivedDescriptor::kParameterCount >= 0,
+ "DerivedDescriptor subclass should override parameter count with a "
+ "value that is greater than 0");
+
+ return DerivedDescriptor::kParameterCount;
+}
+
+namespace detail {
+
+// Helper trait for statically checking if a type is a std::array<Register,N>.
+template <typename T>
+struct IsRegisterArray : public std::false_type {};
+template <size_t N>
+struct IsRegisterArray<std::array<Register, N>> : public std::true_type {};
+template <>
+struct IsRegisterArray<EmptyRegisterArray> : public std::true_type {};
+
+// Helper for finding the index of the first invalid register in a register
+// array.
+template <size_t N, size_t Index>
+struct FirstInvalidRegisterHelper {
+ static constexpr int Call(std::array<Register, N> regs) {
+ if (!std::get<Index>(regs).is_valid()) {
+ // All registers after the first invalid one have to also be invalid (this
+ // DCHECK will be checked recursively).
+ DCHECK_EQ((FirstInvalidRegisterHelper<N, Index + 1>::Call(regs)),
+ Index + 1);
+ return Index;
+ }
+ return FirstInvalidRegisterHelper<N, Index + 1>::Call(regs);
+ }
+};
+template <size_t N>
+struct FirstInvalidRegisterHelper<N, N> {
+ static constexpr int Call(std::array<Register, N> regs) { return N; }
+};
+template <size_t N, size_t Index = 0>
+constexpr size_t FirstInvalidRegister(std::array<Register, N> regs) {
+ return FirstInvalidRegisterHelper<N, 0>::Call(regs);
+}
+constexpr size_t FirstInvalidRegister(EmptyRegisterArray regs) { return 0; }
+
+} // namespace detail
+
+// static
+template <typename DerivedDescriptor>
+constexpr int
+StaticCallInterfaceDescriptor<DerivedDescriptor>::GetRegisterParameterCount() {
+ static_assert(
+ detail::IsRegisterArray<decltype(DerivedDescriptor::registers())>::value,
+ "DerivedDescriptor subclass should define a registers() function "
+ "returning a std::array<Register>");
+
+ // The register parameter count is the minimum of:
+ // 1. The number of named parameters in the descriptor, and
+ // 2. The number of valid registers the descriptor provides with its
+ // registers() function, e.g. for {rax, rbx, no_reg} this number is 2.
+ // 3. The maximum number of register parameters allowed (
+ // kMaxBuiltinRegisterParams for most builtins,
+ // kMaxTFSBuiltinRegisterParams for TFS builtins, customizable by the
+ // subclass otherwise).
+ return std::min<int>({DerivedDescriptor::GetParameterCount(),
+ static_cast<int>(detail::FirstInvalidRegister(
+ DerivedDescriptor::registers())),
+ DerivedDescriptor::kMaxRegisterParams});
+}
+
+// static
+template <typename DerivedDescriptor>
+constexpr int
+StaticCallInterfaceDescriptor<DerivedDescriptor>::GetStackParameterCount() {
+ return DerivedDescriptor::GetParameterCount() -
+ DerivedDescriptor::GetRegisterParameterCount();
+}
+
+// static
+constexpr Register FastNewObjectDescriptor::TargetRegister() {
+ return kJSFunctionRegister;
+}
+
+// static
+constexpr Register FastNewObjectDescriptor::NewTargetRegister() {
+ return kJavaScriptCallNewTargetRegister;
+}
+
+// static
+constexpr Register ApiGetterDescriptor::ReceiverRegister() {
+ return LoadDescriptor::ReceiverRegister();
+}
+
+// static
+constexpr Register LoadGlobalNoFeedbackDescriptor::ICKindRegister() {
+ return LoadDescriptor::SlotRegister();
+}
+
+// static
+constexpr Register LoadNoFeedbackDescriptor::ICKindRegister() {
+ return LoadGlobalNoFeedbackDescriptor::ICKindRegister();
+}
+
+#if V8_TARGET_ARCH_IA32
+// On ia32, LoadWithVectorDescriptor passes vector on the stack and thus we
+// need to choose a new register here.
+// static
+constexpr Register LoadGlobalWithVectorDescriptor::VectorRegister() {
+ STATIC_ASSERT(!LoadWithVectorDescriptor::VectorRegister().is_valid());
+ return LoadDescriptor::ReceiverRegister();
+}
+#else
+// static
+constexpr Register LoadGlobalWithVectorDescriptor::VectorRegister() {
+ return LoadWithVectorDescriptor::VectorRegister();
+}
+#endif
+
+// static
+constexpr auto LoadDescriptor::registers() {
+ return RegisterArray(ReceiverRegister(), NameRegister(), SlotRegister());
+}
+
+// static
+constexpr auto LoadBaselineDescriptor::registers() {
+ return LoadDescriptor::registers();
+}
+
+// static
+constexpr auto LoadGlobalDescriptor::registers() {
+ return RegisterArray(LoadDescriptor::NameRegister(),
+ LoadDescriptor::SlotRegister());
+}
+
+// static
+constexpr auto LoadGlobalBaselineDescriptor::registers() {
+ return LoadGlobalDescriptor::registers();
+}
+
+// static
+constexpr auto StoreDescriptor::registers() {
+ return RegisterArray(ReceiverRegister(), NameRegister(), ValueRegister(),
+ SlotRegister());
+}
+
+// static
+constexpr auto StoreBaselineDescriptor::registers() {
+ return StoreDescriptor::registers();
+}
+
+// static
+constexpr auto StoreGlobalDescriptor::registers() {
+ return RegisterArray(StoreDescriptor::NameRegister(),
+ StoreDescriptor::ValueRegister(),
+ StoreDescriptor::SlotRegister());
+}
+
+// static
+constexpr auto StoreGlobalBaselineDescriptor::registers() {
+ return StoreGlobalDescriptor::registers();
+}
+
+// static
+constexpr auto LoadWithReceiverBaselineDescriptor::registers() {
+ return RegisterArray(
+ LoadDescriptor::ReceiverRegister(),
+ LoadWithReceiverAndVectorDescriptor::LookupStartObjectRegister(),
+ LoadDescriptor::NameRegister(), LoadDescriptor::SlotRegister());
+}
+
+// static
+constexpr auto BaselineOutOfLinePrologueDescriptor::registers() {
+ // TODO(v8:11421): Implement on other platforms.
+#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_IA32 || \
+ V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 || \
+ V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_RISCV64
+ return RegisterArray(
+ kContextRegister, kJSFunctionRegister, kJavaScriptCallArgCountRegister,
+ kJavaScriptCallExtraArg1Register, kJavaScriptCallNewTargetRegister,
+ kInterpreterBytecodeArrayRegister);
+#else
+ return DefaultRegisterArray();
+#endif
+}
+
+// static
+constexpr auto BaselineLeaveFrameDescriptor::registers() {
+ // TODO(v8:11421): Implement on other platforms.
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
+ V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 || \
+ V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_RISCV64
+ return RegisterArray(ParamsSizeRegister(), WeightRegister());
+#else
+ return DefaultRegisterArray();
+#endif
+}
+
+// static
+constexpr auto VoidDescriptor::registers() { return RegisterArray(); }
+
+// static
+constexpr auto AllocateDescriptor::registers() {
+ return RegisterArray(kAllocateSizeRegister);
+}
+
+// static
+constexpr auto CEntry1ArgvOnStackDescriptor::registers() {
+ return RegisterArray(kRuntimeCallArgCountRegister,
+ kRuntimeCallFunctionRegister);
+}
+
+// static
+constexpr auto InterpreterCEntry1Descriptor::registers() {
+ return RegisterArray(kRuntimeCallArgCountRegister, kRuntimeCallArgvRegister,
+ kRuntimeCallFunctionRegister);
+}
+
+// static
+constexpr auto InterpreterCEntry2Descriptor::registers() {
+ return RegisterArray(kRuntimeCallArgCountRegister, kRuntimeCallArgvRegister,
+ kRuntimeCallFunctionRegister);
+}
+
+// static
+constexpr auto FastNewObjectDescriptor::registers() {
+ return RegisterArray(TargetRegister(), NewTargetRegister());
+}
+
+// static
+constexpr auto TailCallOptimizedCodeSlotDescriptor::registers() {
+ return RegisterArray(kJavaScriptCallCodeStartRegister);
+}
+
+// static
+constexpr auto LoadNoFeedbackDescriptor::registers() {
+ return RegisterArray(LoadDescriptor::ReceiverRegister(),
+ LoadDescriptor::NameRegister(), ICKindRegister());
+}
+
+// static
+constexpr auto LoadGlobalNoFeedbackDescriptor::registers() {
+ return RegisterArray(LoadDescriptor::NameRegister(), ICKindRegister());
+}
+
+// static
+constexpr auto LoadGlobalWithVectorDescriptor::registers() {
+ return RegisterArray(LoadDescriptor::NameRegister(),
+ LoadDescriptor::SlotRegister(), VectorRegister());
+}
+
+// static
+constexpr auto LoadWithReceiverAndVectorDescriptor::registers() {
+ return RegisterArray(
+ LoadDescriptor::ReceiverRegister(), LookupStartObjectRegister(),
+ LoadDescriptor::NameRegister(), LoadDescriptor::SlotRegister(),
+ LoadWithVectorDescriptor::VectorRegister());
+}
+
+// static
+constexpr auto StoreGlobalWithVectorDescriptor::registers() {
+ return RegisterArray(StoreDescriptor::NameRegister(),
+ StoreDescriptor::ValueRegister(),
+ StoreDescriptor::SlotRegister(),
+ StoreWithVectorDescriptor::VectorRegister());
+}
+
+// static
+constexpr auto StoreTransitionDescriptor::registers() {
+ return RegisterArray(StoreDescriptor::ReceiverRegister(),
+ StoreDescriptor::NameRegister(), MapRegister(),
+ StoreDescriptor::ValueRegister(),
+ StoreDescriptor::SlotRegister(),
+ StoreWithVectorDescriptor::VectorRegister());
+}
+
+// static
+constexpr auto TypeConversionDescriptor::registers() {
+ return RegisterArray(ArgumentRegister());
+}
+
+// static
+constexpr auto TypeConversionNoContextDescriptor::registers() {
+ return RegisterArray(TypeConversionDescriptor::ArgumentRegister());
+}
+
+// static
+constexpr auto SingleParameterOnStackDescriptor::registers() {
+ return RegisterArray();
+}
+
+// static
+constexpr auto AsyncFunctionStackParameterDescriptor::registers() {
+ return RegisterArray();
+}
+
+// static
+constexpr auto GetIteratorStackParameterDescriptor::registers() {
+ return RegisterArray();
+}
+
+// static
+constexpr auto LoadWithVectorDescriptor::registers() {
+ return RegisterArray(LoadDescriptor::ReceiverRegister(),
+ LoadDescriptor::NameRegister(),
+ LoadDescriptor::SlotRegister(), VectorRegister());
+}
+
+// static
+constexpr auto StoreWithVectorDescriptor::registers() {
+ return RegisterArray(StoreDescriptor::ReceiverRegister(),
+ StoreDescriptor::NameRegister(),
+ StoreDescriptor::ValueRegister(),
+ StoreDescriptor::SlotRegister(), VectorRegister());
+}
+
+// static
+constexpr auto ApiGetterDescriptor::registers() {
+ return RegisterArray(ReceiverRegister(), HolderRegister(),
+ CallbackRegister());
+}
+
+// static
+constexpr auto ContextOnlyDescriptor::registers() { return RegisterArray(); }
+
+// static
+constexpr auto NoContextDescriptor::registers() { return RegisterArray(); }
+
+// static
+constexpr auto GrowArrayElementsDescriptor::registers() {
+ return RegisterArray(ObjectRegister(), KeyRegister());
+}
+
+// static
+constexpr auto ArrayNArgumentsConstructorDescriptor::registers() {
+ // Keep the arguments on the same registers as they were in
+ // ArrayConstructorDescriptor to avoid unnecessary register moves.
+ // kFunction, kAllocationSite, kActualArgumentsCount
+ return RegisterArray(kJavaScriptCallTargetRegister,
+ kJavaScriptCallExtraArg1Register,
+ kJavaScriptCallArgCountRegister);
+}
+
+// static
+constexpr auto ArrayNoArgumentConstructorDescriptor::registers() {
+ // This descriptor must use the same set of registers as the
+ // ArrayNArgumentsConstructorDescriptor.
+ return ArrayNArgumentsConstructorDescriptor::registers();
+}
+
+// static
+constexpr auto ArraySingleArgumentConstructorDescriptor::registers() {
+ // This descriptor must use the same set of registers as the
+ // ArrayNArgumentsConstructorDescriptor.
+ return ArrayNArgumentsConstructorDescriptor::registers();
+}
+
+// static
+constexpr Register RunMicrotasksDescriptor::MicrotaskQueueRegister() {
+ return GetRegisterParameter(0);
+}
+
+#define DEFINE_STATIC_BUILTIN_DESCRIPTOR_GETTER(Name, DescriptorName) \
+ template <> \
+ struct CallInterfaceDescriptorFor<Builtins::k##Name> { \
+ using type = DescriptorName##Descriptor; \
+ };
+BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN,
+ /*TFC*/ DEFINE_STATIC_BUILTIN_DESCRIPTOR_GETTER, IGNORE_BUILTIN,
+ /*TFH*/ DEFINE_STATIC_BUILTIN_DESCRIPTOR_GETTER, IGNORE_BUILTIN,
+ /*ASM*/ DEFINE_STATIC_BUILTIN_DESCRIPTOR_GETTER)
+#undef DEFINE_STATIC_BUILTIN_DESCRIPTOR_GETTER
+#define DEFINE_STATIC_BUILTIN_DESCRIPTOR_GETTER(Name, ...) \
+ template <> \
+ struct CallInterfaceDescriptorFor<Builtins::k##Name> { \
+ using type = Name##Descriptor; \
+ };
+BUILTIN_LIST_TFS(DEFINE_STATIC_BUILTIN_DESCRIPTOR_GETTER)
+#undef DEFINE_STATIC_BUILTIN_DESCRIPTOR_GETTER
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_CODEGEN_INTERFACE_DESCRIPTORS_INL_H_
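In the new header above, each descriptor's register parameter count is derived from its constexpr registers() array: the count is capped by the index of the first invalid (no_reg) entry, by the descriptor's named parameter count, and by kMaxRegisterParams. A self-contained sketch of the counting rule; the Register stand-in and the example array are assumptions, only the rule itself mirrors detail::FirstInvalidRegister from the file above:

#include <array>
#include <cstddef>

// Stand-in for V8's Register: only validity matters for this sketch.
struct Register {
  int code;
  constexpr bool is_valid() const { return code >= 0; }
};
constexpr Register no_reg{-1};

// Mirrors detail::FirstInvalidRegister: the register parameter count is
// bounded by the index of the first invalid entry (all later entries must
// also be invalid, which the real code DCHECKs).
template <std::size_t N>
constexpr std::size_t FirstInvalidRegister(const std::array<Register, N>& regs) {
  for (std::size_t i = 0; i < N; ++i) {
    if (!regs[i].is_valid()) return i;
  }
  return N;
}

// Example: a descriptor naming four parameters but passing only two in
// registers, e.g. something shaped like {eax, ecx, no_reg, no_reg} on ia32.
constexpr std::array<Register, 4> kExample{{{0}, {1}, no_reg, no_reg}};
static_assert(FirstInvalidRegister(kExample) == 2,
              "two register parameters; the rest go on the stack");

int main() { return 0; }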
diff --git a/chromium/v8/src/codegen/interface-descriptors.cc b/chromium/v8/src/codegen/interface-descriptors.cc
index 53b678580e4..2cafcae3442 100644
--- a/chromium/v8/src/codegen/interface-descriptors.cc
+++ b/chromium/v8/src/codegen/interface-descriptors.cc
@@ -4,49 +4,48 @@
#include "src/codegen/interface-descriptors.h"
+#include "src/codegen/interface-descriptors-inl.h"
#include "src/codegen/macro-assembler.h"
namespace v8 {
namespace internal {
-void CallInterfaceDescriptorData::InitializePlatformSpecific(
- int register_parameter_count, const Register* registers) {
- DCHECK(!IsInitializedPlatformIndependent());
-
- register_param_count_ = register_parameter_count;
-
- // UBSan doesn't like creating zero-length arrays.
- if (register_parameter_count == 0) return;
+void CallInterfaceDescriptorData::InitializeRegisters(
+ Flags flags, int return_count, int parameter_count,
+ StackArgumentOrder stack_order, int register_parameter_count,
+ const Register* registers) {
+ DCHECK(!IsInitializedTypes());
- // InterfaceDescriptor owns a copy of the registers array.
- register_params_ = NewArray<Register>(register_parameter_count, no_reg);
- for (int i = 0; i < register_parameter_count; i++) {
- // The value of the root register must be reserved, thus any uses
- // within the calling convention are disallowed.
#ifdef DEBUG
- CHECK_NE(registers[i], kRootRegister);
+ {
+ // Make sure that the registers are all valid, and don't alias each other.
+ RegList reglist = 0;
+ for (int i = 0; i < register_parameter_count; ++i) {
+ Register reg = registers[i];
+ DCHECK(reg.is_valid());
+ DCHECK_EQ(reglist & reg.bit(), 0);
+ DCHECK_NE(reg, kRootRegister);
#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
- CHECK_NE(registers[i], kPointerCageBaseRegister);
+ DCHECK_NE(reg, kPtrComprCageBaseRegister);
#endif
- // Check for duplicated registers.
- for (int j = i + 1; j < register_parameter_count; j++) {
- CHECK_NE(registers[i], registers[j]);
+ reglist = CombineRegLists(reglist, reg.bit());
}
-#endif
- register_params_[i] = registers[i];
}
-}
-
-void CallInterfaceDescriptorData::InitializePlatformIndependent(
- Flags flags, int return_count, int parameter_count,
- const MachineType* machine_types, int machine_types_length,
- StackArgumentOrder stack_order) {
- DCHECK(IsInitializedPlatformSpecific());
+#endif
flags_ = flags;
stack_order_ = stack_order;
return_count_ = return_count;
param_count_ = parameter_count;
+ register_param_count_ = register_parameter_count;
+
+ // The caller owns the registers array, so we just set the pointer.
+ register_params_ = registers;
+}
+
+void CallInterfaceDescriptorData::InitializeTypes(
+ const MachineType* machine_types, int machine_types_length) {
+ DCHECK(IsInitializedRegisters());
const int types_length = return_count_ + param_count_;
// Machine types are either fully initialized or null.
@@ -77,7 +76,6 @@ bool CallInterfaceDescriptorData::AllStackParametersAreTagged() const {
void CallInterfaceDescriptorData::Reset() {
delete[] machine_types_;
machine_types_ = nullptr;
- delete[] register_params_;
register_params_ = nullptr;
}
@@ -105,27 +103,6 @@ void CallDescriptors::TearDown() {
}
}
-void CallInterfaceDescriptor::JSDefaultInitializePlatformSpecific(
- CallInterfaceDescriptorData* data, int non_js_register_parameter_count) {
- DCHECK_LE(static_cast<unsigned>(non_js_register_parameter_count), 1);
-
- // 3 is for kTarget, kNewTarget and kActualArgumentsCount
- int register_parameter_count = 3 + non_js_register_parameter_count;
-
- DCHECK(!AreAliased(
- kJavaScriptCallTargetRegister, kJavaScriptCallNewTargetRegister,
- kJavaScriptCallArgCountRegister, kJavaScriptCallExtraArg1Register));
-
- const Register default_js_stub_registers[] = {
- kJavaScriptCallTargetRegister, kJavaScriptCallNewTargetRegister,
- kJavaScriptCallArgCountRegister, kJavaScriptCallExtraArg1Register};
-
- CHECK_LE(static_cast<size_t>(register_parameter_count),
- arraysize(default_js_stub_registers));
- data->InitializePlatformSpecific(register_parameter_count,
- default_js_stub_registers);
-}
-
const char* CallInterfaceDescriptor::DebugName() const {
CallDescriptors::Key key = CallDescriptors::GetKey(data_);
switch (key) {
@@ -140,492 +117,12 @@ const char* CallInterfaceDescriptor::DebugName() const {
return "";
}
-#if !defined(V8_TARGET_ARCH_MIPS) && !defined(V8_TARGET_ARCH_MIPS64)
bool CallInterfaceDescriptor::IsValidFloatParameterRegister(Register reg) {
- return true;
-}
-#endif
-
-void VoidDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- data->InitializePlatformSpecific(0, nullptr);
-}
-
-void AllocateDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {kAllocateSizeRegister};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CEntry1ArgvOnStackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {kRuntimeCallArgCountRegister,
- kRuntimeCallFunctionRegister};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-namespace {
-
-void InterpreterCEntryDescriptor_InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {kRuntimeCallArgCountRegister,
- kRuntimeCallArgvRegister,
- kRuntimeCallFunctionRegister};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-} // namespace
-
-void InterpreterCEntry1Descriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- InterpreterCEntryDescriptor_InitializePlatformSpecific(data);
-}
-
-void InterpreterCEntry2Descriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- InterpreterCEntryDescriptor_InitializePlatformSpecific(data);
-}
-
-void FastNewObjectDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {TargetRegister(), NewTargetRegister()};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-const Register FastNewObjectDescriptor::TargetRegister() {
- return kJSFunctionRegister;
-}
-
-const Register FastNewObjectDescriptor::NewTargetRegister() {
- return kJavaScriptCallNewTargetRegister;
-}
-
-void TailCallOptimizedCodeSlotDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {kJavaScriptCallCodeStartRegister};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void LoadDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {ReceiverRegister(), NameRegister(), SlotRegister()};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void LoadBaselineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {LoadDescriptor::ReceiverRegister(),
- LoadDescriptor::NameRegister(),
- LoadDescriptor::SlotRegister()};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void LoadNoFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {ReceiverRegister(), NameRegister(), ICKindRegister()};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void LoadGlobalDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {NameRegister(), SlotRegister()};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void LoadGlobalBaselineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {LoadGlobalDescriptor::NameRegister(),
- LoadGlobalDescriptor::SlotRegister()};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void LookupBaselineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- DefaultInitializePlatformSpecific(data, kParameterCount);
-}
-
-void LoadGlobalNoFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {NameRegister(), ICKindRegister()};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void LoadGlobalWithVectorDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {NameRegister(), SlotRegister(), VectorRegister()};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void LoadWithReceiverAndVectorDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- DCHECK(!AreAliased(ReceiverRegister(), LookupStartObjectRegister(),
- NameRegister(), SlotRegister(), VectorRegister()));
- Register registers[] = {ReceiverRegister(), LookupStartObjectRegister(),
- NameRegister(), SlotRegister(), VectorRegister()};
- int len = arraysize(registers) - kStackArgumentsCount;
- data->InitializePlatformSpecific(len, registers);
-}
-
-void LoadWithReceiverBaselineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- LoadWithReceiverAndVectorDescriptor::ReceiverRegister(),
- LoadWithReceiverAndVectorDescriptor::LookupStartObjectRegister(),
- LoadWithReceiverAndVectorDescriptor::NameRegister(),
- LoadWithReceiverAndVectorDescriptor::SlotRegister()};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void StoreGlobalDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {NameRegister(), ValueRegister(), SlotRegister()};
-
- int len = arraysize(registers) - kStackArgumentsCount;
- data->InitializePlatformSpecific(len, registers);
-}
-
-void StoreGlobalBaselineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {StoreGlobalDescriptor::NameRegister(),
- StoreGlobalDescriptor::ValueRegister(),
- StoreGlobalDescriptor::SlotRegister()};
-
- int len = arraysize(registers) - kStackArgumentsCount;
- data->InitializePlatformSpecific(len, registers);
-}
-
-void StoreGlobalWithVectorDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {NameRegister(), ValueRegister(), SlotRegister(),
- VectorRegister()};
- int len = arraysize(registers) - kStackArgumentsCount;
- data->InitializePlatformSpecific(len, registers);
-}
-
-void StoreDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {ReceiverRegister(), NameRegister(), ValueRegister(),
- SlotRegister()};
-
- int len = arraysize(registers) - kStackArgumentsCount;
- data->InitializePlatformSpecific(len, registers);
-}
-
-void StoreBaselineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
- StoreDescriptor::ValueRegister(), StoreDescriptor::SlotRegister()};
-
- int len = arraysize(registers) - kStackArgumentsCount;
- data->InitializePlatformSpecific(len, registers);
-}
-
-void StoreTransitionDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- ReceiverRegister(), NameRegister(), MapRegister(),
- ValueRegister(), SlotRegister(), VectorRegister(),
- };
- int len = arraysize(registers) - kStackArgumentsCount;
- data->InitializePlatformSpecific(len, registers);
-}
-
-void BaselineOutOfLinePrologueDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:11421): Implement on other platforms.
-#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_IA32 || \
- V8_TARGET_ARCH_ARM
- Register registers[] = {kContextRegister,
- kJSFunctionRegister,
- kJavaScriptCallArgCountRegister,
- kJavaScriptCallExtraArg1Register,
- kJavaScriptCallNewTargetRegister,
- kInterpreterBytecodeArrayRegister};
- data->InitializePlatformSpecific(kParameterCount - kStackArgumentsCount,
- registers);
-#else
- InitializePlatformUnimplemented(data, kParameterCount);
-#endif
-}
-
-void BaselineLeaveFrameDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:11421): Implement on other platforms.
-#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
- V8_TARGET_ARCH_ARM
- Register registers[] = {ParamsSizeRegister(), WeightRegister()};
- data->InitializePlatformSpecific(kParameterCount, registers);
+#if defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64)
+ return reg.code() % 2 == 0;
#else
- InitializePlatformUnimplemented(data, kParameterCount);
-#endif
-}
-
-void StringAtDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- DefaultInitializePlatformSpecific(data, kParameterCount);
-}
-
-void StringAtAsStringDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- DefaultInitializePlatformSpecific(data, kParameterCount);
-}
-
-void StringSubstringDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- DefaultInitializePlatformSpecific(data, kParameterCount);
-}
-
-void TypeConversionDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {ArgumentRegister()};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void TypeConversionNoContextDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {TypeConversionDescriptor::ArgumentRegister()};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void TypeConversion_BaselineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- DefaultInitializePlatformSpecific(data, kParameterCount);
-}
-
-void SingleParameterOnStackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- data->InitializePlatformSpecific(0, nullptr);
-}
-
-void AsyncFunctionStackParameterDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- data->InitializePlatformSpecific(0, nullptr);
-}
-
-void GetIteratorStackParameterDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- data->InitializePlatformSpecific(0, nullptr);
-}
-
-void LoadWithVectorDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {ReceiverRegister(), NameRegister(), SlotRegister(),
- VectorRegister()};
- // TODO(jgruber): This DCHECK could be enabled if RegisterBase::ListOf were
- // to allow no_reg entries.
- // DCHECK(!AreAliased(ReceiverRegister(), NameRegister(), SlotRegister(),
- // VectorRegister(), kRootRegister));
- int len = arraysize(registers) - kStackArgumentsCount;
- data->InitializePlatformSpecific(len, registers);
-}
-
-void StoreWithVectorDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {ReceiverRegister(), NameRegister(), ValueRegister(),
- SlotRegister(), VectorRegister()};
- // TODO(jgruber): This DCHECK could be enabled if RegisterBase::ListOf were
- // to allow no_reg entries.
- // DCHECK(!AreAliased(ReceiverRegister(), NameRegister(), kRootRegister));
- int len = arraysize(registers) - kStackArgumentsCount;
- data->InitializePlatformSpecific(len, registers);
-}
-
-const Register ApiGetterDescriptor::ReceiverRegister() {
- return LoadDescriptor::ReceiverRegister();
-}
-
-void ApiGetterDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {ReceiverRegister(), HolderRegister(),
- CallbackRegister()};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ContextOnlyDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- data->InitializePlatformSpecific(0, nullptr);
-}
-
-void NoContextDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- data->InitializePlatformSpecific(0, nullptr);
-}
-
-void GrowArrayElementsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {ObjectRegister(), KeyRegister()};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ArrayNoArgumentConstructorDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // This descriptor must use the same set of registers as the
- // ArrayNArgumentsConstructorDescriptor.
- ArrayNArgumentsConstructorDescriptor::InitializePlatformSpecific(data);
-}
-
-void ArraySingleArgumentConstructorDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // This descriptor must use the same set of registers as the
- // ArrayNArgumentsConstructorDescriptor.
- ArrayNArgumentsConstructorDescriptor::InitializePlatformSpecific(data);
-}
-
-void ArrayNArgumentsConstructorDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // Keep the arguments on the same registers as they were in
- // ArrayConstructorDescriptor to avoid unnecessary register moves.
- // kFunction, kAllocationSite, kActualArgumentsCount
- Register registers[] = {kJavaScriptCallTargetRegister,
- kJavaScriptCallExtraArg1Register,
- kJavaScriptCallArgCountRegister};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-#if !V8_TARGET_ARCH_IA32
-// We need a custom descriptor on ia32 to avoid using xmm0.
-void WasmFloat32ToNumberDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- DefaultInitializePlatformSpecific(data, kParameterCount);
-}
-
-// We need a custom descriptor on ia32 to avoid using xmm0.
-void WasmFloat64ToNumberDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- DefaultInitializePlatformSpecific(data, kParameterCount);
-}
-#endif // !V8_TARGET_ARCH_IA32
-
-#if !defined(V8_TARGET_ARCH_MIPS) && !defined(V8_TARGET_ARCH_MIPS64) && \
- !defined(V8_TARGET_ARCH_RISCV64)
-void WasmI32AtomicWait32Descriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- DefaultInitializePlatformSpecific(data, kParameterCount);
-}
-
-void WasmI64AtomicWait32Descriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- DefaultInitializePlatformSpecific(data,
- kParameterCount - kStackArgumentsCount);
-}
+ return true;
#endif
-
-void CloneObjectWithVectorDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- DefaultInitializePlatformSpecific(data, kParameterCount);
-}
-
-void CloneObjectBaselineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- DefaultInitializePlatformSpecific(data, kParameterCount);
-}
-
-// static
-Register RunMicrotasksDescriptor::MicrotaskQueueRegister() {
- return CallDescriptors::call_descriptor_data(CallDescriptors::RunMicrotasks)
- ->register_param(0);
-}
-
-void RunMicrotasksDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- DefaultInitializePlatformSpecific(data, kParameterCount);
-}
-
-void I64ToBigIntDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- DefaultInitializePlatformSpecific(data, kParameterCount);
-}
-
-void I32PairToBigIntDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- DefaultInitializePlatformSpecific(data, kParameterCount);
-}
-
-void BigIntToI64Descriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- DefaultInitializePlatformSpecific(data, kParameterCount);
-}
-
-void BigIntToI32PairDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- DefaultInitializePlatformSpecific(data, kParameterCount);
-}
-
-void BinaryOp_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void CallTrampoline_BaselineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- DefaultInitializePlatformSpecific(data, kParameterCount);
-}
-
-void CallTrampoline_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void CallWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void CallWithSpread_BaselineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- DefaultInitializePlatformSpecific(data, kParameterCount);
-}
-
-void CallWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void ConstructWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void ConstructWithSpread_BaselineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- DefaultInitializePlatformSpecific(data,
- kParameterCount - kStackArgumentsCount);
-}
-
-void ConstructWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void Compare_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- DefaultInitializePlatformSpecific(data, 4);
-}
-
-void UnaryOp_WithFeedbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- DefaultInitializePlatformSpecific(data, 3);
-}
-
-void UnaryOp_BaselineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- DefaultInitializePlatformSpecific(data, 2);
-}
-
-void ForInPrepareDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- DefaultInitializePlatformSpecific(data, kParameterCount);
-}
-
-void SuspendGeneratorBaselineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- DefaultInitializePlatformSpecific(data, kParameterCount);
-}
-
-void ResumeGeneratorBaselineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- DefaultInitializePlatformSpecific(data, kParameterCount);
}
} // namespace internal
diff --git a/chromium/v8/src/codegen/interface-descriptors.h b/chromium/v8/src/codegen/interface-descriptors.h
index 8d03907efc0..e64826e6fc1 100644
--- a/chromium/v8/src/codegen/interface-descriptors.h
+++ b/chromium/v8/src/codegen/interface-descriptors.h
@@ -7,6 +7,7 @@
#include <memory>
+#include "src/base/logging.h"
#include "src/codegen/machine-type.h"
#include "src/codegen/register-arch.h"
#include "src/codegen/tnode.h"
@@ -39,6 +40,7 @@ namespace internal {
V(CallFunctionTemplate) \
V(CallTrampoline) \
V(CallTrampoline_Baseline) \
+ V(CallTrampoline_Baseline_Compact) \
V(CallTrampoline_WithFeedback) \
V(CallVarargs) \
V(CallWithArrayLike) \
@@ -68,7 +70,6 @@ namespace internal {
V(EphemeronKeyBarrier) \
V(FastNewObject) \
V(ForInPrepare) \
- V(FrameDropperTrampoline) \
V(GetIteratorStackParameter) \
V(GetProperty) \
V(GrowArrayElements) \
@@ -151,17 +152,21 @@ class V8_EXPORT_PRIVATE CallInterfaceDescriptorData {
};
using Flags = base::Flags<Flag>;
+ static constexpr int kUninitializedCount = -1;
+
CallInterfaceDescriptorData() = default;
CallInterfaceDescriptorData(const CallInterfaceDescriptorData&) = delete;
CallInterfaceDescriptorData& operator=(const CallInterfaceDescriptorData&) =
delete;
- // A copy of the passed in registers and param_representations is made
- // and owned by the CallInterfaceDescriptorData.
-
- void InitializePlatformSpecific(int register_parameter_count,
- const Register* registers);
+ // The passed registers are owned by the caller, and their lifetime is
+ // expected to exceed that of this data. In practice, they are expected to
+ // be in a static local.
+ void InitializeRegisters(Flags flags, int return_count, int parameter_count,
+ StackArgumentOrder stack_order,
+ int register_parameter_count,
+ const Register* registers);
// if machine_types is null, then an array of size
// (return_count + parameter_count) will be created with
@@ -171,17 +176,13 @@ class V8_EXPORT_PRIVATE CallInterfaceDescriptorData {
// (return_count + parameter_count). Those members of the parameter array will
// be initialized from {machine_types}, and the rest initialized to
// MachineType::AnyTagged().
- void InitializePlatformIndependent(Flags flags, int return_count,
- int parameter_count,
- const MachineType* machine_types,
- int machine_types_length,
- StackArgumentOrder stack_order);
+ void InitializeTypes(const MachineType* machine_types,
+ int machine_types_length);
void Reset();
bool IsInitialized() const {
- return IsInitializedPlatformSpecific() &&
- IsInitializedPlatformIndependent();
+ return IsInitializedRegisters() && IsInitializedTypes();
}
Flags flags() const { return flags_; }
@@ -189,7 +190,6 @@ class V8_EXPORT_PRIVATE CallInterfaceDescriptorData {
int param_count() const { return param_count_; }
int register_param_count() const { return register_param_count_; }
Register register_param(int index) const { return register_params_[index]; }
- Register* register_params() const { return register_params_; }
MachineType return_type(int index) const {
DCHECK_LT(index, return_count_);
return machine_types_[index];
@@ -200,9 +200,9 @@ class V8_EXPORT_PRIVATE CallInterfaceDescriptorData {
}
StackArgumentOrder stack_order() const { return stack_order_; }
- void RestrictAllocatableRegisters(const Register* registers, int num) {
+ void RestrictAllocatableRegisters(const Register* registers, size_t num) {
DCHECK_EQ(allocatable_registers_, 0);
- for (int i = 0; i < num; ++i) {
+ for (size_t i = 0; i < num; ++i) {
allocatable_registers_ |= registers[i].bit();
}
DCHECK_GT(NumRegs(allocatable_registers_), 0);
@@ -211,17 +211,17 @@ class V8_EXPORT_PRIVATE CallInterfaceDescriptorData {
RegList allocatable_registers() const { return allocatable_registers_; }
private:
- bool IsInitializedPlatformSpecific() const {
+ bool IsInitializedRegisters() const {
const bool initialized =
- (register_param_count_ == 0 && register_params_ == nullptr) ||
- (register_param_count_ > 0 && register_params_ != nullptr);
- // Platform-specific initialization happens before platform-independent.
+ return_count_ != kUninitializedCount &&
+ param_count_ != kUninitializedCount &&
+ (register_param_count_ == 0 || register_params_ != nullptr);
+ // Register initialization happens before type initialization.
return initialized;
}
- bool IsInitializedPlatformIndependent() const {
- const bool initialized =
- return_count_ >= 0 && param_count_ >= 0 && machine_types_ != nullptr;
- // Platform-specific initialization happens before platform-independent.
+ bool IsInitializedTypes() const {
+ const bool initialized = machine_types_ != nullptr;
+ // Register initialization happens before type initialization.
return initialized;
}
@@ -229,9 +229,9 @@ class V8_EXPORT_PRIVATE CallInterfaceDescriptorData {
bool AllStackParametersAreTagged() const;
#endif // DEBUG
- int register_param_count_ = -1;
- int return_count_ = -1;
- int param_count_ = -1;
+ int register_param_count_ = kUninitializedCount;
+ int return_count_ = kUninitializedCount;
+ int param_count_ = kUninitializedCount;
Flags flags_ = kNoFlags;
StackArgumentOrder stack_order_ = StackArgumentOrder::kDefault;
@@ -242,10 +242,10 @@ class V8_EXPORT_PRIVATE CallInterfaceDescriptorData {
// |register_params_| defines registers that are used for parameter passing.
// |machine_types_| defines machine types for resulting values and incoming
// parameters.
- // Both arrays are allocated dynamically by the InterfaceDescriptor and
- // freed on destruction. This is because static arrays cause creation of
- // runtime static initializers which we don't want.
- Register* register_params_ = nullptr;
+ // The register params array is owned by the caller, and it's expected that it
+ // is a static local stored in the caller function. The machine types are
+ // allocated dynamically by the InterfaceDescriptor and freed on destruction.
+ const Register* register_params_ = nullptr;
MachineType* machine_types_ = nullptr;
};
@@ -278,12 +278,35 @@ class V8_EXPORT_PRIVATE CallDescriptors : public AllStatic {
call_descriptor_data_[NUMBER_OF_DESCRIPTORS];
};
+#if defined(V8_TARGET_ARCH_IA32)
+// To support all possible cases, we must limit the number of register args for
+// TFS builtins on ia32 to 3. Out of the 6 allocatable registers, esi is taken
+// as the context register and ebx is the root register. One register must
+// remain available to store the jump/call target. Thus 3 registers remain for
+// arguments. The reason this applies to TFS builtins specifically is because
+// this becomes relevant for builtins used as targets of Torque function
+// pointers (which must have a register available to store the target).
+// TODO(jgruber): Ideally we should just decrement kMaxBuiltinRegisterParams but
+// that comes with its own set of complications. It's possible, but requires
+// refactoring the calling convention of other existing stubs.
+constexpr int kMaxBuiltinRegisterParams = 4;
+constexpr int kMaxTFSBuiltinRegisterParams = 3;
+#else
+constexpr int kMaxBuiltinRegisterParams = 5;
+constexpr int kMaxTFSBuiltinRegisterParams = kMaxBuiltinRegisterParams;
+#endif
+STATIC_ASSERT(kMaxTFSBuiltinRegisterParams <= kMaxBuiltinRegisterParams);
+constexpr int kJSBuiltinRegisterParams = 4;
+
+// Polymorphic base class for call interface descriptors, which defines getters
+// for the various descriptor properties via a runtime-loaded
+// CallInterfaceDescriptorData field.
class V8_EXPORT_PRIVATE CallInterfaceDescriptor {
public:
using Flags = CallInterfaceDescriptorData::Flags;
CallInterfaceDescriptor() : data_(nullptr) {}
- virtual ~CallInterfaceDescriptor() = default;
+ ~CallInterfaceDescriptor() = default;
explicit CallInterfaceDescriptor(CallDescriptors::Key key)
: data_(CallDescriptors::call_descriptor_data(key)) {}
@@ -333,7 +356,9 @@ class V8_EXPORT_PRIVATE CallInterfaceDescriptor {
return data()->stack_order();
}
- static const Register ContextRegister();
+ static constexpr inline Register ContextRegister() {
+ return kContextRegister;
+ }
const char* DebugName() const;
@@ -344,39 +369,13 @@ class V8_EXPORT_PRIVATE CallInterfaceDescriptor {
protected:
const CallInterfaceDescriptorData* data() const { return data_; }
- virtual void InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
- UNREACHABLE();
- }
-
- // Initializes |data| to an unspecified state, for platforms that haven't
- // implemented a given builtin.
- static void InitializePlatformUnimplemented(CallInterfaceDescriptorData* data,
- int register_parameter_count) {
- DefaultInitializePlatformSpecific(data,
- std::min(register_parameter_count, 4));
- }
-
- virtual void InitializePlatformIndependent(
- CallInterfaceDescriptorData* data) {
- // Default descriptor configuration: one result, all parameters are passed
- // in registers and all parameters have MachineType::AnyTagged() type.
- data->InitializePlatformIndependent(
- CallInterfaceDescriptorData::kNoFlags, 1, data->register_param_count(),
- nullptr, 0, StackArgumentOrder::kDefault);
- }
-
- // Initializes |data| using the platform dependent default set of registers.
- // It is intended to be used for TurboFan stubs when particular set of
- // registers does not matter.
- static void DefaultInitializePlatformSpecific(
- CallInterfaceDescriptorData* data, int register_parameter_count);
-
- // Initializes |data| using the platform dependent default set of registers
- // for JavaScript-compatible calling convention.
- // It is intended to be used for TurboFan stubs being called with JavaScript
- // linkage + additional parameters on registers and stack.
- static void JSDefaultInitializePlatformSpecific(
- CallInterfaceDescriptorData* data, int non_js_register_parameter_count);
+ // Helper for defining the default register set.
+ //
+ // Use auto for the return type to allow different architectures to have
+ // differently sized default register arrays.
+ static constexpr inline auto DefaultRegisterArray();
+ static constexpr inline std::array<Register, kJSBuiltinRegisterParams>
+ DefaultJSRegisterArray();
// Checks if float parameters are not assigned invalid registers.
bool CheckFloatingPointParameters(CallInterfaceDescriptorData* data) {
@@ -393,104 +392,164 @@ class V8_EXPORT_PRIVATE CallInterfaceDescriptor {
bool IsValidFloatParameterRegister(Register reg);
private:
+ const CallInterfaceDescriptorData* data_;
+};
+
+// CRTP base class for call interface descriptors, which defines static getters
+// for the various descriptor properties based on static values defined in the
+// subclass.
+template <typename DerivedDescriptor>
+class StaticCallInterfaceDescriptor : public CallInterfaceDescriptor {
+ public:
+ // ===========================================================================
+ // The following are the descriptor's CRTP configuration points, overwritable
+ // by DerivedDescriptor.
+ static constexpr int kReturnCount =
+ CallInterfaceDescriptorData::kUninitializedCount;
+ static constexpr int kParameterCount =
+ CallInterfaceDescriptorData::kUninitializedCount;
+ static constexpr bool kNoContext = false;
+ static constexpr bool kAllowVarArgs = false;
+ static constexpr bool kNoStackScan = false;
+ static constexpr auto kStackArgumentOrder = StackArgumentOrder::kDefault;
+
+ // The set of registers available to the parameters, as a
+ // std::array<Register,N>. Can be larger or smaller than kParameterCount; if
+ // larger, any remaining registers are ignored; if smaller, any parameters
+ // after registers().size() are passed on the stack.
+ //
+ // Defaults to CallInterfaceDescriptor::DefaultRegisterArray().
+ static constexpr inline auto registers();
+
+ // An additional limit on the number of register parameters allowed. This is
+ // here so that it can be overwritten to kMaxTFSBuiltinRegisterParams for TFS
+ // builtins; see the comment on kMaxTFSBuiltinRegisterParams above.
+ static constexpr int kMaxRegisterParams = kMaxBuiltinRegisterParams;
+
+ // If set to true, the descriptor will restrict the set of allocatable
+ // registers to the set returned by registers(). Then, it is expected that
+ // the first kParameterCount registers() are the parameters of the builtin.
+ static constexpr bool kRestrictAllocatableRegisters = false;
+
+ // End of customization points.
+ // ===========================================================================
+
+ static constexpr inline Flags flags() {
+ return Flags((DerivedDescriptor::kNoContext
+ ? CallInterfaceDescriptorData::kNoContext
+ : 0) |
+ (DerivedDescriptor::kAllowVarArgs
+ ? CallInterfaceDescriptorData::kAllowVarArgs
+ : 0) |
+ (DerivedDescriptor::kNoStackScan
+ ? CallInterfaceDescriptorData::kNoStackScan
+ : 0));
+ }
+ static constexpr inline bool AllowVarArgs() {
+ return DerivedDescriptor::kAllowVarArgs;
+ }
+ static constexpr inline bool HasContextParameter() {
+ return !DerivedDescriptor::kNoContext;
+ }
+
+ static constexpr inline int GetReturnCount();
+ static constexpr inline int GetParameterCount();
+ static constexpr inline int GetRegisterParameterCount();
+ static constexpr inline int GetStackParameterCount();
+ static constexpr inline Register* GetRegisterData();
+
+ static constexpr inline Register GetRegisterParameter(int i) {
+ return DerivedDescriptor::registers()[i];
+ }
+
+ explicit StaticCallInterfaceDescriptor(CallDescriptors::Key key)
+ : CallInterfaceDescriptor(key) {}
+
+ private:
// {CallDescriptors} is allowed to call the private {Initialize} method.
friend class CallDescriptors;
- const CallInterfaceDescriptorData* data_;
+ inline void Initialize(CallInterfaceDescriptorData* data);
- void Initialize(CallInterfaceDescriptorData* data) {
- // The passed pointer should be a modifiable pointer to our own data.
- DCHECK_EQ(data, data_);
- DCHECK(!data->IsInitialized());
- InitializePlatformSpecific(data);
- InitializePlatformIndependent(data);
- DCHECK(data->IsInitialized());
- DCHECK(CheckFloatingPointParameters(data));
+ // Set up the types of the descriptor. This is a static function, so that it
+ // is overwritable by subclasses. By default, all parameters have
+ // MachineType::AnyTagged() type.
+ static void InitializeTypes(CallInterfaceDescriptorData* data) {
+ data->InitializeTypes(nullptr, 0);
}
};
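// Minimal standalone sketch of the CRTP configuration pattern used by
// StaticCallInterfaceDescriptor above. The names here (StaticDescriptorBase,
// AddDescriptor, Reg) are hypothetical stand-ins, not V8 API; the point is
// only that the base resolves kParameterCount and registers() through the
// Derived type, so each descriptor configures itself with static constexpr
// members instead of virtual Initialize* overrides.
#include <array>
#include <iostream>

using Reg = int;  // stand-in for Register

template <typename Derived>
struct StaticDescriptorBase {
  // Defaults; a derived descriptor overrides one by simply redeclaring it.
  static constexpr int kReturnCount = 1;
  static constexpr bool kNoContext = false;

  static constexpr int GetRegisterParameterCount() {
    const int regs = static_cast<int>(Derived::registers().size());
    return regs < Derived::kParameterCount ? regs : Derived::kParameterCount;
  }
  static constexpr int GetStackParameterCount() {
    return Derived::kParameterCount - GetRegisterParameterCount();
  }
  static constexpr Reg GetRegisterParameter(int i) {
    return Derived::registers()[i];
  }
};

struct AddDescriptor : StaticDescriptorBase<AddDescriptor> {
  static constexpr int kParameterCount = 3;  // e.g. kLeft, kRight, kSlot
  // Only two registers are provided, so the third parameter is a stack param.
  static constexpr std::array<Reg, 2> registers() { return {0, 1}; }
};

int main() {
  std::cout << AddDescriptor::GetRegisterParameterCount() << " register / "
            << AddDescriptor::GetStackParameterCount() << " stack parameters\n";
  return 0;
}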
-#define DECLARE_DESCRIPTOR_WITH_BASE(name, base) \
- public: \
- explicit name() : base(key()) {} \
- static inline CallDescriptors::Key key();
+template <typename Descriptor>
+class StaticJSCallInterfaceDescriptor
+ : public StaticCallInterfaceDescriptor<Descriptor> {
+ public:
+ static constexpr auto kStackArgumentOrder = StackArgumentOrder::kJS;
+ static constexpr inline auto registers();
-#if defined(V8_TARGET_ARCH_IA32)
-// To support all possible cases, we must limit the number of register args for
-// TFS builtins on ia32 to 3. Out of the 6 allocatable registers, esi is taken
-// as the context register and ebx is the root register. One register must
-// remain available to store the jump/call target. Thus 3 registers remain for
-// arguments. The reason this applies to TFS builtins specifically is because
-// this becomes relevant for builtins used as targets of Torque function
-// pointers (which must have a register available to store the target).
-// TODO(jgruber): Ideally we should just decrement kMaxBuiltinRegisterParams but
-// that comes with its own set of complications. It's possible, but requires
-// refactoring the calling convention of other existing stubs.
-constexpr int kMaxBuiltinRegisterParams = 4;
-constexpr int kMaxTFSBuiltinRegisterParams = 3;
-#else
-constexpr int kMaxBuiltinRegisterParams = 5;
-constexpr int kMaxTFSBuiltinRegisterParams = kMaxBuiltinRegisterParams;
-#endif
-STATIC_ASSERT(kMaxTFSBuiltinRegisterParams <= kMaxBuiltinRegisterParams);
+ using StaticCallInterfaceDescriptor<
+ Descriptor>::StaticCallInterfaceDescriptor;
+};
-#define DECLARE_DEFAULT_DESCRIPTOR(name, base) \
- DECLARE_DESCRIPTOR_WITH_BASE(name, base) \
- protected: \
- static const int kRegisterParams = \
- kParameterCount > kMaxTFSBuiltinRegisterParams \
- ? kMaxTFSBuiltinRegisterParams \
- : kParameterCount; \
- static const int kStackParams = kParameterCount - kRegisterParams; \
- void InitializePlatformSpecific(CallInterfaceDescriptorData* data) \
- override { \
- DefaultInitializePlatformSpecific(data, kRegisterParams); \
- } \
- void InitializePlatformIndependent(CallInterfaceDescriptorData* data) \
- override { \
- data->InitializePlatformIndependent(Flags(kDescriptorFlags), kReturnCount, \
- kParameterCount, nullptr, 0, \
- kStackArgumentOrder); \
- } \
- name(CallDescriptors::Key key) : base(key) {} \
- \
- public:
-
-#define DECLARE_JS_COMPATIBLE_DESCRIPTOR(name, base, \
- non_js_reg_parameters_count) \
- DECLARE_DESCRIPTOR_WITH_BASE(name, base) \
- protected: \
- void InitializePlatformSpecific(CallInterfaceDescriptorData* data) \
- override { \
- JSDefaultInitializePlatformSpecific(data, non_js_reg_parameters_count); \
- } \
- name(CallDescriptors::Key key) : base(key) {} \
- \
- public:
-
-#define DEFINE_FLAGS_AND_RESULT_AND_PARAMETERS(flags, stack_order, \
- return_count, ...) \
- static constexpr int kDescriptorFlags = flags; \
- static constexpr int kReturnCount = return_count; \
- static constexpr StackArgumentOrder kStackArgumentOrder = stack_order; \
- enum ParameterIndices { \
- __dummy = -1, /* to be able to pass zero arguments */ \
- ##__VA_ARGS__, \
- \
- kParameterCount, \
- kContext = kParameterCount /* implicit parameter */ \
- };
+template <Builtins::Name kBuiltin>
+struct CallInterfaceDescriptorFor;
+
+// Stub class replacing std::array<Register, 0>, as a workaround for MSVC's
+// https://github.com/microsoft/STL/issues/942
+struct EmptyRegisterArray {
+ Register* data() { return nullptr; }
+ size_t size() const { return 0; }
+ Register operator[](size_t i) const { UNREACHABLE(); }
+};
-#define DEFINE_RESULT_AND_PARAMETERS(return_count, ...) \
- DEFINE_FLAGS_AND_RESULT_AND_PARAMETERS( \
- CallInterfaceDescriptorData::kNoFlags, StackArgumentOrder::kDefault, \
- return_count, ##__VA_ARGS__)
+// Helper method for defining an array of registers for the various
+// Descriptor::registers() methods.
+template <typename... Registers>
+constexpr std::array<Register, 1 + sizeof...(Registers)> RegisterArray(
+ Register first_reg, Registers... regs) {
+ return {first_reg, regs...};
+}
+constexpr EmptyRegisterArray RegisterArray() { return {}; }
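// Minimal sketch (hypothetical names, with int standing in for Register) of
// the size-deducing RegisterArray helper and the zero-argument stub above:
// the array length is deduced from the argument count, and the empty case
// falls back to a tiny struct instead of std::array<Register, 0>.
#include <array>
#include <cstddef>

template <typename... Rest>
constexpr std::array<int, 1 + sizeof...(Rest)> MakeRegisterArray(int first,
                                                                 Rest... rest) {
  return {first, rest...};
}

struct EmptyArray {  // mirrors the MSVC workaround for the zero-size case
  constexpr const int* data() const { return nullptr; }
  constexpr std::size_t size() const { return 0; }
};
constexpr EmptyArray MakeRegisterArray() { return {}; }

static_assert(MakeRegisterArray(3, 5, 7).size() == 3, "size is deduced");
static_assert(MakeRegisterArray().size() == 0, "empty case uses the stub");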
+
+#define DECLARE_DESCRIPTOR_WITH_BASE(name, base) \
+ public: \
+ /* StaticCallInterfaceDescriptor can call Initialize methods */ \
+ friend class StaticCallInterfaceDescriptor<name>; \
+ explicit name() : base(key()) {} \
+ static inline CallDescriptors::Key key();
+
+#define DECLARE_DEFAULT_DESCRIPTOR(name) \
+ DECLARE_DESCRIPTOR_WITH_BASE(name, StaticCallInterfaceDescriptor) \
+ static constexpr int kMaxRegisterParams = kMaxTFSBuiltinRegisterParams; \
+ \
+ protected: \
+ explicit name(CallDescriptors::Key key) \
+ : StaticCallInterfaceDescriptor(key) {} \
+ \
+ public:
+
+#define DECLARE_JS_COMPATIBLE_DESCRIPTOR(name) \
+ DECLARE_DESCRIPTOR_WITH_BASE(name, StaticJSCallInterfaceDescriptor) \
+ protected: \
+ explicit name(CallDescriptors::Key key) \
+ : StaticJSCallInterfaceDescriptor(key) {} \
+ \
+ public:
+
+#define DEFINE_RESULT_AND_PARAMETERS(return_count, ...) \
+ static constexpr int kReturnCount = return_count; \
+ enum ParameterIndices { \
+ __dummy = -1, /* to be able to pass zero arguments */ \
+ ##__VA_ARGS__, \
+ \
+ kParameterCount, \
+ kContext = kParameterCount /* implicit parameter */ \
+ };
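// Roughly what the enum part of DEFINE_PARAMETERS(kReceiver, kName, kSlot)
// expands to (standalone illustration): the trailing kParameterCount member
// counts the named parameters, and kContext aliases the implicit final slot.
enum ParameterIndices {
  __dummy = -1,  // lets the macro accept an empty parameter list
  kReceiver,     // 0
  kName,         // 1
  kSlot,         // 2
  kParameterCount,             // 3
  kContext = kParameterCount   // implicit context parameter
};
static_assert(kParameterCount == 3, "three named parameters");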
// This is valid only for builtins that use EntryFrame, which does not scan
// stack arguments on GC.
#define DEFINE_PARAMETERS_ENTRY(...) \
- static constexpr int kDescriptorFlags = \
- CallInterfaceDescriptorData::kNoContext | \
- CallInterfaceDescriptorData::kNoStackScan; \
+ static constexpr bool kNoContext = true; \
+ static constexpr bool kNoStackScan = true; \
static constexpr StackArgumentOrder kStackArgumentOrder = \
StackArgumentOrder::kDefault; \
static constexpr int kReturnCount = 1; \
@@ -501,37 +560,37 @@ STATIC_ASSERT(kMaxTFSBuiltinRegisterParams <= kMaxBuiltinRegisterParams);
kParameterCount \
};
-#define DEFINE_PARAMETERS(...) \
- DEFINE_FLAGS_AND_RESULT_AND_PARAMETERS( \
- CallInterfaceDescriptorData::kNoFlags, StackArgumentOrder::kDefault, 1, \
- ##__VA_ARGS__)
+#define DEFINE_PARAMETERS(...) DEFINE_RESULT_AND_PARAMETERS(1, ##__VA_ARGS__)
+
+#define DEFINE_PARAMETERS_NO_CONTEXT(...) \
+ DEFINE_PARAMETERS(__VA_ARGS__) \
+ static constexpr bool kNoContext = true;
+
+#define DEFINE_PARAMETERS_VARARGS(...) \
+ DEFINE_PARAMETERS(__VA_ARGS__) \
+ static constexpr bool kAllowVarArgs = true; \
+ static constexpr StackArgumentOrder kStackArgumentOrder = \
+ StackArgumentOrder::kJS;
-#define DEFINE_PARAMETERS_NO_CONTEXT(...) \
- DEFINE_FLAGS_AND_RESULT_AND_PARAMETERS( \
- CallInterfaceDescriptorData::kNoContext, StackArgumentOrder::kDefault, \
- 1, ##__VA_ARGS__)
+#define DEFINE_PARAMETERS_NO_CONTEXT_VARARGS(...) \
+ DEFINE_PARAMETERS_NO_CONTEXT(__VA_ARGS__) \
+ static constexpr bool kAllowVarArgs = true; \
+ static constexpr StackArgumentOrder kStackArgumentOrder = \
+ StackArgumentOrder::kJS;
-#define DEFINE_PARAMETERS_VARARGS(...) \
- DEFINE_FLAGS_AND_RESULT_AND_PARAMETERS( \
- CallInterfaceDescriptorData::kAllowVarArgs, StackArgumentOrder::kJS, 1, \
- ##__VA_ARGS__)
+#define DEFINE_RESULT_AND_PARAMETERS_NO_CONTEXT(return_count, ...) \
+ DEFINE_RESULT_AND_PARAMETERS(return_count, ##__VA_ARGS__) \
+ static constexpr bool kNoContext = true;
-#define DEFINE_RESULT_AND_PARAMETER_TYPES_WITH_FLAG(flag, ...) \
- void InitializePlatformIndependent(CallInterfaceDescriptorData* data) \
- override { \
+#define DEFINE_RESULT_AND_PARAMETER_TYPES(...) \
+ static void InitializeTypes(CallInterfaceDescriptorData* data) { \
MachineType machine_types[] = {__VA_ARGS__}; \
static_assert( \
kReturnCount + kParameterCount == arraysize(machine_types), \
"Parameter names definition is not consistent with parameter types"); \
- data->InitializePlatformIndependent( \
- Flags(flag | kDescriptorFlags), kReturnCount, kParameterCount, \
- machine_types, arraysize(machine_types), kStackArgumentOrder); \
+ data->InitializeTypes(machine_types, arraysize(machine_types)); \
}
-#define DEFINE_RESULT_AND_PARAMETER_TYPES(...) \
- DEFINE_RESULT_AND_PARAMETER_TYPES_WITH_FLAG( \
- CallInterfaceDescriptorData::kNoFlags, __VA_ARGS__)
-
#define DEFINE_PARAMETER_TYPES(...) \
DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::AnyTagged() /* result */, \
##__VA_ARGS__)
@@ -539,8 +598,7 @@ STATIC_ASSERT(kMaxTFSBuiltinRegisterParams <= kMaxBuiltinRegisterParams);
// When the extra arguments described here are located in the stack, they are
// just above the return address in the frame (first arguments).
#define DEFINE_JS_PARAMETERS(...) \
- static constexpr int kDescriptorFlags = \
- CallInterfaceDescriptorData::kAllowVarArgs; \
+ static constexpr bool kAllowVarArgs = true; \
static constexpr int kReturnCount = 1; \
static constexpr StackArgumentOrder kStackArgumentOrder = \
StackArgumentOrder::kJS; \
@@ -554,9 +612,8 @@ STATIC_ASSERT(kMaxTFSBuiltinRegisterParams <= kMaxBuiltinRegisterParams);
};
#define DEFINE_JS_PARAMETERS_NO_CONTEXT(...) \
- static constexpr int kDescriptorFlags = \
- CallInterfaceDescriptorData::kAllowVarArgs | \
- CallInterfaceDescriptorData::kNoContext; \
+ static constexpr bool kAllowVarArgs = true; \
+ static constexpr bool kNoContext = true; \
static constexpr int kReturnCount = 1; \
static constexpr StackArgumentOrder kStackArgumentOrder = \
StackArgumentOrder::kJS; \
@@ -574,63 +631,22 @@ STATIC_ASSERT(kMaxTFSBuiltinRegisterParams <= kMaxBuiltinRegisterParams);
MachineType::Int32(), /* kActualArgumentsCount */ \
##__VA_ARGS__)
-#define DECLARE_DESCRIPTOR(name, base) \
- DECLARE_DESCRIPTOR_WITH_BASE(name, base) \
- protected: \
- void InitializePlatformSpecific(CallInterfaceDescriptorData* data) override; \
- name(CallDescriptors::Key key) : base(key) {} \
- \
+#define DECLARE_DESCRIPTOR(name) \
+ DECLARE_DESCRIPTOR_WITH_BASE(name, StaticCallInterfaceDescriptor) \
+ protected: \
+ explicit name(CallDescriptors::Key key) \
+ : StaticCallInterfaceDescriptor(key) {} \
+ \
public:
-class V8_EXPORT_PRIVATE VoidDescriptor : public CallInterfaceDescriptor {
+class V8_EXPORT_PRIVATE VoidDescriptor
+ : public StaticCallInterfaceDescriptor<VoidDescriptor> {
public:
DEFINE_PARAMETERS()
DEFINE_PARAMETER_TYPES()
- DECLARE_DESCRIPTOR(VoidDescriptor, CallInterfaceDescriptor)
-};
-
-// This class is subclassed by Torque-generated call interface descriptors.
-template <int return_count, int parameter_count, bool has_context_parameter>
-class TorqueInterfaceDescriptor : public CallInterfaceDescriptor {
- public:
- static constexpr int kDescriptorFlags =
- has_context_parameter ? CallInterfaceDescriptorData::kNoFlags
- : CallInterfaceDescriptorData::kNoContext;
- static constexpr int kParameterCount = parameter_count;
- enum ParameterIndices { kContext = kParameterCount };
- template <int i>
- static ParameterIndices ParameterIndex() {
- STATIC_ASSERT(0 <= i && i < kParameterCount);
- return static_cast<ParameterIndices>(i);
- }
- static constexpr int kReturnCount = return_count;
-
- using CallInterfaceDescriptor::CallInterfaceDescriptor;
+ DECLARE_DESCRIPTOR(VoidDescriptor)
- protected:
- static const int kRegisterParams =
- kParameterCount > kMaxTFSBuiltinRegisterParams
- ? kMaxTFSBuiltinRegisterParams
- : kParameterCount;
- static const int kStackParams = kParameterCount - kRegisterParams;
- virtual std::vector<MachineType> ReturnType() = 0;
- virtual std::array<MachineType, kParameterCount> ParameterTypes() = 0;
- void InitializePlatformSpecific(CallInterfaceDescriptorData* data) override {
- DefaultInitializePlatformSpecific(data, kRegisterParams);
- }
- void InitializePlatformIndependent(
- CallInterfaceDescriptorData* data) override {
- std::vector<MachineType> machine_types = ReturnType();
- DCHECK_EQ(kReturnCount, machine_types.size());
- auto parameter_types = ParameterTypes();
- machine_types.insert(machine_types.end(), parameter_types.begin(),
- parameter_types.end());
- DCHECK_EQ(kReturnCount + kParameterCount, machine_types.size());
- data->InitializePlatformIndependent(Flags(kDescriptorFlags), kReturnCount,
- kParameterCount, machine_types.data(),
- static_cast<int>(machine_types.size()),
- StackArgumentOrder::kDefault);
- }
+ static constexpr auto registers();
};
// Dummy descriptor used to mark builtins that don't yet have their proper
@@ -646,180 +662,171 @@ using CCallDescriptor = VoidDescriptor;
// here.
using DeoptimizationEntryDescriptor = VoidDescriptor;
-class AllocateDescriptor : public CallInterfaceDescriptor {
+class AllocateDescriptor
+ : public StaticCallInterfaceDescriptor<AllocateDescriptor> {
public:
DEFINE_PARAMETERS_NO_CONTEXT(kRequestedSize)
DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::TaggedPointer(), // result 1
MachineType::IntPtr()) // kRequestedSize
- DECLARE_DESCRIPTOR(AllocateDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(AllocateDescriptor)
+
+ static constexpr auto registers();
};
// This descriptor defines the JavaScript calling convention that can be used
// by stubs: target, new.target, argc (not including the receiver) and context
// are passed in registers while receiver and the rest of the JS arguments are
// passed on the stack.
-class JSTrampolineDescriptor : public CallInterfaceDescriptor {
+class JSTrampolineDescriptor
+ : public StaticJSCallInterfaceDescriptor<JSTrampolineDescriptor> {
public:
DEFINE_JS_PARAMETERS()
DEFINE_JS_PARAMETER_TYPES()
- DECLARE_JS_COMPATIBLE_DESCRIPTOR(JSTrampolineDescriptor,
- CallInterfaceDescriptor, 0)
+ DECLARE_JS_COMPATIBLE_DESCRIPTOR(JSTrampolineDescriptor)
};
-class ContextOnlyDescriptor : public CallInterfaceDescriptor {
+class ContextOnlyDescriptor
+ : public StaticCallInterfaceDescriptor<ContextOnlyDescriptor> {
public:
DEFINE_PARAMETERS()
DEFINE_PARAMETER_TYPES()
- DECLARE_DESCRIPTOR(ContextOnlyDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(ContextOnlyDescriptor)
+
+ static constexpr auto registers();
};
-class NoContextDescriptor : public CallInterfaceDescriptor {
+class NoContextDescriptor
+ : public StaticCallInterfaceDescriptor<NoContextDescriptor> {
public:
DEFINE_PARAMETERS_NO_CONTEXT()
DEFINE_PARAMETER_TYPES()
- DECLARE_DESCRIPTOR(NoContextDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(NoContextDescriptor)
+
+ static constexpr auto registers();
};
// LoadDescriptor is used by all stubs that implement Load/KeyedLoad ICs.
-class LoadDescriptor : public CallInterfaceDescriptor {
+class LoadDescriptor : public StaticCallInterfaceDescriptor<LoadDescriptor> {
public:
DEFINE_PARAMETERS(kReceiver, kName, kSlot)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kReceiver
MachineType::AnyTagged(), // kName
MachineType::TaggedSigned()) // kSlot
- DECLARE_DESCRIPTOR(LoadDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(LoadDescriptor)
+
+ static constexpr inline Register ReceiverRegister();
+ static constexpr inline Register NameRegister();
+ static constexpr inline Register SlotRegister();
- static const Register ReceiverRegister();
- static const Register NameRegister();
- static const Register SlotRegister();
+ static constexpr auto registers();
};
// LoadBaselineDescriptor is a load descriptor that does not take a context as
// input.
-class LoadBaselineDescriptor : public CallInterfaceDescriptor {
+class LoadBaselineDescriptor
+ : public StaticCallInterfaceDescriptor<LoadBaselineDescriptor> {
public:
DEFINE_PARAMETERS_NO_CONTEXT(kReceiver, kName, kSlot)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kReceiver
MachineType::AnyTagged(), // kName
MachineType::TaggedSigned()) // kSlot
- DECLARE_DESCRIPTOR(LoadBaselineDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(LoadBaselineDescriptor)
+
+ static constexpr auto registers();
};
-class LoadGlobalNoFeedbackDescriptor : public CallInterfaceDescriptor {
+class LoadGlobalNoFeedbackDescriptor
+ : public StaticCallInterfaceDescriptor<LoadGlobalNoFeedbackDescriptor> {
public:
DEFINE_PARAMETERS(kName, kICKind)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kName
MachineType::TaggedSigned()) // kICKind
- DECLARE_DESCRIPTOR(LoadGlobalNoFeedbackDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(LoadGlobalNoFeedbackDescriptor)
- static const Register NameRegister() {
- return LoadDescriptor::NameRegister();
- }
+ static constexpr inline Register ICKindRegister();
- static const Register ICKindRegister() {
- return LoadDescriptor::SlotRegister();
- }
+ static constexpr auto registers();
};
-class LoadNoFeedbackDescriptor : public LoadGlobalNoFeedbackDescriptor {
+class LoadNoFeedbackDescriptor
+ : public StaticCallInterfaceDescriptor<LoadNoFeedbackDescriptor> {
public:
DEFINE_PARAMETERS(kReceiver, kName, kICKind)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kReceiver
MachineType::AnyTagged(), // kName
MachineType::TaggedSigned()) // kICKind
- DECLARE_DESCRIPTOR(LoadNoFeedbackDescriptor, LoadGlobalNoFeedbackDescriptor)
-
- static const Register ReceiverRegister() {
- return LoadDescriptor::ReceiverRegister();
- }
+ DECLARE_DESCRIPTOR(LoadNoFeedbackDescriptor)
- static const Register NameRegister() {
- return LoadGlobalNoFeedbackDescriptor::NameRegister();
- }
+ static constexpr inline Register ICKindRegister();
- static const Register ICKindRegister() {
- return LoadGlobalNoFeedbackDescriptor::ICKindRegister();
- }
+ static constexpr auto registers();
};
-class LoadGlobalDescriptor : public CallInterfaceDescriptor {
+class LoadGlobalDescriptor
+ : public StaticCallInterfaceDescriptor<LoadGlobalDescriptor> {
public:
DEFINE_PARAMETERS(kName, kSlot)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kName
MachineType::TaggedSigned()) // kSlot
- DECLARE_DESCRIPTOR(LoadGlobalDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(LoadGlobalDescriptor)
- static const Register NameRegister() {
- return LoadDescriptor::NameRegister();
- }
-
- static const Register SlotRegister() {
- return LoadDescriptor::SlotRegister();
- }
+ static constexpr auto registers();
};
-class LoadGlobalBaselineDescriptor : public CallInterfaceDescriptor {
+class LoadGlobalBaselineDescriptor
+ : public StaticCallInterfaceDescriptor<LoadGlobalBaselineDescriptor> {
public:
DEFINE_PARAMETERS_NO_CONTEXT(kName, kSlot)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kName
MachineType::TaggedSigned()) // kSlot
- DECLARE_DESCRIPTOR(LoadGlobalBaselineDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(LoadGlobalBaselineDescriptor)
+
+ static constexpr auto registers();
};
-class LookupBaselineDescriptor : public CallInterfaceDescriptor {
+class LookupBaselineDescriptor
+ : public StaticCallInterfaceDescriptor<LookupBaselineDescriptor> {
public:
DEFINE_PARAMETERS_NO_CONTEXT(kName, kDepth, kSlot)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kName
MachineType::AnyTagged(), // kDepth
MachineType::AnyTagged()) // kSlot
- DECLARE_DESCRIPTOR(LookupBaselineDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(LookupBaselineDescriptor)
};
-class StoreDescriptor : public CallInterfaceDescriptor {
+class StoreDescriptor : public StaticCallInterfaceDescriptor<StoreDescriptor> {
public:
DEFINE_PARAMETERS(kReceiver, kName, kValue, kSlot)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kReceiver
MachineType::AnyTagged(), // kName
MachineType::AnyTagged(), // kValue
MachineType::TaggedSigned()) // kSlot
- DECLARE_DESCRIPTOR(StoreDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(StoreDescriptor)
- static const Register ReceiverRegister();
- static const Register NameRegister();
- static const Register ValueRegister();
- static const Register SlotRegister();
+ static constexpr inline Register ReceiverRegister();
+ static constexpr inline Register NameRegister();
+ static constexpr inline Register ValueRegister();
+ static constexpr inline Register SlotRegister();
-#if V8_TARGET_ARCH_IA32
- static const bool kPassLastArgsOnStack = true;
-#else
- static const bool kPassLastArgsOnStack = false;
-#endif
-
- // Pass value and slot through the stack.
- static const int kStackArgumentsCount = kPassLastArgsOnStack ? 2 : 0;
+ static constexpr auto registers();
};
-class StoreBaselineDescriptor : public CallInterfaceDescriptor {
+class StoreBaselineDescriptor
+ : public StaticCallInterfaceDescriptor<StoreBaselineDescriptor> {
public:
DEFINE_PARAMETERS_NO_CONTEXT(kReceiver, kName, kValue, kSlot)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kReceiver
MachineType::AnyTagged(), // kName
MachineType::AnyTagged(), // kValue
MachineType::TaggedSigned()) // kSlot
- DECLARE_DESCRIPTOR(StoreBaselineDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(StoreBaselineDescriptor)
-#if V8_TARGET_ARCH_IA32
- static const bool kPassLastArgsOnStack = true;
-#else
- static const bool kPassLastArgsOnStack = false;
-#endif
-
- // Pass value and slot through the stack.
- static const int kStackArgumentsCount = kPassLastArgsOnStack ? 2 : 0;
+ static constexpr auto registers();
};
-class StoreTransitionDescriptor : public StoreDescriptor {
+class StoreTransitionDescriptor
+ : public StaticCallInterfaceDescriptor<StoreTransitionDescriptor> {
public:
DEFINE_PARAMETERS(kReceiver, kName, kMap, kValue, kSlot, kVector)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kReceiver
@@ -828,17 +835,15 @@ class StoreTransitionDescriptor : public StoreDescriptor {
MachineType::AnyTagged(), // kValue
MachineType::TaggedSigned(), // kSlot
MachineType::AnyTagged()) // kVector
- DECLARE_DESCRIPTOR(StoreTransitionDescriptor, StoreDescriptor)
+ DECLARE_DESCRIPTOR(StoreTransitionDescriptor)
- static const Register MapRegister();
- static const Register SlotRegister();
- static const Register VectorRegister();
+ static constexpr inline Register MapRegister();
- // Pass value, slot and vector through the stack.
- static const int kStackArgumentsCount = kPassLastArgsOnStack ? 3 : 0;
+ static constexpr auto registers();
};
-class StoreWithVectorDescriptor : public StoreDescriptor {
+class StoreWithVectorDescriptor
+ : public StaticCallInterfaceDescriptor<StoreWithVectorDescriptor> {
public:
DEFINE_PARAMETERS(kReceiver, kName, kValue, kSlot, kVector)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kReceiver
@@ -846,72 +851,52 @@ class StoreWithVectorDescriptor : public StoreDescriptor {
MachineType::AnyTagged(), // kValue
MachineType::TaggedSigned(), // kSlot
MachineType::AnyTagged()) // kVector
- DECLARE_DESCRIPTOR(StoreWithVectorDescriptor, StoreDescriptor)
+ DECLARE_DESCRIPTOR(StoreWithVectorDescriptor)
- static const Register VectorRegister();
+ static constexpr inline Register VectorRegister();
- // Pass value, slot and vector through the stack.
- static const int kStackArgumentsCount = kPassLastArgsOnStack ? 3 : 0;
+ static constexpr auto registers();
};
-class StoreGlobalDescriptor : public CallInterfaceDescriptor {
+class StoreGlobalDescriptor
+ : public StaticCallInterfaceDescriptor<StoreGlobalDescriptor> {
public:
DEFINE_PARAMETERS(kName, kValue, kSlot)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kName
MachineType::AnyTagged(), // kValue
MachineType::TaggedSigned()) // kSlot
- DECLARE_DESCRIPTOR(StoreGlobalDescriptor, CallInterfaceDescriptor)
-
- static const bool kPassLastArgsOnStack =
- StoreDescriptor::kPassLastArgsOnStack;
- // Pass value and slot through the stack.
- static const int kStackArgumentsCount = kPassLastArgsOnStack ? 2 : 0;
-
- static const Register NameRegister() {
- return StoreDescriptor::NameRegister();
- }
-
- static const Register ValueRegister() {
- return StoreDescriptor::ValueRegister();
- }
+ DECLARE_DESCRIPTOR(StoreGlobalDescriptor)
- static const Register SlotRegister() {
- return StoreDescriptor::SlotRegister();
- }
+ static constexpr auto registers();
};
-class StoreGlobalBaselineDescriptor : public CallInterfaceDescriptor {
+class StoreGlobalBaselineDescriptor
+ : public StaticCallInterfaceDescriptor<StoreGlobalBaselineDescriptor> {
public:
DEFINE_PARAMETERS_NO_CONTEXT(kName, kValue, kSlot)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kName
MachineType::AnyTagged(), // kValue
MachineType::TaggedSigned()) // kSlot
- DECLARE_DESCRIPTOR(StoreGlobalBaselineDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(StoreGlobalBaselineDescriptor)
- static const bool kPassLastArgsOnStack =
- StoreDescriptor::kPassLastArgsOnStack;
- // Pass value and slot through the stack.
- static const int kStackArgumentsCount = kPassLastArgsOnStack ? 2 : 0;
+ static constexpr auto registers();
};
-class StoreGlobalWithVectorDescriptor : public StoreGlobalDescriptor {
+class StoreGlobalWithVectorDescriptor
+ : public StaticCallInterfaceDescriptor<StoreGlobalWithVectorDescriptor> {
public:
DEFINE_PARAMETERS(kName, kValue, kSlot, kVector)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kName
MachineType::AnyTagged(), // kValue
MachineType::TaggedSigned(), // kSlot
MachineType::AnyTagged()) // kVector
- DECLARE_DESCRIPTOR(StoreGlobalWithVectorDescriptor, StoreGlobalDescriptor)
-
- static const Register VectorRegister() {
- return StoreWithVectorDescriptor::VectorRegister();
- }
+ DECLARE_DESCRIPTOR(StoreGlobalWithVectorDescriptor)
- // Pass value, slot and vector through the stack.
- static const int kStackArgumentsCount = kPassLastArgsOnStack ? 3 : 0;
+ static constexpr auto registers();
};
-class LoadWithVectorDescriptor : public LoadDescriptor {
+class LoadWithVectorDescriptor
+ : public StaticCallInterfaceDescriptor<LoadWithVectorDescriptor> {
public:
// TODO(v8:9497): Revert the Machine type for kSlot to the
// TaggedSigned once Torque can emit better call descriptors
@@ -920,24 +905,19 @@ class LoadWithVectorDescriptor : public LoadDescriptor {
MachineType::AnyTagged(), // kName
MachineType::AnyTagged(), // kSlot
MachineType::AnyTagged()) // kVector
- DECLARE_DESCRIPTOR(LoadWithVectorDescriptor, LoadDescriptor)
+ DECLARE_DESCRIPTOR(LoadWithVectorDescriptor)
- static const Register VectorRegister();
-
-#if V8_TARGET_ARCH_IA32
- static const bool kPassLastArgsOnStack = true;
-#else
- static const bool kPassLastArgsOnStack = false;
-#endif
+ static constexpr inline Register VectorRegister();
- // Pass vector through the stack.
- static const int kStackArgumentsCount = kPassLastArgsOnStack ? 1 : 0;
+ static constexpr auto registers();
};
// Like LoadWithVectorDescriptor, except we pass the receiver (the object which
// should be used as the receiver for accessor function calls) and the lookup
// start object separately.
-class LoadWithReceiverAndVectorDescriptor : public LoadWithVectorDescriptor {
+class LoadWithReceiverAndVectorDescriptor
+ : public StaticCallInterfaceDescriptor<
+ LoadWithReceiverAndVectorDescriptor> {
public:
// TODO(v8:9497): Revert the Machine type for kSlot to the
// TaggedSigned once Torque can emit better call descriptors
@@ -947,22 +927,15 @@ class LoadWithReceiverAndVectorDescriptor : public LoadWithVectorDescriptor {
MachineType::AnyTagged(), // kName
MachineType::AnyTagged(), // kSlot
MachineType::AnyTagged()) // kVector
- DECLARE_DESCRIPTOR(LoadWithReceiverAndVectorDescriptor,
- LoadWithVectorDescriptor)
+ DECLARE_DESCRIPTOR(LoadWithReceiverAndVectorDescriptor)
- static const Register LookupStartObjectRegister();
+ static constexpr inline Register LookupStartObjectRegister();
-#if V8_TARGET_ARCH_IA32
- static const bool kPassLastArgsOnStack = true;
-#else
- static const bool kPassLastArgsOnStack = false;
-#endif
-
- // Pass vector through the stack.
- static const int kStackArgumentsCount = kPassLastArgsOnStack ? 1 : 0;
+ static constexpr auto registers();
};
-class LoadWithReceiverBaselineDescriptor : public LoadBaselineDescriptor {
+class LoadWithReceiverBaselineDescriptor
+ : public StaticCallInterfaceDescriptor<LoadWithReceiverBaselineDescriptor> {
public:
// TODO(v8:9497): Revert the Machine type for kSlot to the
// TaggedSigned once Torque can emit better call descriptors
@@ -971,29 +944,27 @@ class LoadWithReceiverBaselineDescriptor : public LoadBaselineDescriptor {
MachineType::AnyTagged(), // kLookupStartObject
MachineType::AnyTagged(), // kName
MachineType::AnyTagged()) // kSlot
- DECLARE_DESCRIPTOR(LoadWithReceiverBaselineDescriptor, LoadBaselineDescriptor)
+ DECLARE_DESCRIPTOR(LoadWithReceiverBaselineDescriptor)
+
+ static constexpr auto registers();
};
-class LoadGlobalWithVectorDescriptor : public LoadGlobalDescriptor {
+class LoadGlobalWithVectorDescriptor
+ : public StaticCallInterfaceDescriptor<LoadGlobalWithVectorDescriptor> {
public:
DEFINE_PARAMETERS(kName, kSlot, kVector)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kName
MachineType::TaggedSigned(), // kSlot
MachineType::AnyTagged()) // kVector
- DECLARE_DESCRIPTOR(LoadGlobalWithVectorDescriptor, LoadGlobalDescriptor)
+ DECLARE_DESCRIPTOR(LoadGlobalWithVectorDescriptor)
-#if V8_TARGET_ARCH_IA32
- // On ia32, LoadWithVectorDescriptor passes vector on the stack and thus we
- // need to choose a new register here.
- static const Register VectorRegister() { return edx; }
-#else
- static const Register VectorRegister() {
- return LoadWithVectorDescriptor::VectorRegister();
- }
-#endif
+ static constexpr inline Register VectorRegister();
+
+ static constexpr auto registers();
};
-class DynamicCheckMapsDescriptor final : public CallInterfaceDescriptor {
+class DynamicCheckMapsDescriptor final
+ : public StaticCallInterfaceDescriptor<DynamicCheckMapsDescriptor> {
public:
DEFINE_PARAMETERS(kMap, kSlot, kHandler)
DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::Int32(), // return val
@@ -1001,20 +972,28 @@ class DynamicCheckMapsDescriptor final : public CallInterfaceDescriptor {
MachineType::IntPtr(), // kSlot
MachineType::TaggedSigned()) // kHandler
- DECLARE_DESCRIPTOR(DynamicCheckMapsDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(DynamicCheckMapsDescriptor)
+
+ static constexpr auto registers();
+ static constexpr bool kRestrictAllocatableRegisters = true;
};
-class FastNewObjectDescriptor : public CallInterfaceDescriptor {
+class FastNewObjectDescriptor
+ : public StaticCallInterfaceDescriptor<FastNewObjectDescriptor> {
public:
DEFINE_PARAMETERS(kTarget, kNewTarget)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kTarget
MachineType::AnyTagged()) // kNewTarget
- DECLARE_DESCRIPTOR(FastNewObjectDescriptor, CallInterfaceDescriptor)
- static const Register TargetRegister();
- static const Register NewTargetRegister();
+ DECLARE_DESCRIPTOR(FastNewObjectDescriptor)
+
+ static constexpr inline Register TargetRegister();
+ static constexpr inline Register NewTargetRegister();
+
+ static constexpr auto registers();
};
-class RecordWriteDescriptor final : public CallInterfaceDescriptor {
+class RecordWriteDescriptor final
+ : public StaticCallInterfaceDescriptor<RecordWriteDescriptor> {
public:
DEFINE_PARAMETERS_NO_CONTEXT(kObject, kSlot, kRememberedSet, kFPMode)
DEFINE_PARAMETER_TYPES(MachineType::TaggedPointer(), // kObject
@@ -1022,90 +1001,119 @@ class RecordWriteDescriptor final : public CallInterfaceDescriptor {
MachineType::TaggedSigned(), // kRememberedSet
MachineType::TaggedSigned()) // kFPMode
- DECLARE_DESCRIPTOR(RecordWriteDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(RecordWriteDescriptor)
+
+ static constexpr auto registers();
+ static constexpr bool kRestrictAllocatableRegisters = true;
};
-class EphemeronKeyBarrierDescriptor final : public CallInterfaceDescriptor {
+class EphemeronKeyBarrierDescriptor final
+ : public StaticCallInterfaceDescriptor<EphemeronKeyBarrierDescriptor> {
public:
DEFINE_PARAMETERS_NO_CONTEXT(kObject, kSlotAddress, kFPMode)
DEFINE_PARAMETER_TYPES(MachineType::TaggedPointer(), // kObject
MachineType::Pointer(), // kSlotAddress
MachineType::TaggedSigned()) // kFPMode
- DECLARE_DESCRIPTOR(EphemeronKeyBarrierDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(EphemeronKeyBarrierDescriptor)
+
+ static constexpr auto registers();
+ static constexpr bool kRestrictAllocatableRegisters = true;
};
-class TypeConversionDescriptor final : public CallInterfaceDescriptor {
+class TypeConversionDescriptor final
+ : public StaticCallInterfaceDescriptor<TypeConversionDescriptor> {
public:
DEFINE_PARAMETERS(kArgument)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged())
- DECLARE_DESCRIPTOR(TypeConversionDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(TypeConversionDescriptor)
+
+ static constexpr inline Register ArgumentRegister();
- static const Register ArgumentRegister();
+ static constexpr auto registers();
};
-class TypeConversionNoContextDescriptor final : public CallInterfaceDescriptor {
+class TypeConversionNoContextDescriptor final
+ : public StaticCallInterfaceDescriptor<TypeConversionNoContextDescriptor> {
public:
DEFINE_PARAMETERS_NO_CONTEXT(kArgument)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged())
- DECLARE_DESCRIPTOR(TypeConversionNoContextDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(TypeConversionNoContextDescriptor)
+
+ static constexpr auto registers();
};
-class TypeConversion_BaselineDescriptor final : public CallInterfaceDescriptor {
+class TypeConversion_BaselineDescriptor final
+ : public StaticCallInterfaceDescriptor<TypeConversion_BaselineDescriptor> {
public:
DEFINE_PARAMETERS_NO_CONTEXT(kArgument, kSlot)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), MachineType::UintPtr())
- DECLARE_DESCRIPTOR(TypeConversion_BaselineDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(TypeConversion_BaselineDescriptor)
};
-class SingleParameterOnStackDescriptor final : public CallInterfaceDescriptor {
+class SingleParameterOnStackDescriptor final
+ : public StaticCallInterfaceDescriptor<SingleParameterOnStackDescriptor> {
public:
DEFINE_PARAMETERS(kArgument)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged())
- DECLARE_DESCRIPTOR(SingleParameterOnStackDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(SingleParameterOnStackDescriptor)
+
+ static constexpr auto registers();
};
class AsyncFunctionStackParameterDescriptor final
- : public CallInterfaceDescriptor {
+ : public StaticCallInterfaceDescriptor<
+ AsyncFunctionStackParameterDescriptor> {
public:
DEFINE_PARAMETERS(kPromise, kResult)
DEFINE_PARAMETER_TYPES(MachineType::TaggedPointer(), MachineType::AnyTagged())
- DECLARE_DESCRIPTOR(AsyncFunctionStackParameterDescriptor,
- CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(AsyncFunctionStackParameterDescriptor)
+
+ static constexpr auto registers();
};
class GetIteratorStackParameterDescriptor final
- : public CallInterfaceDescriptor {
+ : public StaticCallInterfaceDescriptor<
+ GetIteratorStackParameterDescriptor> {
public:
DEFINE_PARAMETERS(kReceiver, kCallSlot, kFeedback, kResult)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), MachineType::AnyTagged(),
MachineType::AnyTagged(), MachineType::AnyTagged())
- DECLARE_DESCRIPTOR(GetIteratorStackParameterDescriptor,
- CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(GetIteratorStackParameterDescriptor)
+
+ static constexpr auto registers();
};
-class GetPropertyDescriptor final : public CallInterfaceDescriptor {
+class GetPropertyDescriptor final
+ : public StaticCallInterfaceDescriptor<GetPropertyDescriptor> {
public:
DEFINE_PARAMETERS(kObject, kKey)
- DECLARE_DEFAULT_DESCRIPTOR(GetPropertyDescriptor, CallInterfaceDescriptor)
+ DECLARE_DEFAULT_DESCRIPTOR(GetPropertyDescriptor)
};
-class TypeofDescriptor : public CallInterfaceDescriptor {
+class TypeofDescriptor
+ : public StaticCallInterfaceDescriptor<TypeofDescriptor> {
public:
DEFINE_PARAMETERS(kObject)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged())
- DECLARE_DESCRIPTOR(TypeofDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(TypeofDescriptor)
+
+ static constexpr inline auto registers();
};
-class CallTrampolineDescriptor : public CallInterfaceDescriptor {
+class CallTrampolineDescriptor
+ : public StaticCallInterfaceDescriptor<CallTrampolineDescriptor> {
public:
DEFINE_PARAMETERS_VARARGS(kFunction, kActualArgumentsCount)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kFunction
MachineType::Int32()) // kActualArgumentsCount
- DECLARE_DESCRIPTOR(CallTrampolineDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(CallTrampolineDescriptor)
+
+ static constexpr inline auto registers();
};
-class CallVarargsDescriptor : public CallInterfaceDescriptor {
+class CallVarargsDescriptor
+ : public StaticCallInterfaceDescriptor<CallVarargsDescriptor> {
public:
DEFINE_PARAMETERS_VARARGS(kTarget, kActualArgumentsCount, kArgumentsLength,
kArgumentsList)
@@ -1113,123 +1121,143 @@ class CallVarargsDescriptor : public CallInterfaceDescriptor {
MachineType::Int32(), // kActualArgumentsCount
MachineType::Int32(), // kArgumentsLength
MachineType::AnyTagged()) // kArgumentsList
- DECLARE_DESCRIPTOR(CallVarargsDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(CallVarargsDescriptor)
+
+ static constexpr inline auto registers();
};
-class CallForwardVarargsDescriptor : public CallInterfaceDescriptor {
+class CallForwardVarargsDescriptor
+ : public StaticCallInterfaceDescriptor<CallForwardVarargsDescriptor> {
public:
DEFINE_PARAMETERS_VARARGS(kTarget, kActualArgumentsCount, kStartIndex)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kTarget
MachineType::Int32(), // kActualArgumentsCount
MachineType::Int32()) // kStartIndex
- DECLARE_DESCRIPTOR(CallForwardVarargsDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(CallForwardVarargsDescriptor)
+
+ static constexpr inline auto registers();
};
-class CallFunctionTemplateDescriptor : public CallInterfaceDescriptor {
+class CallFunctionTemplateDescriptor
+ : public StaticCallInterfaceDescriptor<CallFunctionTemplateDescriptor> {
public:
DEFINE_PARAMETERS_VARARGS(kFunctionTemplateInfo, kArgumentsCount)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kFunctionTemplateInfo
MachineType::IntPtr()) // kArgumentsCount
- DECLARE_DESCRIPTOR(CallFunctionTemplateDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(CallFunctionTemplateDescriptor)
+
+ static constexpr inline auto registers();
};
-class CallWithSpreadDescriptor : public CallInterfaceDescriptor {
+class CallWithSpreadDescriptor
+ : public StaticCallInterfaceDescriptor<CallWithSpreadDescriptor> {
public:
DEFINE_PARAMETERS_VARARGS(kTarget, kArgumentsCount, kSpread)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kTarget
MachineType::Int32(), // kArgumentsCount
MachineType::AnyTagged()) // kSpread
- DECLARE_DESCRIPTOR(CallWithSpreadDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(CallWithSpreadDescriptor)
+
+ static constexpr inline auto registers();
};
-class CallWithSpread_BaselineDescriptor : public CallInterfaceDescriptor {
+class CallWithSpread_BaselineDescriptor
+ : public StaticCallInterfaceDescriptor<CallWithSpread_BaselineDescriptor> {
public:
- DEFINE_PARAMETERS_VARARGS(kTarget, kArgumentsCount, kSpread, kSlot)
+ DEFINE_PARAMETERS_NO_CONTEXT_VARARGS(kTarget, kArgumentsCount, kSpread, kSlot)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kTarget
MachineType::Int32(), // kArgumentsCount
MachineType::AnyTagged(), // kSpread
MachineType::UintPtr()) // kSlot
- DECLARE_DESCRIPTOR(CallWithSpread_BaselineDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(CallWithSpread_BaselineDescriptor)
};
-class CallWithSpread_WithFeedbackDescriptor : public CallInterfaceDescriptor {
+class CallWithSpread_WithFeedbackDescriptor
+ : public StaticCallInterfaceDescriptor<
+ CallWithSpread_WithFeedbackDescriptor> {
public:
DEFINE_PARAMETERS_VARARGS(kTarget, kArgumentsCount, kSpread, kSlot,
- kFeedbackVector)
+ kFeedbackVector, kReceiver)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kTarget
MachineType::Int32(), // kArgumentsCount
MachineType::AnyTagged(), // kSpread
MachineType::UintPtr(), // kSlot
- MachineType::AnyTagged()) // kFeedbackVector
- DECLARE_DESCRIPTOR(CallWithSpread_WithFeedbackDescriptor,
- CallInterfaceDescriptor)
+ MachineType::AnyTagged(), // kFeedbackVector
+ MachineType::AnyTagged()) // kReceiver
+ DECLARE_DESCRIPTOR(CallWithSpread_WithFeedbackDescriptor)
};
-class CallWithArrayLikeDescriptor : public CallInterfaceDescriptor {
+class CallWithArrayLikeDescriptor
+ : public StaticCallInterfaceDescriptor<CallWithArrayLikeDescriptor> {
public:
DEFINE_PARAMETERS(kTarget, kArgumentsList)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kTarget
MachineType::AnyTagged()) // kArgumentsList
- DECLARE_DESCRIPTOR(CallWithArrayLikeDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(CallWithArrayLikeDescriptor)
+
+ static constexpr inline auto registers();
};
class CallWithArrayLike_WithFeedbackDescriptor
- : public CallInterfaceDescriptor {
+ : public StaticCallInterfaceDescriptor<
+ CallWithArrayLike_WithFeedbackDescriptor> {
public:
- DEFINE_PARAMETERS(kTarget, kArgumentsList, kSlot, kFeedbackVector)
+ DEFINE_PARAMETERS(kTarget, kArgumentsList, kSlot, kFeedbackVector, kReceiver)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kTarget
MachineType::AnyTagged(), // kArgumentsList
MachineType::UintPtr(), // kSlot
- MachineType::AnyTagged()) // kFeedbackVector
- DECLARE_DESCRIPTOR(CallWithArrayLike_WithFeedbackDescriptor,
- CallInterfaceDescriptor)
+ MachineType::AnyTagged(), // kFeedbackVector
+ MachineType::AnyTagged()) // kReceiver
+ DECLARE_DESCRIPTOR(CallWithArrayLike_WithFeedbackDescriptor)
};
-class ConstructVarargsDescriptor : public CallInterfaceDescriptor {
+class ConstructVarargsDescriptor
+ : public StaticCallInterfaceDescriptor<ConstructVarargsDescriptor> {
public:
DEFINE_JS_PARAMETERS(kArgumentsLength, kArgumentsList)
DEFINE_JS_PARAMETER_TYPES(MachineType::Int32(), // kArgumentsLength
MachineType::AnyTagged()) // kArgumentsList
- DECLARE_DESCRIPTOR(ConstructVarargsDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(ConstructVarargsDescriptor)
+
+ static constexpr inline auto registers();
};
-class ConstructForwardVarargsDescriptor : public CallInterfaceDescriptor {
+class ConstructForwardVarargsDescriptor
+ : public StaticCallInterfaceDescriptor<ConstructForwardVarargsDescriptor> {
public:
DEFINE_JS_PARAMETERS(kStartIndex)
DEFINE_JS_PARAMETER_TYPES(MachineType::Int32())
- DECLARE_DESCRIPTOR(ConstructForwardVarargsDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(ConstructForwardVarargsDescriptor)
+
+ static constexpr inline auto registers();
};
-class ConstructWithSpreadDescriptor : public CallInterfaceDescriptor {
+class ConstructWithSpreadDescriptor
+ : public StaticCallInterfaceDescriptor<ConstructWithSpreadDescriptor> {
public:
DEFINE_JS_PARAMETERS(kSpread)
DEFINE_JS_PARAMETER_TYPES(MachineType::AnyTagged())
- DECLARE_DESCRIPTOR(ConstructWithSpreadDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(ConstructWithSpreadDescriptor)
+
+ static constexpr inline auto registers();
};
-class ConstructWithSpread_BaselineDescriptor : public CallInterfaceDescriptor {
+class ConstructWithSpread_BaselineDescriptor
+ : public StaticCallInterfaceDescriptor<
+ ConstructWithSpread_BaselineDescriptor> {
public:
// Note: kSlot comes before kSpread since as an untagged value it must be
// passed in a register.
- DEFINE_JS_PARAMETERS(kSlot, kSpread)
+ DEFINE_JS_PARAMETERS_NO_CONTEXT(kSlot, kSpread)
DEFINE_JS_PARAMETER_TYPES(MachineType::UintPtr(), // kSlot
MachineType::AnyTagged()) // kSpread
- DECLARE_DESCRIPTOR(ConstructWithSpread_BaselineDescriptor,
- CallInterfaceDescriptor)
-
-#if V8_TARGET_ARCH_IA32
- static const bool kPassLastArgsOnStack = true;
-#else
- static const bool kPassLastArgsOnStack = false;
-#endif
-
- // Pass spread through the stack.
- static const int kStackArgumentsCount = kPassLastArgsOnStack ? 1 : 0;
+ DECLARE_DESCRIPTOR(ConstructWithSpread_BaselineDescriptor)
};
class ConstructWithSpread_WithFeedbackDescriptor
- : public CallInterfaceDescriptor {
+ : public StaticCallInterfaceDescriptor<
+ ConstructWithSpread_WithFeedbackDescriptor> {
public:
// Note: kSlot comes before kSpread since as an untagged value it must be
// passed in a register.
@@ -1237,21 +1265,24 @@ class ConstructWithSpread_WithFeedbackDescriptor
DEFINE_JS_PARAMETER_TYPES(MachineType::UintPtr(), // kSlot
MachineType::AnyTagged(), // kSpread
MachineType::AnyTagged()) // kFeedbackVector
- DECLARE_DESCRIPTOR(ConstructWithSpread_WithFeedbackDescriptor,
- CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(ConstructWithSpread_WithFeedbackDescriptor)
};
-class ConstructWithArrayLikeDescriptor : public CallInterfaceDescriptor {
+class ConstructWithArrayLikeDescriptor
+ : public StaticCallInterfaceDescriptor<ConstructWithArrayLikeDescriptor> {
public:
DEFINE_PARAMETERS(kTarget, kNewTarget, kArgumentsList)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kTarget
MachineType::AnyTagged(), // kNewTarget
MachineType::AnyTagged()) // kArgumentsList
- DECLARE_DESCRIPTOR(ConstructWithArrayLikeDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(ConstructWithArrayLikeDescriptor)
+
+ static constexpr inline auto registers();
};
class ConstructWithArrayLike_WithFeedbackDescriptor
- : public CallInterfaceDescriptor {
+ : public StaticCallInterfaceDescriptor<
+ ConstructWithArrayLike_WithFeedbackDescriptor> {
public:
DEFINE_PARAMETERS(kTarget, kNewTarget, kArgumentsList, kSlot, kFeedbackVector)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kTarget
@@ -1259,38 +1290,44 @@ class ConstructWithArrayLike_WithFeedbackDescriptor
MachineType::AnyTagged(), // kArgumentsList
MachineType::UintPtr(), // kSlot
MachineType::AnyTagged()) // kFeedbackVector
- DECLARE_DESCRIPTOR(ConstructWithArrayLike_WithFeedbackDescriptor,
- CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(ConstructWithArrayLike_WithFeedbackDescriptor)
};
// TODO(ishell): consider merging this with ArrayConstructorDescriptor
-class ConstructStubDescriptor : public CallInterfaceDescriptor {
+class ConstructStubDescriptor
+ : public StaticCallInterfaceDescriptor<ConstructStubDescriptor> {
public:
// TODO(jgruber): Remove the unused allocation site parameter.
DEFINE_JS_PARAMETERS(kAllocationSite)
DEFINE_JS_PARAMETER_TYPES(MachineType::AnyTagged())
// TODO(ishell): Use DECLARE_JS_COMPATIBLE_DESCRIPTOR if registers match
- DECLARE_DESCRIPTOR(ConstructStubDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(ConstructStubDescriptor)
+
+ static constexpr inline auto registers();
};
-class AbortDescriptor : public CallInterfaceDescriptor {
+class AbortDescriptor : public StaticCallInterfaceDescriptor<AbortDescriptor> {
public:
DEFINE_PARAMETERS_NO_CONTEXT(kMessageOrMessageId)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged())
- DECLARE_DESCRIPTOR(AbortDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(AbortDescriptor)
+
+ static constexpr inline auto registers();
};
-class ArrayConstructorDescriptor : public CallInterfaceDescriptor {
+class ArrayConstructorDescriptor
+ : public StaticJSCallInterfaceDescriptor<ArrayConstructorDescriptor> {
public:
DEFINE_JS_PARAMETERS(kAllocationSite)
DEFINE_JS_PARAMETER_TYPES(MachineType::AnyTagged())
- DECLARE_JS_COMPATIBLE_DESCRIPTOR(ArrayConstructorDescriptor,
- CallInterfaceDescriptor, 1)
+ DECLARE_JS_COMPATIBLE_DESCRIPTOR(ArrayConstructorDescriptor)
};
-class ArrayNArgumentsConstructorDescriptor : public CallInterfaceDescriptor {
+class ArrayNArgumentsConstructorDescriptor
+ : public StaticCallInterfaceDescriptor<
+ ArrayNArgumentsConstructorDescriptor> {
public:
// This descriptor declares only register arguments, while the respective
// number of JS arguments stays on the expression stack.
@@ -1300,12 +1337,14 @@ class ArrayNArgumentsConstructorDescriptor : public CallInterfaceDescriptor {
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kFunction,
MachineType::AnyTagged(), // kAllocationSite
MachineType::Int32()) // kActualArgumentsCount
- DECLARE_DESCRIPTOR(ArrayNArgumentsConstructorDescriptor,
- CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(ArrayNArgumentsConstructorDescriptor)
+
+ static constexpr auto registers();
};
class ArrayNoArgumentConstructorDescriptor
- : public ArrayNArgumentsConstructorDescriptor {
+ : public StaticCallInterfaceDescriptor<
+ ArrayNoArgumentConstructorDescriptor> {
public:
// This descriptor declares the same register arguments as the parent
// ArrayNArgumentsConstructorDescriptor and it declares indices for
@@ -1316,12 +1355,14 @@ class ArrayNoArgumentConstructorDescriptor
MachineType::AnyTagged(), // kAllocationSite
MachineType::Int32(), // kActualArgumentsCount
MachineType::AnyTagged()) // kFunctionParameter
- DECLARE_DESCRIPTOR(ArrayNoArgumentConstructorDescriptor,
- ArrayNArgumentsConstructorDescriptor)
+ DECLARE_DESCRIPTOR(ArrayNoArgumentConstructorDescriptor)
+
+ static constexpr auto registers();
};
class ArraySingleArgumentConstructorDescriptor
- : public ArrayNArgumentsConstructorDescriptor {
+ : public StaticCallInterfaceDescriptor<
+ ArraySingleArgumentConstructorDescriptor> {
public:
// This descriptor declares the same register arguments as the parent
// ArrayNArgumentsConstructorDescriptor and it declares indices for
@@ -1334,44 +1375,56 @@ class ArraySingleArgumentConstructorDescriptor
// JS arguments on the stack
MachineType::AnyTagged(), // kArraySizeSmiParameter
MachineType::AnyTagged()) // kReceiverParameter
- DECLARE_DESCRIPTOR(ArraySingleArgumentConstructorDescriptor,
- ArrayNArgumentsConstructorDescriptor)
+ DECLARE_DESCRIPTOR(ArraySingleArgumentConstructorDescriptor)
+
+ static constexpr auto registers();
};
-class CompareDescriptor : public CallInterfaceDescriptor {
+class CompareDescriptor
+ : public StaticCallInterfaceDescriptor<CompareDescriptor> {
public:
DEFINE_PARAMETERS(kLeft, kRight)
- DECLARE_DESCRIPTOR(CompareDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(CompareDescriptor)
+
+ static constexpr inline auto registers();
};
-class BinaryOpDescriptor : public CallInterfaceDescriptor {
+class BinaryOpDescriptor
+ : public StaticCallInterfaceDescriptor<BinaryOpDescriptor> {
public:
DEFINE_PARAMETERS(kLeft, kRight)
- DECLARE_DESCRIPTOR(BinaryOpDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(BinaryOpDescriptor)
+
+ static constexpr inline auto registers();
};
-class BinaryOp_BaselineDescriptor : public CallInterfaceDescriptor {
+class BinaryOp_BaselineDescriptor
+ : public StaticCallInterfaceDescriptor<BinaryOp_BaselineDescriptor> {
public:
DEFINE_PARAMETERS_NO_CONTEXT(kLeft, kRight, kSlot)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kLeft
MachineType::AnyTagged(), // kRight
MachineType::UintPtr()) // kSlot
- DECLARE_DESCRIPTOR(BinaryOp_BaselineDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(BinaryOp_BaselineDescriptor)
+
+ static constexpr inline auto registers();
};
// This descriptor is shared among String.p.charAt/charCodeAt/codePointAt
// as they all have the same interface.
-class StringAtDescriptor final : public CallInterfaceDescriptor {
+class StringAtDescriptor final
+ : public StaticCallInterfaceDescriptor<StringAtDescriptor> {
public:
DEFINE_PARAMETERS(kReceiver, kPosition)
// TODO(turbofan): Return untagged value here.
DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::TaggedSigned(), // result 1
MachineType::AnyTagged(), // kReceiver
MachineType::IntPtr()) // kPosition
- DECLARE_DESCRIPTOR(StringAtDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(StringAtDescriptor)
};
-class StringAtAsStringDescriptor final : public CallInterfaceDescriptor {
+class StringAtAsStringDescriptor final
+ : public StaticCallInterfaceDescriptor<StringAtAsStringDescriptor> {
public:
DEFINE_PARAMETERS(kReceiver, kPosition)
// TODO(turbofan): Return untagged value here.
@@ -1379,10 +1432,11 @@ class StringAtAsStringDescriptor final : public CallInterfaceDescriptor {
MachineType::TaggedPointer(), // result string
MachineType::AnyTagged(), // kReceiver
MachineType::IntPtr()) // kPosition
- DECLARE_DESCRIPTOR(StringAtAsStringDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(StringAtAsStringDescriptor)
};
-class StringSubstringDescriptor final : public CallInterfaceDescriptor {
+class StringSubstringDescriptor final
+ : public StaticCallInterfaceDescriptor<StringSubstringDescriptor> {
public:
DEFINE_PARAMETERS(kString, kFrom, kTo)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kString
@@ -1390,18 +1444,19 @@ class StringSubstringDescriptor final : public CallInterfaceDescriptor {
MachineType::IntPtr()) // kTo
// TODO(turbofan): Allow builtins to return untagged values.
- DECLARE_DESCRIPTOR(StringSubstringDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(StringSubstringDescriptor)
};
-class CppBuiltinAdaptorDescriptor : public CallInterfaceDescriptor {
+class CppBuiltinAdaptorDescriptor
+ : public StaticJSCallInterfaceDescriptor<CppBuiltinAdaptorDescriptor> {
public:
DEFINE_JS_PARAMETERS(kCFunction)
DEFINE_JS_PARAMETER_TYPES(MachineType::Pointer())
- DECLARE_JS_COMPATIBLE_DESCRIPTOR(CppBuiltinAdaptorDescriptor,
- CallInterfaceDescriptor, 1)
+ DECLARE_JS_COMPATIBLE_DESCRIPTOR(CppBuiltinAdaptorDescriptor)
};
-class CEntry1ArgvOnStackDescriptor : public CallInterfaceDescriptor {
+class CEntry1ArgvOnStackDescriptor
+ : public StaticCallInterfaceDescriptor<CEntry1ArgvOnStackDescriptor> {
public:
DEFINE_PARAMETERS(kArity, // register argument
kCFunction, // register argument
@@ -1415,10 +1470,13 @@ class CEntry1ArgvOnStackDescriptor : public CallInterfaceDescriptor {
MachineType::AnyTagged(), // kArgcSmi
MachineType::AnyTagged(), // kTargetCopy
MachineType::AnyTagged()) // kNewTargetCopy
- DECLARE_DESCRIPTOR(CEntry1ArgvOnStackDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(CEntry1ArgvOnStackDescriptor)
+
+ static constexpr auto registers();
};
-class ApiCallbackDescriptor : public CallInterfaceDescriptor {
+class ApiCallbackDescriptor
+ : public StaticCallInterfaceDescriptor<ApiCallbackDescriptor> {
public:
DEFINE_PARAMETERS_VARARGS(kApiFunctionAddress, kActualArgumentsCount,
kCallData, kHolder)
@@ -1428,44 +1486,56 @@ class ApiCallbackDescriptor : public CallInterfaceDescriptor {
MachineType::IntPtr(), // kActualArgumentsCount
MachineType::AnyTagged(), // kCallData
MachineType::AnyTagged()) // kHolder
- DECLARE_DESCRIPTOR(ApiCallbackDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(ApiCallbackDescriptor)
+
+ static constexpr inline auto registers();
};
-class ApiGetterDescriptor : public CallInterfaceDescriptor {
+class ApiGetterDescriptor
+ : public StaticCallInterfaceDescriptor<ApiGetterDescriptor> {
public:
DEFINE_PARAMETERS(kReceiver, kHolder, kCallback)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kReceiver
MachineType::AnyTagged(), // kHolder
MachineType::AnyTagged()) // kCallback
- DECLARE_DESCRIPTOR(ApiGetterDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(ApiGetterDescriptor)
- static const Register ReceiverRegister();
- static const Register HolderRegister();
- static const Register CallbackRegister();
+ static constexpr inline Register ReceiverRegister();
+ static constexpr inline Register HolderRegister();
+ static constexpr inline Register CallbackRegister();
+
+ static constexpr auto registers();
};
// TODO(turbofan): We should probably rename this to GrowFastElementsDescriptor.
-class GrowArrayElementsDescriptor : public CallInterfaceDescriptor {
+class GrowArrayElementsDescriptor
+ : public StaticCallInterfaceDescriptor<GrowArrayElementsDescriptor> {
public:
DEFINE_PARAMETERS(kObject, kKey)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kObject
MachineType::AnyTagged()) // kKey
- DECLARE_DESCRIPTOR(GrowArrayElementsDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(GrowArrayElementsDescriptor)
+
+ static constexpr inline Register ObjectRegister();
+ static constexpr inline Register KeyRegister();
- static const Register ObjectRegister();
- static const Register KeyRegister();
+ static constexpr auto registers();
};
class V8_EXPORT_PRIVATE TailCallOptimizedCodeSlotDescriptor
- : public CallInterfaceDescriptor {
+ : public StaticCallInterfaceDescriptor<
+ TailCallOptimizedCodeSlotDescriptor> {
public:
DEFINE_PARAMETERS(kOptimizedCodeEntry)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged()) // kAccumulator
- DECLARE_DESCRIPTOR(TailCallOptimizedCodeSlotDescriptor,
- CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(TailCallOptimizedCodeSlotDescriptor)
+
+ static constexpr auto registers();
};
-class BaselineOutOfLinePrologueDescriptor : public CallInterfaceDescriptor {
+class BaselineOutOfLinePrologueDescriptor
+ : public StaticCallInterfaceDescriptor<
+ BaselineOutOfLinePrologueDescriptor> {
public:
DEFINE_PARAMETERS_NO_CONTEXT(kCalleeContext, kClosure,
kJavaScriptCallArgCount, kStackFrameSize,
@@ -1477,32 +1547,31 @@ class BaselineOutOfLinePrologueDescriptor : public CallInterfaceDescriptor {
MachineType::Int32(), // kStackFrameSize
MachineType::AnyTagged(), // kJavaScriptCallNewTarget
MachineType::AnyTagged()) // kInterpreterBytecodeArray
- DECLARE_DESCRIPTOR(BaselineOutOfLinePrologueDescriptor,
- CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(BaselineOutOfLinePrologueDescriptor)
-#if V8_TARGET_ARCH_IA32
- static const bool kPassLastArgsOnStack = true;
-#else
- static const bool kPassLastArgsOnStack = false;
-#endif
+ static constexpr inline auto registers();
- // Pass bytecode array through the stack.
- static const int kStackArgumentsCount = kPassLastArgsOnStack ? 1 : 0;
+ // We pass the context manually, so we have one extra register.
+ static constexpr int kMaxRegisterParams =
+ StaticCallInterfaceDescriptor::kMaxRegisterParams + 1;
};
-class BaselineLeaveFrameDescriptor : public CallInterfaceDescriptor {
+class BaselineLeaveFrameDescriptor
+ : public StaticCallInterfaceDescriptor<BaselineLeaveFrameDescriptor> {
public:
DEFINE_PARAMETERS_NO_CONTEXT(kParamsSize, kWeight)
DEFINE_PARAMETER_TYPES(MachineType::Int32(), // kParamsSize
MachineType::Int32()) // kWeight
- DECLARE_DESCRIPTOR(BaselineLeaveFrameDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(BaselineLeaveFrameDescriptor)
+
+ static constexpr inline Register ParamsSizeRegister();
+ static constexpr inline Register WeightRegister();
- static const Register ParamsSizeRegister();
- static const Register WeightRegister();
+ static constexpr inline auto registers();
};
class V8_EXPORT_PRIVATE InterpreterDispatchDescriptor
- : public CallInterfaceDescriptor {
+ : public StaticCallInterfaceDescriptor<InterpreterDispatchDescriptor> {
public:
DEFINE_PARAMETERS(kAccumulator, kBytecodeOffset, kBytecodeArray,
kDispatchTable)
@@ -1510,21 +1579,27 @@ class V8_EXPORT_PRIVATE InterpreterDispatchDescriptor
MachineType::IntPtr(), // kBytecodeOffset
MachineType::AnyTagged(), // kBytecodeArray
MachineType::IntPtr()) // kDispatchTable
- DECLARE_DESCRIPTOR(InterpreterDispatchDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(InterpreterDispatchDescriptor)
+
+ static constexpr inline auto registers();
};
-class InterpreterPushArgsThenCallDescriptor : public CallInterfaceDescriptor {
+class InterpreterPushArgsThenCallDescriptor
+ : public StaticCallInterfaceDescriptor<
+ InterpreterPushArgsThenCallDescriptor> {
public:
DEFINE_PARAMETERS(kNumberOfArguments, kFirstArgument, kFunction)
DEFINE_PARAMETER_TYPES(MachineType::Int32(), // kNumberOfArguments
MachineType::Pointer(), // kFirstArgument
MachineType::AnyTagged()) // kFunction
- DECLARE_DESCRIPTOR(InterpreterPushArgsThenCallDescriptor,
- CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(InterpreterPushArgsThenCallDescriptor)
+
+ static constexpr inline auto registers();
};
class InterpreterPushArgsThenConstructDescriptor
- : public CallInterfaceDescriptor {
+ : public StaticCallInterfaceDescriptor<
+ InterpreterPushArgsThenConstructDescriptor> {
public:
DEFINE_PARAMETERS(kNumberOfArguments, kFirstArgument, kConstructor,
kNewTarget, kFeedbackElement)
@@ -1533,20 +1608,13 @@ class InterpreterPushArgsThenConstructDescriptor
MachineType::AnyTagged(), // kConstructor
MachineType::AnyTagged(), // kNewTarget
MachineType::AnyTagged()) // kFeedbackElement
- DECLARE_DESCRIPTOR(InterpreterPushArgsThenConstructDescriptor,
- CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(InterpreterPushArgsThenConstructDescriptor)
-#if V8_TARGET_ARCH_IA32
- static const bool kPassLastArgsOnStack = true;
-#else
- static const bool kPassLastArgsOnStack = false;
-#endif
-
- // Pass constructor, new target and feedback element through the stack.
- static const int kStackArgumentsCount = kPassLastArgsOnStack ? 3 : 0;
+ static constexpr inline auto registers();
};
-class InterpreterCEntry1Descriptor : public CallInterfaceDescriptor {
+class InterpreterCEntry1Descriptor
+ : public StaticCallInterfaceDescriptor<InterpreterCEntry1Descriptor> {
public:
DEFINE_RESULT_AND_PARAMETERS(1, kNumberOfArguments, kFirstArgument,
kFunctionEntry)
@@ -1554,10 +1622,13 @@ class InterpreterCEntry1Descriptor : public CallInterfaceDescriptor {
MachineType::Int32(), // kNumberOfArguments
MachineType::Pointer(), // kFirstArgument
MachineType::Pointer()) // kFunctionEntry
- DECLARE_DESCRIPTOR(InterpreterCEntry1Descriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(InterpreterCEntry1Descriptor)
+
+ static constexpr auto registers();
};
-class InterpreterCEntry2Descriptor : public CallInterfaceDescriptor {
+class InterpreterCEntry2Descriptor
+ : public StaticCallInterfaceDescriptor<InterpreterCEntry2Descriptor> {
public:
DEFINE_RESULT_AND_PARAMETERS(2, kNumberOfArguments, kFirstArgument,
kFunctionEntry)
@@ -1566,10 +1637,13 @@ class InterpreterCEntry2Descriptor : public CallInterfaceDescriptor {
MachineType::Int32(), // kNumberOfArguments
MachineType::Pointer(), // kFirstArgument
MachineType::Pointer()) // kFunctionEntry
- DECLARE_DESCRIPTOR(InterpreterCEntry2Descriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(InterpreterCEntry2Descriptor)
+
+ static constexpr auto registers();
};
-class ForInPrepareDescriptor : public CallInterfaceDescriptor {
+class ForInPrepareDescriptor
+ : public StaticCallInterfaceDescriptor<ForInPrepareDescriptor> {
public:
DEFINE_RESULT_AND_PARAMETERS(2, kEnumerator, kVectorIndex, kFeedbackVector)
DEFINE_RESULT_AND_PARAMETER_TYPES(
@@ -1578,120 +1652,133 @@ class ForInPrepareDescriptor : public CallInterfaceDescriptor {
MachineType::AnyTagged(), // kEnumerator
MachineType::TaggedSigned(), // kVectorIndex
MachineType::AnyTagged()) // kFeedbackVector
- DECLARE_DESCRIPTOR(ForInPrepareDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(ForInPrepareDescriptor)
};
-class ResumeGeneratorDescriptor final : public CallInterfaceDescriptor {
+class ResumeGeneratorDescriptor final
+ : public StaticCallInterfaceDescriptor<ResumeGeneratorDescriptor> {
public:
DEFINE_PARAMETERS(kValue, kGenerator)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kValue
MachineType::AnyTagged()) // kGenerator
- DECLARE_DESCRIPTOR(ResumeGeneratorDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(ResumeGeneratorDescriptor)
+
+ static constexpr inline auto registers();
};
-class ResumeGeneratorBaselineDescriptor final : public CallInterfaceDescriptor {
+class ResumeGeneratorBaselineDescriptor final
+ : public StaticCallInterfaceDescriptor<ResumeGeneratorBaselineDescriptor> {
public:
- DEFINE_PARAMETERS(kGeneratorObject, kRegisterCount)
+ DEFINE_PARAMETERS_NO_CONTEXT(kGeneratorObject, kRegisterCount)
DEFINE_RESULT_AND_PARAMETER_TYPES(
MachineType::TaggedSigned(), // return type
MachineType::AnyTagged(), // kGeneratorObject
MachineType::IntPtr(), // kRegisterCount
)
- DECLARE_DESCRIPTOR(ResumeGeneratorBaselineDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(ResumeGeneratorBaselineDescriptor)
};
class SuspendGeneratorBaselineDescriptor final
- : public CallInterfaceDescriptor {
+ : public StaticCallInterfaceDescriptor<SuspendGeneratorBaselineDescriptor> {
public:
- DEFINE_PARAMETERS(kGeneratorObject, kSuspendId, kBytecodeOffset,
- kRegisterCount)
+ DEFINE_PARAMETERS_NO_CONTEXT(kGeneratorObject, kSuspendId, kBytecodeOffset,
+ kRegisterCount)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kGeneratorObject
MachineType::IntPtr(), // kSuspendId
MachineType::IntPtr(), // kBytecodeOffset
MachineType::IntPtr(), // kRegisterCount
)
- DECLARE_DESCRIPTOR(SuspendGeneratorBaselineDescriptor,
- CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(SuspendGeneratorBaselineDescriptor)
};
-class FrameDropperTrampolineDescriptor final : public CallInterfaceDescriptor {
- public:
- DEFINE_PARAMETERS(kRestartFp)
- DEFINE_PARAMETER_TYPES(MachineType::Pointer())
- DECLARE_DESCRIPTOR(FrameDropperTrampolineDescriptor, CallInterfaceDescriptor)
-};
-
-class RunMicrotasksEntryDescriptor final : public CallInterfaceDescriptor {
+class RunMicrotasksEntryDescriptor final
+ : public StaticCallInterfaceDescriptor<RunMicrotasksEntryDescriptor> {
public:
DEFINE_PARAMETERS_ENTRY(kRootRegisterValue, kMicrotaskQueue)
DEFINE_PARAMETER_TYPES(MachineType::Pointer(), // kRootRegisterValue
MachineType::Pointer()) // kMicrotaskQueue
- DECLARE_DESCRIPTOR(RunMicrotasksEntryDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(RunMicrotasksEntryDescriptor)
+
+ static constexpr inline auto registers();
};
-class RunMicrotasksDescriptor final : public CallInterfaceDescriptor {
+class RunMicrotasksDescriptor final
+ : public StaticCallInterfaceDescriptor<RunMicrotasksDescriptor> {
public:
DEFINE_PARAMETERS(kMicrotaskQueue)
DEFINE_PARAMETER_TYPES(MachineType::Pointer())
- DECLARE_DESCRIPTOR(RunMicrotasksDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(RunMicrotasksDescriptor)
- static Register MicrotaskQueueRegister();
+ static constexpr inline Register MicrotaskQueueRegister();
};
-class WasmFloat32ToNumberDescriptor final : public CallInterfaceDescriptor {
+class WasmFloat32ToNumberDescriptor final
+ : public StaticCallInterfaceDescriptor<WasmFloat32ToNumberDescriptor> {
public:
DEFINE_PARAMETERS_NO_CONTEXT(kValue)
DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::AnyTagged(), // result
MachineType::Float32()) // value
- DECLARE_DESCRIPTOR(WasmFloat32ToNumberDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(WasmFloat32ToNumberDescriptor)
+
+#if V8_TARGET_ARCH_IA32
+ // We need a custom descriptor on ia32 to avoid using xmm0.
+ static constexpr inline auto registers();
+#endif
};
-class WasmFloat64ToNumberDescriptor final : public CallInterfaceDescriptor {
+class WasmFloat64ToNumberDescriptor final
+ : public StaticCallInterfaceDescriptor<WasmFloat64ToNumberDescriptor> {
public:
DEFINE_PARAMETERS_NO_CONTEXT(kValue)
DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::AnyTagged(), // result
MachineType::Float64()) // value
- DECLARE_DESCRIPTOR(WasmFloat64ToNumberDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(WasmFloat64ToNumberDescriptor)
+
+#if V8_TARGET_ARCH_IA32
+ // We need a custom descriptor on ia32 to avoid using xmm0.
+ static constexpr inline auto registers();
+#endif
};
class V8_EXPORT_PRIVATE I64ToBigIntDescriptor final
- : public CallInterfaceDescriptor {
+ : public StaticCallInterfaceDescriptor<I64ToBigIntDescriptor> {
public:
DEFINE_PARAMETERS_NO_CONTEXT(kArgument)
DEFINE_PARAMETER_TYPES(MachineType::Int64()) // kArgument
- DECLARE_DESCRIPTOR(I64ToBigIntDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(I64ToBigIntDescriptor)
};
// 32-bit version of the I64ToBigIntDescriptor call interface descriptor
class V8_EXPORT_PRIVATE I32PairToBigIntDescriptor final
- : public CallInterfaceDescriptor {
+ : public StaticCallInterfaceDescriptor<I32PairToBigIntDescriptor> {
public:
DEFINE_PARAMETERS_NO_CONTEXT(kLow, kHigh)
DEFINE_PARAMETER_TYPES(MachineType::Uint32(), // kLow
MachineType::Uint32()) // kHigh
- DECLARE_DESCRIPTOR(I32PairToBigIntDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(I32PairToBigIntDescriptor)
};
class V8_EXPORT_PRIVATE BigIntToI64Descriptor final
- : public CallInterfaceDescriptor {
+ : public StaticCallInterfaceDescriptor<BigIntToI64Descriptor> {
public:
DEFINE_PARAMETERS(kArgument)
DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::Int64(), // result 1
MachineType::AnyTagged()) // kArgument
- DECLARE_DESCRIPTOR(BigIntToI64Descriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(BigIntToI64Descriptor)
};
class V8_EXPORT_PRIVATE BigIntToI32PairDescriptor final
- : public CallInterfaceDescriptor {
+ : public StaticCallInterfaceDescriptor<BigIntToI32PairDescriptor> {
public:
DEFINE_RESULT_AND_PARAMETERS(2, kArgument)
DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::Uint32(), // result 1
MachineType::Uint32(), // result 2
MachineType::AnyTagged()) // kArgument
- DECLARE_DESCRIPTOR(BigIntToI32PairDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(BigIntToI32PairDescriptor)
};
-class WasmI32AtomicWait32Descriptor final : public CallInterfaceDescriptor {
+class WasmI32AtomicWait32Descriptor final
+ : public StaticCallInterfaceDescriptor<WasmI32AtomicWait32Descriptor> {
public:
DEFINE_PARAMETERS_NO_CONTEXT(kAddress, kExpectedValue, kTimeoutLow,
kTimeoutHigh)
@@ -1700,36 +1787,30 @@ class WasmI32AtomicWait32Descriptor final : public CallInterfaceDescriptor {
MachineType::Int32(), // kExpectedValue
MachineType::Uint32(), // kTimeoutLow
MachineType::Uint32()) // kTimeoutHigh
- DECLARE_DESCRIPTOR(WasmI32AtomicWait32Descriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(WasmI32AtomicWait32Descriptor)
};
-class WasmI64AtomicWait32Descriptor final : public CallInterfaceDescriptor {
+class WasmI64AtomicWait32Descriptor final
+ : public StaticCallInterfaceDescriptor<WasmI64AtomicWait32Descriptor> {
public:
DEFINE_PARAMETERS_NO_CONTEXT(kAddress, kExpectedValueLow, kExpectedValueHigh,
kTimeoutLow, kTimeoutHigh)
- DEFINE_RESULT_AND_PARAMETER_TYPES_WITH_FLAG(
- CallInterfaceDescriptorData::kNoStackScan, // allow untagged stack params
- MachineType::Uint32(), // result 1
- MachineType::Uint32(), // kAddress
- MachineType::Uint32(), // kExpectedValueLow
- MachineType::Uint32(), // kExpectedValueHigh
- MachineType::Uint32(), // kTimeoutLow
- MachineType::Uint32()) // kTimeoutHigh
+ static constexpr bool kNoStackScan = true;
-#if V8_TARGET_ARCH_IA32
- static constexpr bool kPassLastArgOnStack = true;
-#else
- static constexpr bool kPassLastArgOnStack = false;
-#endif
-
- // Pass the last parameter through the stack.
- static constexpr int kStackArgumentsCount = kPassLastArgOnStack ? 1 : 0;
+ DEFINE_RESULT_AND_PARAMETER_TYPES(
+ MachineType::Uint32(), // result 1
+ MachineType::Uint32(), // kAddress
+ MachineType::Uint32(), // kExpectedValueLow
+ MachineType::Uint32(), // kExpectedValueHigh
+ MachineType::Uint32(), // kTimeoutLow
+ MachineType::Uint32()) // kTimeoutHigh
- DECLARE_DESCRIPTOR(WasmI64AtomicWait32Descriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(WasmI64AtomicWait32Descriptor)
};
-class CloneObjectWithVectorDescriptor final : public CallInterfaceDescriptor {
+class CloneObjectWithVectorDescriptor final
+ : public StaticCallInterfaceDescriptor<CloneObjectWithVectorDescriptor> {
public:
DEFINE_PARAMETERS(kSource, kFlags, kSlot, kVector)
DEFINE_RESULT_AND_PARAMETER_TYPES(MachineType::TaggedPointer(), // result 1
@@ -1737,108 +1818,142 @@ class CloneObjectWithVectorDescriptor final : public CallInterfaceDescriptor {
MachineType::TaggedSigned(), // kFlags
MachineType::TaggedSigned(), // kSlot
MachineType::AnyTagged()) // kVector
- DECLARE_DESCRIPTOR(CloneObjectWithVectorDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(CloneObjectWithVectorDescriptor)
};
-class CloneObjectBaselineDescriptor final : public CallInterfaceDescriptor {
+class CloneObjectBaselineDescriptor final
+ : public StaticCallInterfaceDescriptor<CloneObjectBaselineDescriptor> {
public:
DEFINE_PARAMETERS_NO_CONTEXT(kSource, kFlags, kSlot)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kSource
MachineType::TaggedSigned(), // kFlags
MachineType::TaggedSigned()) // kSlot
- DECLARE_DESCRIPTOR(CloneObjectBaselineDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(CloneObjectBaselineDescriptor)
};
-class BinaryOp_WithFeedbackDescriptor : public CallInterfaceDescriptor {
+class BinaryOp_WithFeedbackDescriptor
+ : public StaticCallInterfaceDescriptor<BinaryOp_WithFeedbackDescriptor> {
public:
DEFINE_PARAMETERS(kLeft, kRight, kSlot, kFeedbackVector)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kLeft
MachineType::AnyTagged(), // kRight
MachineType::UintPtr(), // kSlot
MachineType::AnyTagged()) // kFeedbackVector
- DECLARE_DESCRIPTOR(BinaryOp_WithFeedbackDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(BinaryOp_WithFeedbackDescriptor)
+};
+
+class CallTrampoline_Baseline_CompactDescriptor
+ : public StaticCallInterfaceDescriptor<
+ CallTrampoline_Baseline_CompactDescriptor> {
+ public:
+ using ArgumentCountField = base::BitField<uint32_t, 0, 8>;
+ using SlotField = base::BitField<uintptr_t, 8, 24>;
+
+ static bool EncodeBitField(uint32_t argc, uintptr_t slot, uint32_t* out) {
+ if (ArgumentCountField::is_valid(argc) && SlotField::is_valid(slot)) {
+ *out = ArgumentCountField::encode(argc) | SlotField::encode(slot);
+ return true;
+ }
+ return false;
+ }
+
+ DEFINE_PARAMETERS_NO_CONTEXT_VARARGS(kFunction, kBitField)
+ DEFINE_PARAMETER_TYPES(
+ MachineType::AnyTagged(), // kFunction
+ MachineType::Uint32()) // kBitField = ArgumentCountField | SlotField
+ DECLARE_DESCRIPTOR(CallTrampoline_Baseline_CompactDescriptor)
};
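
The new CallTrampoline_Baseline_CompactDescriptor packs the argument count and the feedback slot into a single 32-bit operand: EncodeBitField only succeeds when both values fit their fields, and the callee can later decode each field out of kBitField. A small self-contained sketch of that encode/decode round trip, using a simplified BitField in place of v8::base::BitField (the 8/24-bit split mirrors the ArgumentCountField and SlotField declarations above; this is an illustration, not V8's real template):

#include <cassert>
#include <cstdint>

// Simplified stand-in for v8::base::BitField<T, shift, size>.
template <typename T, int kShift, int kSize>
struct BitField {
  static constexpr uint32_t kMask = ((uint32_t{1} << kSize) - 1) << kShift;
  static constexpr bool is_valid(T value) {
    return (static_cast<uint32_t>(value) & ~((uint32_t{1} << kSize) - 1)) == 0;
  }
  static constexpr uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << kShift;
  }
  static constexpr T decode(uint32_t packed) {
    return static_cast<T>((packed & kMask) >> kShift);
  }
};

using ArgumentCountField = BitField<uint32_t, 0, 8>;   // low 8 bits: argc
using SlotField = BitField<uint32_t, 8, 24>;           // next 24 bits: slot

int main() {
  uint32_t argc = 3, slot = 17, packed = 0;
  if (ArgumentCountField::is_valid(argc) && SlotField::is_valid(slot)) {
    packed = ArgumentCountField::encode(argc) | SlotField::encode(slot);
  }
  assert(ArgumentCountField::decode(packed) == 3);
  assert(SlotField::decode(packed) == 17);
  return 0;
}

If either value is too large for its field, the encode step is skipped, which is why the descriptor keeps the non-compact CallTrampoline_BaselineDescriptor as a fallback.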
-class CallTrampoline_BaselineDescriptor : public CallInterfaceDescriptor {
+class CallTrampoline_BaselineDescriptor
+ : public StaticCallInterfaceDescriptor<CallTrampoline_BaselineDescriptor> {
public:
- DEFINE_PARAMETERS_VARARGS(kFunction, kActualArgumentsCount, kSlot)
+ DEFINE_PARAMETERS_NO_CONTEXT_VARARGS(kFunction, kActualArgumentsCount, kSlot)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kFunction
MachineType::Int32(), // kActualArgumentsCount
MachineType::UintPtr()) // kSlot
- DECLARE_DESCRIPTOR(CallTrampoline_BaselineDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(CallTrampoline_BaselineDescriptor)
};
-class CallTrampoline_WithFeedbackDescriptor : public CallInterfaceDescriptor {
+class CallTrampoline_WithFeedbackDescriptor
+ : public StaticCallInterfaceDescriptor<
+ CallTrampoline_WithFeedbackDescriptor> {
public:
DEFINE_PARAMETERS_VARARGS(kFunction, kActualArgumentsCount, kSlot,
- kFeedbackVector)
+ kFeedbackVector, kReceiver)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kFunction
MachineType::Int32(), // kActualArgumentsCount
MachineType::UintPtr(), // kSlot
- MachineType::AnyTagged()) // kFeedbackVector
- DECLARE_DESCRIPTOR(CallTrampoline_WithFeedbackDescriptor,
- CallInterfaceDescriptor)
+ MachineType::AnyTagged(), // kFeedbackVector
+ MachineType::AnyTagged()) // kReceiver
+ DECLARE_DESCRIPTOR(CallTrampoline_WithFeedbackDescriptor)
};
-class Compare_WithFeedbackDescriptor : public CallInterfaceDescriptor {
+class Compare_WithFeedbackDescriptor
+ : public StaticCallInterfaceDescriptor<Compare_WithFeedbackDescriptor> {
public:
DEFINE_PARAMETERS(kLeft, kRight, kSlot, kFeedbackVector)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kLeft
MachineType::AnyTagged(), // kRight
MachineType::UintPtr(), // kSlot
MachineType::AnyTagged()) // kFeedbackVector
- DECLARE_DESCRIPTOR(Compare_WithFeedbackDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(Compare_WithFeedbackDescriptor)
};
-class Compare_BaselineDescriptor : public CallInterfaceDescriptor {
+class Compare_BaselineDescriptor
+ : public StaticCallInterfaceDescriptor<Compare_BaselineDescriptor> {
public:
DEFINE_PARAMETERS_NO_CONTEXT(kLeft, kRight, kSlot)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kLeft
MachineType::AnyTagged(), // kRight
MachineType::UintPtr()) // kSlot
- DECLARE_DESCRIPTOR(Compare_BaselineDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(Compare_BaselineDescriptor)
+
+ static constexpr inline auto registers();
};
-class Construct_BaselineDescriptor : public CallInterfaceDescriptor {
+class Construct_BaselineDescriptor
+ : public StaticJSCallInterfaceDescriptor<Construct_BaselineDescriptor> {
public:
DEFINE_JS_PARAMETERS_NO_CONTEXT(kSlot)
DEFINE_JS_PARAMETER_TYPES(MachineType::UintPtr()) // kSlot
- DECLARE_JS_COMPATIBLE_DESCRIPTOR(Construct_BaselineDescriptor,
- CallInterfaceDescriptor, 1)
+ DECLARE_JS_COMPATIBLE_DESCRIPTOR(Construct_BaselineDescriptor)
};
-class Construct_WithFeedbackDescriptor : public CallInterfaceDescriptor {
+class Construct_WithFeedbackDescriptor
+ : public StaticJSCallInterfaceDescriptor<Construct_WithFeedbackDescriptor> {
public:
// kSlot is passed in a register, kFeedbackVector on the stack.
DEFINE_JS_PARAMETERS(kSlot, kFeedbackVector)
DEFINE_JS_PARAMETER_TYPES(MachineType::UintPtr(), // kSlot
MachineType::AnyTagged()) // kFeedbackVector
- DECLARE_JS_COMPATIBLE_DESCRIPTOR(Construct_WithFeedbackDescriptor,
- CallInterfaceDescriptor, 1)
+ DECLARE_JS_COMPATIBLE_DESCRIPTOR(Construct_WithFeedbackDescriptor)
};
-class UnaryOp_WithFeedbackDescriptor : public CallInterfaceDescriptor {
+class UnaryOp_WithFeedbackDescriptor
+ : public StaticCallInterfaceDescriptor<UnaryOp_WithFeedbackDescriptor> {
public:
DEFINE_PARAMETERS(kValue, kSlot, kFeedbackVector)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kValue
MachineType::UintPtr(), // kSlot
MachineType::AnyTagged()) // kFeedbackVector
- DECLARE_DESCRIPTOR(UnaryOp_WithFeedbackDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(UnaryOp_WithFeedbackDescriptor)
};
-class UnaryOp_BaselineDescriptor : public CallInterfaceDescriptor {
+class UnaryOp_BaselineDescriptor
+ : public StaticCallInterfaceDescriptor<UnaryOp_BaselineDescriptor> {
public:
- DEFINE_PARAMETERS(kValue, kSlot)
+ DEFINE_PARAMETERS_NO_CONTEXT(kValue, kSlot)
DEFINE_PARAMETER_TYPES(MachineType::AnyTagged(), // kValue
MachineType::UintPtr()) // kSlot
- DECLARE_DESCRIPTOR(UnaryOp_BaselineDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR(UnaryOp_BaselineDescriptor)
};
-#define DEFINE_TFS_BUILTIN_DESCRIPTOR(Name, ...) \
- class Name##Descriptor : public CallInterfaceDescriptor { \
- public: \
- DEFINE_PARAMETERS(__VA_ARGS__) \
- DECLARE_DEFAULT_DESCRIPTOR(Name##Descriptor, CallInterfaceDescriptor) \
+#define DEFINE_TFS_BUILTIN_DESCRIPTOR(Name, ...) \
+ class Name##Descriptor \
+ : public StaticCallInterfaceDescriptor<Name##Descriptor> { \
+ public: \
+ DEFINE_PARAMETERS(__VA_ARGS__) \
+ DECLARE_DEFAULT_DESCRIPTOR(Name##Descriptor) \
};
BUILTIN_LIST_TFS(DEFINE_TFS_BUILTIN_DESCRIPTOR)
#undef DEFINE_TFS_BUILTIN_DESCRIPTOR
@@ -1852,11 +1967,12 @@ BUILTIN_LIST_TFS(DEFINE_TFS_BUILTIN_DESCRIPTOR)
#undef DECLARE_DESCRIPTOR_WITH_BASE
#undef DECLARE_DESCRIPTOR
#undef DECLARE_JS_COMPATIBLE_DESCRIPTOR
-#undef DEFINE_FLAGS_AND_RESULT_AND_PARAMETERS
#undef DEFINE_RESULT_AND_PARAMETERS
+#undef DEFINE_PARAMETERS_ENTRY
#undef DEFINE_PARAMETERS
#undef DEFINE_PARAMETERS_VARARGS
#undef DEFINE_PARAMETERS_NO_CONTEXT
+#undef DEFINE_RESULT_AND_PARAMETERS_NO_CONTEXT
#undef DEFINE_RESULT_AND_PARAMETER_TYPES
#undef DEFINE_PARAMETER_TYPES
#undef DEFINE_JS_PARAMETERS
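
The hunks above migrate every descriptor in this header from the dynamic CallInterfaceDescriptor base onto the CRTP template StaticCallInterfaceDescriptor<Derived>, so per-descriptor data such as registers() and the parameter count can be resolved at compile time rather than through per-instance descriptor data. A minimal, self-contained sketch of that pattern, assuming simplified stand-ins for Register, RegisterArray and the descriptor base (none of these are V8's real definitions, and the register codes are only illustrative):

#include <array>
#include <cstdio>

struct Register { int code; };

// Simplified analogue of V8's RegisterArray helper.
template <typename... Regs>
constexpr std::array<Register, sizeof...(Regs)> RegisterArray(Regs... regs) {
  return {regs...};
}

// CRTP base: everything is resolved statically against Derived, so no
// virtual dispatch or runtime descriptor table is needed.
template <typename Derived>
struct StaticDescriptor {
  static constexpr auto GetRegisters() { return Derived::registers(); }
  static constexpr int ParameterCount() {
    return static_cast<int>(Derived::registers().size());
  }
};

struct FastNewObjectLikeDescriptor
    : StaticDescriptor<FastNewObjectLikeDescriptor> {
  static constexpr auto registers() {
    // Codes 5 and 7 happen to be MIPS a1/a3; chosen only for illustration.
    return RegisterArray(Register{5} /* target */, Register{7} /* new target */);
  }
};

int main() {
  constexpr auto regs = FastNewObjectLikeDescriptor::GetRegisters();
  static_assert(regs.size() == 2, "two register parameters");
  std::printf("first register code: %d\n", regs[0].code);
  return 0;
}

The per-architecture -inl.h files added later in this commit (for example the new MIPS one below) supply exactly these kinds of constexpr registers() bodies for each descriptor.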
diff --git a/chromium/v8/src/codegen/machine-type.cc b/chromium/v8/src/codegen/machine-type.cc
index 86fc480ea5b..1972c41b248 100644
--- a/chromium/v8/src/codegen/machine-type.cc
+++ b/chromium/v8/src/codegen/machine-type.cc
@@ -55,6 +55,8 @@ const char* MachineReprToString(MachineRepresentation rep) {
return "kRepCompressedPointer";
case MachineRepresentation::kCompressed:
return "kRepCompressed";
+ case MachineRepresentation::kMapWord:
+ return "kRepMapWord";
}
UNREACHABLE();
}
diff --git a/chromium/v8/src/codegen/machine-type.h b/chromium/v8/src/codegen/machine-type.h
index ac21d3c3e60..7a006084590 100644
--- a/chromium/v8/src/codegen/machine-type.h
+++ b/chromium/v8/src/codegen/machine-type.h
@@ -22,6 +22,19 @@ enum class MachineRepresentation : uint8_t {
kWord16,
kWord32,
kWord64,
+ // (uncompressed) MapWord
+ // kMapWord is the representation of a map word, i.e. a map in the header
+ // of a HeapObject.
+ // If V8_MAP_PACKING is disabled, a map word is just the map itself. Hence
+ // kMapWord is equivalent to kTaggedPointer -- in fact it will be
+ // translated to kTaggedPointer during memory lowering.
+ // If V8_MAP_PACKING is enabled, a map word is a Smi-like encoding of a map
+ // and some meta data. Memory lowering of kMapWord loads/stores
+ // produces low-level kTagged loads/stores plus the necessary
+ // decode/encode operations.
+ // In either case, the kMapWord representation is not used after memory
+ // lowering.
+ kMapWord,
kTaggedSigned, // (uncompressed) Smi
kTaggedPointer, // (uncompressed) HeapObject
kTagged, // (uncompressed) Object (Smi or HeapObject)
@@ -102,6 +115,10 @@ class MachineType {
return representation() == MachineRepresentation::kNone;
}
+ constexpr bool IsMapWord() const {
+ return representation() == MachineRepresentation::kMapWord;
+ }
+
constexpr bool IsSigned() const {
return semantic() == MachineSemantic::kInt32 ||
semantic() == MachineSemantic::kInt64;
@@ -187,6 +204,9 @@ class MachineType {
return MachineType(MachineRepresentation::kTaggedPointer,
MachineSemantic::kAny);
}
+ constexpr static MachineType MapInHeader() {
+ return MachineType(MachineRepresentation::kMapWord, MachineSemantic::kAny);
+ }
constexpr static MachineType TaggedSigned() {
return MachineType(MachineRepresentation::kTaggedSigned,
MachineSemantic::kInt32);
@@ -283,7 +303,8 @@ inline bool IsFloatingPoint(MachineRepresentation rep) {
inline bool CanBeTaggedPointer(MachineRepresentation rep) {
return rep == MachineRepresentation::kTagged ||
- rep == MachineRepresentation::kTaggedPointer;
+ rep == MachineRepresentation::kTaggedPointer ||
+ rep == MachineRepresentation::kMapWord;
}
inline bool CanBeTaggedSigned(MachineRepresentation rep) {
@@ -328,16 +349,12 @@ V8_EXPORT_PRIVATE inline constexpr int ElementSizeLog2Of(
case MachineRepresentation::kTaggedSigned:
case MachineRepresentation::kTaggedPointer:
case MachineRepresentation::kTagged:
+ case MachineRepresentation::kMapWord:
case MachineRepresentation::kCompressedPointer:
case MachineRepresentation::kCompressed:
return kTaggedSizeLog2;
default:
-#if V8_HAS_CXX14_CONSTEXPR
UNREACHABLE();
-#else
- // Return something for older compilers.
- return -1;
-#endif
}
}
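
The new kMapWord representation above models the map slot in an object header: without V8_MAP_PACKING it lowers to an ordinary kTaggedPointer access, while with map packing every load or store additionally needs a decode or encode step. The sketch below only illustrates that general pack/unpack idea with a made-up low-bit scheme; it is not V8's actual map-packing encoding, it just shows why the lowered code pairs a raw tagged access with a decode/encode operation.

#include <cassert>
#include <cstdint>

constexpr uintptr_t kMetaBits = 2;  // low bits reserved for metadata (illustrative)
constexpr uintptr_t kMetaMask = (uintptr_t{1} << kMetaBits) - 1;

constexpr uintptr_t EncodeMapWord(uintptr_t map_ptr, uintptr_t meta) {
  // Assumes map_ptr is aligned, so its low bits are free to carry metadata.
  return (map_ptr & ~kMetaMask) | (meta & kMetaMask);
}

constexpr uintptr_t DecodeMapPointer(uintptr_t map_word) {
  return map_word & ~kMetaMask;
}

int main() {
  alignas(8) static int dummy_map;  // stand-in for a Map object
  uintptr_t map_ptr = reinterpret_cast<uintptr_t>(&dummy_map);
  uintptr_t word = EncodeMapWord(map_ptr, /*meta=*/1);  // "store" side: encode
  assert(DecodeMapPointer(word) == map_ptr);            // "load" side: decode
  return 0;
}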
diff --git a/chromium/v8/src/codegen/macro-assembler.h b/chromium/v8/src/codegen/macro-assembler.h
index ce3ccbf332b..484ec9e4b2c 100644
--- a/chromium/v8/src/codegen/macro-assembler.h
+++ b/chromium/v8/src/codegen/macro-assembler.h
@@ -10,7 +10,7 @@
#include "src/heap/heap.h"
// Helper types to make boolean flag easier to read at call-site.
-enum InvokeFlag { CALL_FUNCTION, JUMP_FUNCTION };
+enum class InvokeType { kCall, kJump };
// Flags used for the AllocateInNewSpace functions.
enum AllocationFlags {
@@ -28,6 +28,10 @@ enum AllocationFlags {
PRETENURE = 1 << 3,
};
+enum class RememberedSetAction { kOmit, kEmit };
+
+enum class SmiCheck { kOmit, kInline };
+
// This is the only place allowed to include the platform-specific headers.
#define INCLUDED_FROM_MACRO_ASSEMBLER_H
#if V8_TARGET_ARCH_IA32
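
The enum-class conversions above (InvokeType, RememberedSetAction, SmiCheck) replace bare flag constants so that each call site names both the flag's type and its value, and so the flags no longer convert implicitly to int or bool. A tiny illustration with a hypothetical function, not a real MacroAssembler method:

enum class InvokeType { kCall, kJump };
enum class SmiCheck { kOmit, kInline };

struct MacroAssemblerLike {
  // Hypothetical signature for illustration only.
  void InvokeFunction(int function_register, InvokeType type, SmiCheck check) {
    // Scoped enums make the caller spell out intent, e.g. InvokeType::kCall
    // instead of a bare CALL_FUNCTION constant, and a stray int or bool can
    // no longer be passed by accident.
    (void)function_register;
    (void)type;
    (void)check;
  }
};

int main() {
  MacroAssemblerLike masm;
  masm.InvokeFunction(/*function_register=*/1, InvokeType::kCall,
                      SmiCheck::kInline);
  return 0;
}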
diff --git a/chromium/v8/src/codegen/mips/assembler-mips.cc b/chromium/v8/src/codegen/mips/assembler-mips.cc
index 2ef08ae87c2..c254860b147 100644
--- a/chromium/v8/src/codegen/mips/assembler-mips.cc
+++ b/chromium/v8/src/codegen/mips/assembler-mips.cc
@@ -261,29 +261,27 @@ static const int kNegOffset = 0x00008000;
// operations as post-increment of sp.
const Instr kPopInstruction = ADDIU | (sp.code() << kRsShift) |
(sp.code() << kRtShift) |
- (kPointerSize & kImm16Mask); // NOLINT
+ (kPointerSize & kImm16Mask);
// addiu(sp, sp, -4) part of Push(r) operation as pre-decrement of sp.
const Instr kPushInstruction = ADDIU | (sp.code() << kRsShift) |
(sp.code() << kRtShift) |
- (-kPointerSize & kImm16Mask); // NOLINT
+ (-kPointerSize & kImm16Mask);
// sw(r, MemOperand(sp, 0))
-const Instr kPushRegPattern =
- SW | (sp.code() << kRsShift) | (0 & kImm16Mask); // NOLINT
+const Instr kPushRegPattern = SW | (sp.code() << kRsShift) | (0 & kImm16Mask);
// lw(r, MemOperand(sp, 0))
-const Instr kPopRegPattern =
- LW | (sp.code() << kRsShift) | (0 & kImm16Mask); // NOLINT
+const Instr kPopRegPattern = LW | (sp.code() << kRsShift) | (0 & kImm16Mask);
const Instr kLwRegFpOffsetPattern =
- LW | (fp.code() << kRsShift) | (0 & kImm16Mask); // NOLINT
+ LW | (fp.code() << kRsShift) | (0 & kImm16Mask);
const Instr kSwRegFpOffsetPattern =
- SW | (fp.code() << kRsShift) | (0 & kImm16Mask); // NOLINT
+ SW | (fp.code() << kRsShift) | (0 & kImm16Mask);
const Instr kLwRegFpNegOffsetPattern =
- LW | (fp.code() << kRsShift) | (kNegOffset & kImm16Mask); // NOLINT
+ LW | (fp.code() << kRsShift) | (kNegOffset & kImm16Mask);
const Instr kSwRegFpNegOffsetPattern =
- SW | (fp.code() << kRsShift) | (kNegOffset & kImm16Mask); // NOLINT
+ SW | (fp.code() << kRsShift) | (kNegOffset & kImm16Mask);
// A mask for the Rt register for push, pop, lw, sw instructions.
const Instr kRtMask = kRtFieldMask;
const Instr kLwSwInstrTypeMask = 0xFFE00000;
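
The push/pop patterns above are whole MIPS I-type instruction words assembled by OR-ing the opcode with the rs/rt register fields and a masked 16-bit immediate; the assembler later matches emitted instructions against these patterns. A stand-alone sketch of that composition for the classic addiu sp, sp, -4 push pattern follows; the opcode value and the sp register number are the standard MIPS encodings written out here, not values taken from V8's constant headers.

#include <cstdint>
#include <cstdio>

using Instr = uint32_t;

// Architectural MIPS I-type layout: opcode:31-26, rs:25-21, rt:20-16, imm:15-0.
constexpr int kRsShift = 21;
constexpr int kRtShift = 16;
constexpr Instr kImm16Mask = 0xFFFF;
constexpr Instr ADDIU = 0x24000000;  // opcode 0b001001 placed in bits 31-26
constexpr int kSpCode = 29;          // MIPS $sp is register 29
constexpr int kPointerSize = 4;

// addiu sp, sp, -4: the "push" pre-decrement of sp.
constexpr Instr kPushInstruction =
    ADDIU | (kSpCode << kRsShift) | (kSpCode << kRtShift) |
    (static_cast<Instr>(-kPointerSize) & kImm16Mask);

// Self-check: this is the well-known encoding of addiu $sp, $sp, -4.
static_assert(kPushInstruction == 0x27BDFFFC, "unexpected encoding");

int main() {
  std::printf("push pattern: 0x%08x\n",
              static_cast<unsigned>(kPushInstruction));
  return 0;
}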
diff --git a/chromium/v8/src/codegen/mips/assembler-mips.h b/chromium/v8/src/codegen/mips/assembler-mips.h
index ccdea03a79c..47bdf26d551 100644
--- a/chromium/v8/src/codegen/mips/assembler-mips.h
+++ b/chromium/v8/src/codegen/mips/assembler-mips.h
@@ -1907,7 +1907,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
class EnsureSpace {
public:
- explicit inline EnsureSpace(Assembler* assembler);
+ explicit V8_INLINE EnsureSpace(Assembler* assembler);
};
class V8_EXPORT_PRIVATE V8_NODISCARD UseScratchRegisterScope {
diff --git a/chromium/v8/src/codegen/mips/interface-descriptors-mips-inl.h b/chromium/v8/src/codegen/mips/interface-descriptors-mips-inl.h
new file mode 100644
index 00000000000..edea1b3844a
--- /dev/null
+++ b/chromium/v8/src/codegen/mips/interface-descriptors-mips-inl.h
@@ -0,0 +1,258 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CODEGEN_MIPS_INTERFACE_DESCRIPTORS_MIPS_INL_H_
+#define V8_CODEGEN_MIPS_INTERFACE_DESCRIPTORS_MIPS_INL_H_
+
+#if V8_TARGET_ARCH_MIPS
+
+#include "src/codegen/interface-descriptors.h"
+#include "src/execution/frames.h"
+
+namespace v8 {
+namespace internal {
+
+constexpr auto CallInterfaceDescriptor::DefaultRegisterArray() {
+ auto registers = RegisterArray(a0, a1, a2, a3, t0);
+ STATIC_ASSERT(registers.size() == kMaxBuiltinRegisterParams);
+ return registers;
+}
+
+// static
+constexpr auto RecordWriteDescriptor::registers() {
+ return RegisterArray(a0, a1, a2, a3, kReturnRegister0);
+}
+
+// static
+constexpr auto DynamicCheckMapsDescriptor::registers() {
+ return RegisterArray(kReturnRegister0, a0, a1, a2, cp);
+}
+
+// static
+constexpr auto EphemeronKeyBarrierDescriptor::registers() {
+ return RegisterArray(a0, a1, a2, a3, kReturnRegister0);
+}
+
+// static
+constexpr Register LoadDescriptor::ReceiverRegister() { return a1; }
+// static
+constexpr Register LoadDescriptor::NameRegister() { return a2; }
+// static
+constexpr Register LoadDescriptor::SlotRegister() { return a0; }
+
+// static
+constexpr Register LoadWithVectorDescriptor::VectorRegister() { return a3; }
+
+// static
+constexpr Register
+LoadWithReceiverAndVectorDescriptor::LookupStartObjectRegister() {
+ return t0;
+}
+
+// static
+constexpr Register StoreDescriptor::ReceiverRegister() { return a1; }
+// static
+constexpr Register StoreDescriptor::NameRegister() { return a2; }
+// static
+constexpr Register StoreDescriptor::ValueRegister() { return a0; }
+// static
+constexpr Register StoreDescriptor::SlotRegister() { return t0; }
+
+// static
+constexpr Register StoreWithVectorDescriptor::VectorRegister() { return a3; }
+
+// static
+constexpr Register StoreTransitionDescriptor::MapRegister() { return t1; }
+
+// static
+constexpr Register ApiGetterDescriptor::HolderRegister() { return a0; }
+// static
+constexpr Register ApiGetterDescriptor::CallbackRegister() { return a3; }
+
+// static
+constexpr Register GrowArrayElementsDescriptor::ObjectRegister() { return a0; }
+// static
+constexpr Register GrowArrayElementsDescriptor::KeyRegister() { return a3; }
+
+// static
+constexpr Register BaselineLeaveFrameDescriptor::ParamsSizeRegister() {
+ // TODO(v8:11421): Implement on this platform.
+ return a3;
+}
+
+// static
+constexpr Register BaselineLeaveFrameDescriptor::WeightRegister() {
+ // TODO(v8:11421): Implement on this platform.
+ return t0;
+}
+
+// static
+constexpr Register TypeConversionDescriptor::ArgumentRegister() { return a0; }
+
+// static
+constexpr auto TypeofDescriptor::registers() { return RegisterArray(a3); }
+
+// static
+constexpr auto CallTrampolineDescriptor::registers() {
+ // a1: target
+ // a0: number of arguments
+ return RegisterArray(a1, a0);
+}
+
+// static
+constexpr auto CallVarargsDescriptor::registers() {
+ // a0 : number of arguments (on the stack, not including receiver)
+ // a1 : the target to call
+ // t0 : arguments list length (untagged)
+ // a2 : arguments list (FixedArray)
+ return RegisterArray(a1, a0, t0, a2);
+}
+
+// static
+constexpr auto CallForwardVarargsDescriptor::registers() {
+ // a1: the target to call
+ // a0: number of arguments
+ // a2: start index (to support rest parameters)
+ return RegisterArray(a1, a0, a2);
+}
+
+// static
+constexpr auto CallFunctionTemplateDescriptor::registers() {
+ // a1 : function template info
+ // a0 : number of arguments (on the stack, not including receiver)
+ return RegisterArray(a1, a0);
+}
+
+// static
+constexpr auto CallWithSpreadDescriptor::registers() {
+ // a0 : number of arguments (on the stack, not including receiver)
+ // a1 : the target to call
+ // a2 : the object to spread
+ return RegisterArray(a1, a0, a2);
+}
+
+// static
+constexpr auto CallWithArrayLikeDescriptor::registers() {
+ // a1 : the target to call
+ // a2 : the arguments list
+ return RegisterArray(a1, a2);
+}
+
+// static
+constexpr auto ConstructVarargsDescriptor::registers() {
+ // a0 : number of arguments (on the stack, not including receiver)
+ // a1 : the target to call
+ // a3 : the new target
+ // t0 : arguments list length (untagged)
+ // a2 : arguments list (FixedArray)
+ return RegisterArray(a1, a3, a0, t0, a2);
+}
+
+// static
+constexpr auto ConstructForwardVarargsDescriptor::registers() {
+ // a1: the target to call
+ // a3: new target
+ // a0: number of arguments
+ // a2: start index (to support rest parameters)
+ return RegisterArray(a1, a3, a0, a2);
+}
+
+// static
+constexpr auto ConstructWithSpreadDescriptor::registers() {
+ // a0 : number of arguments (on the stack, not including receiver)
+ // a1 : the target to call
+ // a3 : the new target
+ // a2 : the object to spread
+ return RegisterArray(a1, a3, a0, a2);
+}
+
+// static
+constexpr auto ConstructWithArrayLikeDescriptor::registers() {
+ // a1 : the target to call
+ // a3 : the new target
+ // a2 : the arguments list
+ return RegisterArray(a1, a3, a2);
+}
+
+// static
+constexpr auto ConstructStubDescriptor::registers() {
+ // a1: target
+ // a3: new target
+ // a0: number of arguments
+ // a2: allocation site or undefined
+ return RegisterArray(a1, a3, a0, a2);
+}
+
+// static
+constexpr auto AbortDescriptor::registers() { return RegisterArray(a0); }
+
+// static
+constexpr auto CompareDescriptor::registers() { return RegisterArray(a1, a0); }
+
+// static
+constexpr auto Compare_BaselineDescriptor::registers() {
+ return RegisterArray(a1, a0, a2);
+}
+
+// static
+constexpr auto BinaryOpDescriptor::registers() { return RegisterArray(a1, a0); }
+
+// static
+constexpr auto BinaryOp_BaselineDescriptor::registers() {
+ // TODO(v8:11421): Implement on this platform.
+ return RegisterArray(a1, a0, a2);
+}
+
+// static
+constexpr auto ApiCallbackDescriptor::registers() {
+ // a1 : kApiFunctionAddress
+ // a2 : kArgc
+ // a3 : kCallData
+ // a0 : kHolder
+ return RegisterArray(a1, a2, a3, a0);
+}
+
+// static
+constexpr auto InterpreterDispatchDescriptor::registers() {
+ return RegisterArray(
+ kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister,
+ kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister);
+}
+
+// static
+constexpr auto InterpreterPushArgsThenCallDescriptor::registers() {
+ // a0 : argument count (not including receiver)
+ // a2 : address of first argument
+ // a1 : the target callable to be called
+ return RegisterArray(a0, a2, a1);
+}
+
+// static
+constexpr auto InterpreterPushArgsThenConstructDescriptor::registers() {
+ // a0 : argument count (not including receiver)
+ // t4 : address of the first argument
+ // a1 : constructor to call
+ // a3 : new target
+ // a2 : allocation site feedback if available, undefined otherwise
+ return RegisterArray(a0, t4, a1, a3, a2);
+}
+
+// static
+constexpr auto ResumeGeneratorDescriptor::registers() {
+ // v0 : the value to pass to the generator
+ // a1 : the JSGeneratorObject to resume
+ return RegisterArray(v0, a1);
+}
+
+// static
+constexpr auto RunMicrotasksEntryDescriptor::registers() {
+ return RegisterArray(a0, a1);
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TARGET_ARCH_MIPS
+
+#endif // V8_CODEGEN_MIPS_INTERFACE_DESCRIPTORS_MIPS_INL_H_
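The new -inl.h header above expresses each call-interface descriptor as a constexpr function returning a compile-time register array, replacing the runtime InitializePlatformSpecific calls in the deleted .cc file below. A rough, self-contained C++ sketch of that pattern (standalone stand-in types, not V8's actual RegisterArray/Register declarations) looks like this:

#include <array>

using Register = int;  // stand-in for a real register type
constexpr Register a0 = 4, a1 = 5, a2 = 6, a3 = 7, t0 = 12;

// Variadic helper that builds a fixed-size array at compile time,
// mirroring how the header's RegisterArray(...) calls are used.
template <typename... Regs>
constexpr auto RegisterArray(Regs... regs) {
  return std::array<Register, sizeof...(Regs)>{regs...};
}

struct CallTrampolineDescriptorSketch {
  // a1: target, a0: number of arguments
  static constexpr auto registers() { return RegisterArray(a1, a0); }
};

// The register count is available to the compiler, so mismatches can be
// caught with static_assert instead of runtime CHECKs.
static_assert(CallTrampolineDescriptorSketch::registers().size() == 2,
              "register count known at compile time");

The benefit, as the rest of the diff shows, is that per-platform descriptor data no longer needs per-descriptor initialization functions or runtime size checks.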
diff --git a/chromium/v8/src/codegen/mips/interface-descriptors-mips.cc b/chromium/v8/src/codegen/mips/interface-descriptors-mips.cc
deleted file mode 100644
index f41a0e14ca7..00000000000
--- a/chromium/v8/src/codegen/mips/interface-descriptors-mips.cc
+++ /dev/null
@@ -1,332 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_MIPS
-
-#include "src/codegen/interface-descriptors.h"
-
-#include "src/execution/frames.h"
-
-namespace v8 {
-namespace internal {
-
-const Register CallInterfaceDescriptor::ContextRegister() { return cp; }
-
-void CallInterfaceDescriptor::DefaultInitializePlatformSpecific(
- CallInterfaceDescriptorData* data, int register_parameter_count) {
- const Register default_stub_registers[] = {a0, a1, a2, a3, t0};
- CHECK_LE(static_cast<size_t>(register_parameter_count),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(register_parameter_count,
- default_stub_registers);
-}
-
-// On MIPS it is not allowed to use odd numbered floating point registers
-// (e.g. f1, f3, etc.) for parameters. This can happen if we use
-// DefaultInitializePlatformSpecific to assign float registers for parameters.
-// E.g if fourth parameter goes to float register, f7 would be assigned for
-// parameter (a3 casted to int is 7).
-bool CallInterfaceDescriptor::IsValidFloatParameterRegister(Register reg) {
- return reg.code() % 2 == 0;
-}
-
-void WasmI32AtomicWait32Descriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- const Register default_stub_registers[] = {a0, a1, a2, a3};
- CHECK_EQ(static_cast<size_t>(kParameterCount),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
-}
-
-void WasmI64AtomicWait32Descriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- const Register default_stub_registers[] = {a0, a1, a2, a3, t0};
- CHECK_EQ(static_cast<size_t>(kParameterCount - kStackArgumentsCount),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(kParameterCount - kStackArgumentsCount,
- default_stub_registers);
-}
-
-void RecordWriteDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- const Register default_stub_registers[] = {a0, a1, a2, a3, kReturnRegister0};
-
- data->RestrictAllocatableRegisters(default_stub_registers,
- arraysize(default_stub_registers));
-
- CHECK_LE(static_cast<size_t>(kParameterCount),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
-}
-
-void DynamicCheckMapsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register default_stub_registers[] = {kReturnRegister0, a0, a1, a2, cp};
-
- data->RestrictAllocatableRegisters(default_stub_registers,
- arraysize(default_stub_registers));
-
- CHECK_LE(static_cast<size_t>(kParameterCount),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
-}
-
-void EphemeronKeyBarrierDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- const Register default_stub_registers[] = {a0, a1, a2, a3, kReturnRegister0};
-
- data->RestrictAllocatableRegisters(default_stub_registers,
- arraysize(default_stub_registers));
-
- CHECK_LE(static_cast<size_t>(kParameterCount),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
-}
-
-const Register LoadDescriptor::ReceiverRegister() { return a1; }
-const Register LoadDescriptor::NameRegister() { return a2; }
-const Register LoadDescriptor::SlotRegister() { return a0; }
-
-const Register LoadWithVectorDescriptor::VectorRegister() { return a3; }
-
-const Register
-LoadWithReceiverAndVectorDescriptor::LookupStartObjectRegister() {
- return t0;
-}
-
-const Register StoreDescriptor::ReceiverRegister() { return a1; }
-const Register StoreDescriptor::NameRegister() { return a2; }
-const Register StoreDescriptor::ValueRegister() { return a0; }
-const Register StoreDescriptor::SlotRegister() { return t0; }
-
-const Register StoreWithVectorDescriptor::VectorRegister() { return a3; }
-
-const Register StoreTransitionDescriptor::SlotRegister() { return t0; }
-const Register StoreTransitionDescriptor::VectorRegister() { return a3; }
-const Register StoreTransitionDescriptor::MapRegister() { return t1; }
-
-const Register ApiGetterDescriptor::HolderRegister() { return a0; }
-const Register ApiGetterDescriptor::CallbackRegister() { return a3; }
-
-const Register GrowArrayElementsDescriptor::ObjectRegister() { return a0; }
-const Register GrowArrayElementsDescriptor::KeyRegister() { return a3; }
-
-const Register BaselineLeaveFrameDescriptor::ParamsSizeRegister() {
- // TODO(v8:11421): Implement on this platform.
- UNREACHABLE();
-}
-const Register BaselineLeaveFrameDescriptor::WeightRegister() {
- // TODO(v8:11421): Implement on this platform.
- UNREACHABLE();
-}
-
-// static
-const Register TypeConversionDescriptor::ArgumentRegister() { return a0; }
-
-void TypeofDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {a3};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallTrampolineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // a1: target
- // a0: number of arguments
- Register registers[] = {a1, a0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallVarargsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // a0 : number of arguments (on the stack, not including receiver)
- // a1 : the target to call
- // t0 : arguments list length (untagged)
- // a2 : arguments list (FixedArray)
- Register registers[] = {a1, a0, t0, a2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallForwardVarargsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // a1: the target to call
- // a0: number of arguments
- // a2: start index (to support rest parameters)
- Register registers[] = {a1, a0, a2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallFunctionTemplateDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // a1 : function template info
- // a0 : number of arguments (on the stack, not including receiver)
- Register registers[] = {a1, a0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallWithSpreadDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // a0 : number of arguments (on the stack, not including receiver)
- // a1 : the target to call
- // a2 : the object to spread
- Register registers[] = {a1, a0, a2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallWithArrayLikeDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // a1 : the target to call
- // a2 : the arguments list
- Register registers[] = {a1, a2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructVarargsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // a0 : number of arguments (on the stack, not including receiver)
- // a1 : the target to call
- // a3 : the new target
- // t0 : arguments list length (untagged)
- // a2 : arguments list (FixedArray)
- Register registers[] = {a1, a3, a0, t0, a2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructForwardVarargsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // a1: the target to call
- // a3: new target
- // a0: number of arguments
- // a2: start index (to support rest parameters)
- Register registers[] = {a1, a3, a0, a2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructWithSpreadDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // a0 : number of arguments (on the stack, not including receiver)
- // a1 : the target to call
- // a3 : the new target
- // a2 : the object to spread
- Register registers[] = {a1, a3, a0, a2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructWithArrayLikeDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // a1 : the target to call
- // a3 : the new target
- // a2 : the arguments list
- Register registers[] = {a1, a3, a2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructStubDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // a1: target
- // a3: new target
- // a0: number of arguments
- // a2: allocation site or undefined
- Register registers[] = {a1, a3, a0, a2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void AbortDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {a0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CompareDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {a1, a0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void Compare_BaselineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:11421): Implement on this platform.
- InitializePlatformUnimplemented(data, kParameterCount);
-}
-
-void BinaryOpDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {a1, a0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void BinaryOp_BaselineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:11421): Implement on this platform.
- InitializePlatformUnimplemented(data, kParameterCount);
-}
-
-void ApiCallbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- a1, // kApiFunctionAddress
- a2, // kArgc
- a3, // kCallData
- a0, // kHolder
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void InterpreterDispatchDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister,
- kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void InterpreterPushArgsThenCallDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- a0, // argument count (not including receiver)
- a2, // address of first argument
- a1 // the target callable to be call
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void InterpreterPushArgsThenConstructDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- a0, // argument count (not including receiver)
- t4, // address of the first argument
- a1, // constructor to call
- a3, // new target
- a2, // allocation site feedback if available, undefined otherwise
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ResumeGeneratorDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- v0, // the value to pass to the generator
- a1 // the JSGeneratorObject to resume
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void FrameDropperTrampolineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- a1, // loaded new FP
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void RunMicrotasksEntryDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {a0, a1};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_MIPS
diff --git a/chromium/v8/src/codegen/mips/macro-assembler-mips.cc b/chromium/v8/src/codegen/mips/macro-assembler-mips.cc
index 8bbdbca6627..d48b441c7bd 100644
--- a/chromium/v8/src/codegen/mips/macro-assembler-mips.cc
+++ b/chromium/v8/src/codegen/mips/macro-assembler-mips.cc
@@ -12,6 +12,7 @@
#include "src/codegen/callable.h"
#include "src/codegen/code-factory.h"
#include "src/codegen/external-reference-table.h"
+#include "src/codegen/interface-descriptors-inl.h"
#include "src/codegen/macro-assembler.h"
#include "src/codegen/register-configuration.h"
#include "src/debug/debug.h"
@@ -64,7 +65,7 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
RegList list = kJSCallerSaved & ~exclusions;
bytes += NumRegs(list) * kPointerSize;
- if (fp_mode == kSaveFPRegs) {
+ if (fp_mode == SaveFPRegsMode::kSave) {
bytes += NumRegs(kCallerSavedFPU) * kDoubleSize;
}
@@ -89,7 +90,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
MultiPush(list);
bytes += NumRegs(list) * kPointerSize;
- if (fp_mode == kSaveFPRegs) {
+ if (fp_mode == SaveFPRegsMode::kSave) {
MultiPushFPU(kCallerSavedFPU);
bytes += NumRegs(kCallerSavedFPU) * kDoubleSize;
}
@@ -100,7 +101,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
Register exclusion2, Register exclusion3) {
int bytes = 0;
- if (fp_mode == kSaveFPRegs) {
+ if (fp_mode == SaveFPRegsMode::kSave) {
MultiPopFPU(kCallerSavedFPU);
bytes += NumRegs(kCallerSavedFPU) * kDoubleSize;
}
@@ -179,7 +180,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
Label done;
// Skip barrier if writing a smi.
- if (smi_check == INLINE_SMI_CHECK) {
+ if (smi_check == SmiCheck::kInline) {
JumpIfSmi(value, &done);
}
@@ -188,7 +189,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
DCHECK(IsAligned(offset, kPointerSize));
Addu(dst, object, Operand(offset - kHeapObjectTag));
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
BlockTrampolinePoolScope block_trampoline_pool(this);
Label ok;
And(t8, dst, Operand(kPointerSize - 1));
@@ -198,13 +199,13 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
}
RecordWrite(object, dst, value, ra_status, save_fp, remembered_set_action,
- OMIT_SMI_CHECK);
+ SmiCheck::kOmit);
bind(&done);
// Clobber clobbered input registers when running with the debug-code flag
// turned on to provoke errors.
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
li(value, Operand(bit_cast<int32_t>(kZapValue + 4)));
li(dst, Operand(bit_cast<int32_t>(kZapValue + 8)));
}
@@ -336,7 +337,7 @@ void MacroAssembler::RecordWrite(Register object, Register address,
DCHECK(!AreAliased(object, address, value, t8));
DCHECK(!AreAliased(object, address, value, t9));
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
lw(scratch, MemOperand(address));
@@ -344,7 +345,7 @@ void MacroAssembler::RecordWrite(Register object, Register address,
Operand(value));
}
- if ((remembered_set_action == OMIT_REMEMBERED_SET &&
+ if ((remembered_set_action == RememberedSetAction::kOmit &&
!FLAG_incremental_marking) ||
FLAG_disable_write_barriers) {
return;
@@ -354,7 +355,7 @@ void MacroAssembler::RecordWrite(Register object, Register address,
// catch stores of smis and stores into the young generation.
Label done;
- if (smi_check == INLINE_SMI_CHECK) {
+ if (smi_check == SmiCheck::kInline) {
DCHECK_EQ(0, kSmiTag);
JumpIfSmi(value, &done);
}
@@ -379,7 +380,7 @@ void MacroAssembler::RecordWrite(Register object, Register address,
// Clobber clobbered registers when running with the debug-code flag
// turned on to provoke errors.
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
li(address, Operand(bit_cast<int32_t>(kZapValue + 12)));
li(value, Operand(bit_cast<int32_t>(kZapValue + 16)));
}
@@ -4176,14 +4177,6 @@ void TurboAssembler::PushArray(Register array, Register size, Register scratch,
}
}
-void MacroAssembler::MaybeDropFrames() {
- // Check whether we need to drop frames to restart a function on the stack.
- li(a1, ExternalReference::debug_restart_fp_address(isolate()));
- lw(a1, MemOperand(a1));
- Jump(BUILTIN_CODE(isolate(), FrameDropperTrampoline), RelocInfo::CODE_TARGET,
- ne, a1, Operand(zero_reg));
-}
-
// ---------------------------------------------------------------------------
// Exception handling.
@@ -4373,7 +4366,7 @@ void MacroAssembler::StackOverflowCheck(Register num_args, Register scratch1,
void MacroAssembler::InvokePrologue(Register expected_parameter_count,
Register actual_parameter_count,
- Label* done, InvokeFlag flag) {
+ Label* done, InvokeType type) {
Label regular_invoke;
// a0: actual arguments count
@@ -4484,9 +4477,9 @@ void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
Register expected_parameter_count,
Register actual_parameter_count,
- InvokeFlag flag) {
+ InvokeType type) {
// You can't call a function without a valid frame.
- DCHECK_IMPLIES(flag == CALL_FUNCTION, has_frame());
+ DCHECK_IMPLIES(type == InvokeType::kCall, has_frame());
DCHECK_EQ(function, a1);
DCHECK_IMPLIES(new_target.is_valid(), new_target == a3);
@@ -4500,19 +4493,21 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
}
Label done;
- InvokePrologue(expected_parameter_count, actual_parameter_count, &done, flag);
+ InvokePrologue(expected_parameter_count, actual_parameter_count, &done, type);
// We call indirectly through the code field in the function to
// allow recompilation to take effect without changing any of the
// call sites.
Register code = kJavaScriptCallCodeStartRegister;
lw(code, FieldMemOperand(function, JSFunction::kCodeOffset));
- if (flag == CALL_FUNCTION) {
- Addu(code, code, Code::kHeaderSize - kHeapObjectTag);
- Call(code);
- } else {
- DCHECK(flag == JUMP_FUNCTION);
- Addu(code, code, Code::kHeaderSize - kHeapObjectTag);
- Jump(code);
+ switch (type) {
+ case InvokeType::kCall:
+ Addu(code, code, Code::kHeaderSize - kHeapObjectTag);
+ Call(code);
+ break;
+ case InvokeType::kJump:
+ Addu(code, code, Code::kHeaderSize - kHeapObjectTag);
+ Jump(code);
+ break;
}
// Continue here if InvokePrologue does handle the invocation due to
@@ -4522,9 +4517,9 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
void MacroAssembler::InvokeFunctionWithNewTarget(
Register function, Register new_target, Register actual_parameter_count,
- InvokeFlag flag) {
+ InvokeType type) {
// You can't call a function without a valid frame.
- DCHECK_IMPLIES(flag == CALL_FUNCTION, has_frame());
+ DCHECK_IMPLIES(type == InvokeType::kCall, has_frame());
// Contract with called JS functions requires that function is passed in a1.
DCHECK_EQ(function, a1);
@@ -4538,15 +4533,15 @@ void MacroAssembler::InvokeFunctionWithNewTarget(
SharedFunctionInfo::kFormalParameterCountOffset));
InvokeFunctionCode(function, new_target, expected_reg, actual_parameter_count,
- flag);
+ type);
}
void MacroAssembler::InvokeFunction(Register function,
Register expected_parameter_count,
Register actual_parameter_count,
- InvokeFlag flag) {
+ InvokeType type) {
// You can't call a function without a valid frame.
- DCHECK_IMPLIES(flag == CALL_FUNCTION, has_frame());
+ DCHECK_IMPLIES(type == InvokeType::kCall, has_frame());
// Contract with called JS functions requires that function is passed in a1.
DCHECK_EQ(function, a1);
@@ -4555,7 +4550,7 @@ void MacroAssembler::InvokeFunction(Register function,
lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
InvokeFunctionCode(a1, no_reg, expected_parameter_count,
- actual_parameter_count, flag);
+ actual_parameter_count, type);
}
// ---------------------------------------------------------------------------
@@ -4699,8 +4694,8 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
BranchDelaySlot bd,
bool builtin_exit_frame) {
PrepareCEntryFunction(builtin);
- Handle<Code> code = CodeFactory::CEntry(isolate(), 1, kDontSaveFPRegs,
- kArgvOnStack, builtin_exit_frame);
+ Handle<Code> code = CodeFactory::CEntry(isolate(), 1, SaveFPRegsMode::kIgnore,
+ ArgvMode::kStack, builtin_exit_frame);
Jump(code, RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg), bd);
}
@@ -4746,7 +4741,7 @@ void TurboAssembler::DebugBreak() { stop(); }
void TurboAssembler::Assert(Condition cc, AbortReason reason, Register rs,
Operand rt) {
- if (emit_debug_code()) Check(cc, reason, rs, rt);
+ if (FLAG_debug_code) Check(cc, reason, rs, rt);
}
void TurboAssembler::Check(Condition cc, AbortReason reason, Register rs,
@@ -4761,11 +4756,11 @@ void TurboAssembler::Check(Condition cc, AbortReason reason, Register rs,
void TurboAssembler::Abort(AbortReason reason) {
Label abort_start;
bind(&abort_start);
-#ifdef DEBUG
- const char* msg = GetAbortReason(reason);
- RecordComment("Abort message: ");
- RecordComment(msg);
-#endif
+ if (FLAG_code_comments) {
+ const char* msg = GetAbortReason(reason);
+ RecordComment("Abort message: ");
+ RecordComment(msg);
+ }
// Avoid emitting call to builtin if requested.
if (trap_on_abort()) {
@@ -4885,7 +4880,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
// Set up new frame pointer.
addiu(fp, sp, ExitFrameConstants::kFixedFrameSizeFromFp);
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
sw(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
@@ -4999,7 +4994,7 @@ int TurboAssembler::ActivationFrameAlignment() {
}
void MacroAssembler::AssertStackIsAligned() {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
const int frame_alignment = ActivationFrameAlignment();
const int frame_alignment_mask = frame_alignment - 1;
@@ -5032,7 +5027,7 @@ void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label,
}
void MacroAssembler::AssertNotSmi(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
STATIC_ASSERT(kSmiTag == 0);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
@@ -5042,7 +5037,7 @@ void MacroAssembler::AssertNotSmi(Register object) {
}
void MacroAssembler::AssertSmi(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
STATIC_ASSERT(kSmiTag == 0);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
@@ -5052,7 +5047,7 @@ void MacroAssembler::AssertSmi(Register object) {
}
void MacroAssembler::AssertConstructor(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
BlockTrampolinePoolScope block_trampoline_pool(this);
STATIC_ASSERT(kSmiTag == 0);
SmiTst(object, t8);
@@ -5067,7 +5062,7 @@ void MacroAssembler::AssertConstructor(Register object) {
}
void MacroAssembler::AssertFunction(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
BlockTrampolinePoolScope block_trampoline_pool(this);
STATIC_ASSERT(kSmiTag == 0);
SmiTst(object, t8);
@@ -5083,7 +5078,7 @@ void MacroAssembler::AssertFunction(Register object) {
}
void MacroAssembler::AssertBoundFunction(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
BlockTrampolinePoolScope block_trampoline_pool(this);
STATIC_ASSERT(kSmiTag == 0);
SmiTst(object, t8);
@@ -5096,7 +5091,7 @@ void MacroAssembler::AssertBoundFunction(Register object) {
}
void MacroAssembler::AssertGeneratorObject(Register object) {
- if (!emit_debug_code()) return;
+ if (!FLAG_debug_code) return;
BlockTrampolinePoolScope block_trampoline_pool(this);
STATIC_ASSERT(kSmiTag == 0);
SmiTst(object, t8);
@@ -5123,7 +5118,7 @@ void MacroAssembler::AssertGeneratorObject(Register object) {
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
Register scratch) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
Label done_checking;
AssertNotSmi(object);
LoadRoot(scratch, RootIndex::kUndefinedValue);
@@ -5416,7 +5411,7 @@ void TurboAssembler::CallCFunctionHelper(Register function_base,
// PrepareCallCFunction. The C function must be called via t9, for mips ABI.
#if V8_HOST_ARCH_MIPS
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
int frame_alignment = base::OS::ActivationFrameAlignment();
int frame_alignment_mask = frame_alignment - 1;
if (frame_alignment > kPointerSize) {
diff --git a/chromium/v8/src/codegen/mips/macro-assembler-mips.h b/chromium/v8/src/codegen/mips/macro-assembler-mips.h
index 8d54e0b737c..8a82eea6fa5 100644
--- a/chromium/v8/src/codegen/mips/macro-assembler-mips.h
+++ b/chromium/v8/src/codegen/mips/macro-assembler-mips.h
@@ -45,8 +45,6 @@ enum LiFlags {
CONSTANT_SIZE = 1
};
-enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
-enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
enum RAStatus { kRAHasNotBeenSaved, kRAHasBeenSaved };
Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
@@ -974,8 +972,8 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void RecordWriteField(
Register object, int offset, Register value, Register scratch,
RAStatus ra_status, SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
+ RememberedSetAction remembered_set_action = RememberedSetAction::kEmit,
+ SmiCheck smi_check = SmiCheck::kInline);
// For a given |object| notify the garbage collector that the slot |address|
// has been written. |value| is the object being stored. The value and
@@ -983,8 +981,8 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void RecordWrite(
Register object, Register address, Register value, RAStatus ra_status,
SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
+ RememberedSetAction remembered_set_action = RememberedSetAction::kEmit,
+ SmiCheck smi_check = SmiCheck::kInline);
void Pref(int32_t hint, const MemOperand& rs);
@@ -1016,7 +1014,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Invoke the JavaScript function code by either calling or jumping.
void InvokeFunctionCode(Register function, Register new_target,
Register expected_parameter_count,
- Register actual_parameter_count, InvokeFlag flag);
+ Register actual_parameter_count, InvokeType type);
// On function call, call into the debugger if necessary.
void CheckDebugHook(Register fun, Register new_target,
@@ -1027,13 +1025,10 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// current context to the context in the function before invoking.
void InvokeFunctionWithNewTarget(Register function, Register new_target,
Register actual_parameter_count,
- InvokeFlag flag);
+ InvokeType type);
void InvokeFunction(Register function, Register expected_parameter_count,
- Register actual_parameter_count, InvokeFlag flag);
-
- // Frame restart support.
- void MaybeDropFrames();
+ Register actual_parameter_count, InvokeType type);
// Exception handling.
@@ -1057,18 +1052,18 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Call a runtime routine.
void CallRuntime(const Runtime::Function* f, int num_arguments,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs);
+ SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore);
// Convenience function: Same as above, but takes the fid instead.
void CallRuntime(Runtime::FunctionId fid,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+ SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) {
const Runtime::Function* function = Runtime::FunctionForId(fid);
CallRuntime(function, function->nargs, save_doubles);
}
// Convenience function: Same as above, but takes the fid instead.
void CallRuntime(Runtime::FunctionId id, int num_arguments,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+ SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) {
CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
}
@@ -1155,7 +1150,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Helper functions for generating invokes.
void InvokePrologue(Register expected_parameter_count,
Register actual_parameter_count, Label* done,
- InvokeFlag flag);
+ InvokeType type);
// Compute memory operands for safepoint stack slots.
static int SafepointRegisterStackIndex(int reg_code);
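The header hunk above drops the old unscoped RememberedSetAction/SmiCheck enums in favor of scoped enum classes used as default arguments (RememberedSetAction::kEmit, SmiCheck::kInline). A minimal sketch of that migration, with placeholder types rather than the real MacroAssembler interface, illustrates why call sites now have to spell out the enum class:

// Scoped enums cannot be implicitly converted to int or mixed with each
// other, which is the point of replacing INLINE_SMI_CHECK-style constants.
enum class RememberedSetAction { kOmit, kEmit };
enum class SmiCheck { kOmit, kInline };

struct MacroAssemblerSketch {
  void RecordWrite(
      int object, int address, int value,
      RememberedSetAction remembered_set_action = RememberedSetAction::kEmit,
      SmiCheck smi_check = SmiCheck::kInline) {
    if (smi_check == SmiCheck::kInline) {
      // ... emit the inline smi check before the write barrier ...
    }
    (void)object; (void)address; (void)value; (void)remembered_set_action;
  }
};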
diff --git a/chromium/v8/src/codegen/mips64/assembler-mips64.cc b/chromium/v8/src/codegen/mips64/assembler-mips64.cc
index 7f7ebd2c736..70a02ddb6ff 100644
--- a/chromium/v8/src/codegen/mips64/assembler-mips64.cc
+++ b/chromium/v8/src/codegen/mips64/assembler-mips64.cc
@@ -235,29 +235,27 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
// operations as post-increment of sp.
const Instr kPopInstruction = DADDIU | (sp.code() << kRsShift) |
(sp.code() << kRtShift) |
- (kPointerSize & kImm16Mask); // NOLINT
+ (kPointerSize & kImm16Mask);
// daddiu(sp, sp, -8) part of Push(r) operation as pre-decrement of sp.
const Instr kPushInstruction = DADDIU | (sp.code() << kRsShift) |
(sp.code() << kRtShift) |
- (-kPointerSize & kImm16Mask); // NOLINT
+ (-kPointerSize & kImm16Mask);
// Sd(r, MemOperand(sp, 0))
-const Instr kPushRegPattern =
- SD | (sp.code() << kRsShift) | (0 & kImm16Mask); // NOLINT
+const Instr kPushRegPattern = SD | (sp.code() << kRsShift) | (0 & kImm16Mask);
// Ld(r, MemOperand(sp, 0))
-const Instr kPopRegPattern =
- LD | (sp.code() << kRsShift) | (0 & kImm16Mask); // NOLINT
+const Instr kPopRegPattern = LD | (sp.code() << kRsShift) | (0 & kImm16Mask);
const Instr kLwRegFpOffsetPattern =
- LW | (fp.code() << kRsShift) | (0 & kImm16Mask); // NOLINT
+ LW | (fp.code() << kRsShift) | (0 & kImm16Mask);
const Instr kSwRegFpOffsetPattern =
- SW | (fp.code() << kRsShift) | (0 & kImm16Mask); // NOLINT
+ SW | (fp.code() << kRsShift) | (0 & kImm16Mask);
const Instr kLwRegFpNegOffsetPattern =
- LW | (fp.code() << kRsShift) | (kNegOffset & kImm16Mask); // NOLINT
+ LW | (fp.code() << kRsShift) | (kNegOffset & kImm16Mask);
const Instr kSwRegFpNegOffsetPattern =
- SW | (fp.code() << kRsShift) | (kNegOffset & kImm16Mask); // NOLINT
+ SW | (fp.code() << kRsShift) | (kNegOffset & kImm16Mask);
// A mask for the Rt register for push, pop, lw, sw instructions.
const Instr kRtMask = kRtFieldMask;
const Instr kLwSwInstrTypeMask = 0xFFE00000;
diff --git a/chromium/v8/src/codegen/mips64/interface-descriptors-mips64-inl.h b/chromium/v8/src/codegen/mips64/interface-descriptors-mips64-inl.h
new file mode 100644
index 00000000000..62e32776ef1
--- /dev/null
+++ b/chromium/v8/src/codegen/mips64/interface-descriptors-mips64-inl.h
@@ -0,0 +1,258 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CODEGEN_MIPS64_INTERFACE_DESCRIPTORS_MIPS64_INL_H_
+#define V8_CODEGEN_MIPS64_INTERFACE_DESCRIPTORS_MIPS64_INL_H_
+
+#if V8_TARGET_ARCH_MIPS64
+
+#include "src/codegen/interface-descriptors.h"
+#include "src/execution/frames.h"
+
+namespace v8 {
+namespace internal {
+
+constexpr auto CallInterfaceDescriptor::DefaultRegisterArray() {
+ auto registers = RegisterArray(a0, a1, a2, a3, a4);
+ STATIC_ASSERT(registers.size() == kMaxBuiltinRegisterParams);
+ return registers;
+}
+
+// static
+constexpr auto RecordWriteDescriptor::registers() {
+ return RegisterArray(a0, a1, a2, a3, kReturnRegister0);
+}
+
+// static
+constexpr auto DynamicCheckMapsDescriptor::registers() {
+ return RegisterArray(kReturnRegister0, a0, a1, a2, cp);
+}
+
+// static
+constexpr auto EphemeronKeyBarrierDescriptor::registers() {
+ return RegisterArray(a0, a1, a2, a3, kReturnRegister0);
+}
+
+// static
+constexpr Register LoadDescriptor::ReceiverRegister() { return a1; }
+// static
+constexpr Register LoadDescriptor::NameRegister() { return a2; }
+// static
+constexpr Register LoadDescriptor::SlotRegister() { return a0; }
+
+// static
+constexpr Register LoadWithVectorDescriptor::VectorRegister() { return a3; }
+
+// static
+constexpr Register
+LoadWithReceiverAndVectorDescriptor::LookupStartObjectRegister() {
+ return a4;
+}
+
+// static
+constexpr Register StoreDescriptor::ReceiverRegister() { return a1; }
+// static
+constexpr Register StoreDescriptor::NameRegister() { return a2; }
+// static
+constexpr Register StoreDescriptor::ValueRegister() { return a0; }
+// static
+constexpr Register StoreDescriptor::SlotRegister() { return a4; }
+
+// static
+constexpr Register StoreWithVectorDescriptor::VectorRegister() { return a3; }
+
+// static
+constexpr Register StoreTransitionDescriptor::MapRegister() { return a5; }
+
+// static
+constexpr Register ApiGetterDescriptor::HolderRegister() { return a0; }
+// static
+constexpr Register ApiGetterDescriptor::CallbackRegister() { return a3; }
+
+// static
+constexpr Register GrowArrayElementsDescriptor::ObjectRegister() { return a0; }
+// static
+constexpr Register GrowArrayElementsDescriptor::KeyRegister() { return a3; }
+
+// static
+constexpr Register BaselineLeaveFrameDescriptor::ParamsSizeRegister() {
+ // TODO(v8:11421): Implement on this platform.
+ return a3;
+}
+
+// static
+constexpr Register BaselineLeaveFrameDescriptor::WeightRegister() {
+ // TODO(v8:11421): Implement on this platform.
+ return a4;
+}
+
+// static
+constexpr Register TypeConversionDescriptor::ArgumentRegister() { return a0; }
+
+// static
+constexpr auto TypeofDescriptor::registers() { return RegisterArray(a3); }
+
+// static
+constexpr auto CallTrampolineDescriptor::registers() {
+ // a1: target
+ // a0: number of arguments
+ return RegisterArray(a1, a0);
+}
+
+// static
+constexpr auto CallVarargsDescriptor::registers() {
+ // a0 : number of arguments (on the stack, not including receiver)
+ // a1 : the target to call
+ // a4 : arguments list length (untagged)
+ // a2 : arguments list (FixedArray)
+ return RegisterArray(a1, a0, a4, a2);
+}
+
+// static
+constexpr auto CallForwardVarargsDescriptor::registers() {
+ // a1: the target to call
+ // a0: number of arguments
+ // a2: start index (to support rest parameters)
+ return RegisterArray(a1, a0, a2);
+}
+
+// static
+constexpr auto CallFunctionTemplateDescriptor::registers() {
+ // a1 : function template info
+ // a0 : number of arguments (on the stack, not including receiver)
+ return RegisterArray(a1, a0);
+}
+
+// static
+constexpr auto CallWithSpreadDescriptor::registers() {
+ // a0 : number of arguments (on the stack, not including receiver)
+ // a1 : the target to call
+ // a2 : the object to spread
+ return RegisterArray(a1, a0, a2);
+}
+
+// static
+constexpr auto CallWithArrayLikeDescriptor::registers() {
+ // a1 : the target to call
+ // a2 : the arguments list
+ return RegisterArray(a1, a2);
+}
+
+// static
+constexpr auto ConstructVarargsDescriptor::registers() {
+ // a0 : number of arguments (on the stack, not including receiver)
+ // a1 : the target to call
+ // a3 : the new target
+ // a4 : arguments list length (untagged)
+ // a2 : arguments list (FixedArray)
+ return RegisterArray(a1, a3, a0, a4, a2);
+}
+
+// static
+constexpr auto ConstructForwardVarargsDescriptor::registers() {
+ // a1: the target to call
+ // a3: new target
+ // a0: number of arguments
+ // a2: start index (to support rest parameters)
+ return RegisterArray(a1, a3, a0, a2);
+}
+
+// static
+constexpr auto ConstructWithSpreadDescriptor::registers() {
+ // a0 : number of arguments (on the stack, not including receiver)
+ // a1 : the target to call
+ // a3 : the new target
+ // a2 : the object to spread
+ return RegisterArray(a1, a3, a0, a2);
+}
+
+// static
+constexpr auto ConstructWithArrayLikeDescriptor::registers() {
+ // a1 : the target to call
+ // a3 : the new target
+ // a2 : the arguments list
+ return RegisterArray(a1, a3, a2);
+}
+
+// static
+constexpr auto ConstructStubDescriptor::registers() {
+ // a1: target
+ // a3: new target
+ // a0: number of arguments
+ // a2: allocation site or undefined
+ return RegisterArray(a1, a3, a0, a2);
+}
+
+// static
+constexpr auto AbortDescriptor::registers() { return RegisterArray(a0); }
+
+// static
+constexpr auto CompareDescriptor::registers() { return RegisterArray(a1, a0); }
+
+// static
+constexpr auto Compare_BaselineDescriptor::registers() {
+ // TODO(v8:11421): Implement on this platform.
+ return RegisterArray(a1, a0, a2);
+}
+
+// static
+constexpr auto BinaryOpDescriptor::registers() { return RegisterArray(a1, a0); }
+
+// static
+constexpr auto BinaryOp_BaselineDescriptor::registers() {
+ return RegisterArray(a1, a0, a2);
+}
+
+// static
+constexpr auto ApiCallbackDescriptor::registers() {
+ // a1 : kApiFunctionAddress
+ // a2 : kArgc
+ // a3 : kCallData
+ // a0 : kHolder
+ return RegisterArray(a1, a2, a3, a0);
+}
+
+// static
+constexpr auto InterpreterDispatchDescriptor::registers() {
+ return RegisterArray(
+ kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister,
+ kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister);
+}
+
+// static
+constexpr auto InterpreterPushArgsThenCallDescriptor::registers() {
+ // a0 : argument count (not including receiver)
+ // a2 : address of first argument
+ // a1 : the target callable to be called
+ return RegisterArray(a0, a2, a1);
+}
+
+// static
+constexpr auto InterpreterPushArgsThenConstructDescriptor::registers() {
+ // a0 : argument count (not including receiver)
+ // a4 : address of the first argument
+ // a1 : constructor to call
+ // a3 : new target
+ // a2 : allocation site feedback if available, undefined otherwise
+ return RegisterArray(a0, a4, a1, a3, a2);
+}
+
+// static
+constexpr auto ResumeGeneratorDescriptor::registers() {
+ // v0 : the value to pass to the generator
+ // a1 : the JSGeneratorObject to resume
+ return RegisterArray(v0, a1);
+}
+
+// static
+constexpr auto RunMicrotasksEntryDescriptor::registers() {
+ return RegisterArray(a0, a1);
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TARGET_ARCH_MIPS64
+
+#endif // V8_CODEGEN_MIPS64_INTERFACE_DESCRIPTORS_MIPS64_INL_H_
diff --git a/chromium/v8/src/codegen/mips64/interface-descriptors-mips64.cc b/chromium/v8/src/codegen/mips64/interface-descriptors-mips64.cc
deleted file mode 100644
index f34d16e15b7..00000000000
--- a/chromium/v8/src/codegen/mips64/interface-descriptors-mips64.cc
+++ /dev/null
@@ -1,332 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_MIPS64
-
-#include "src/codegen/interface-descriptors.h"
-
-#include "src/execution/frames.h"
-
-namespace v8 {
-namespace internal {
-
-const Register CallInterfaceDescriptor::ContextRegister() { return cp; }
-
-void CallInterfaceDescriptor::DefaultInitializePlatformSpecific(
- CallInterfaceDescriptorData* data, int register_parameter_count) {
- const Register default_stub_registers[] = {a0, a1, a2, a3, a4};
- CHECK_LE(static_cast<size_t>(register_parameter_count),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(register_parameter_count,
- default_stub_registers);
-}
-
-// On MIPS it is not allowed to use odd numbered floating point registers
-// (e.g. f1, f3, etc.) for parameters. This can happen if we use
-// DefaultInitializePlatformSpecific to assign float registers for parameters.
-// E.g if fourth parameter goes to float register, f7 would be assigned for
-// parameter (a3 casted to int is 7).
-bool CallInterfaceDescriptor::IsValidFloatParameterRegister(Register reg) {
- return reg.code() % 2 == 0;
-}
-
-void WasmI32AtomicWait32Descriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- const Register default_stub_registers[] = {a0, a1, a2, a3};
- CHECK_EQ(static_cast<size_t>(kParameterCount),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
-}
-
-void WasmI64AtomicWait32Descriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- const Register default_stub_registers[] = {a0, a1, a2, a3, a4};
- CHECK_EQ(static_cast<size_t>(kParameterCount - kStackArgumentsCount),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(kParameterCount - kStackArgumentsCount,
- default_stub_registers);
-}
-
-void RecordWriteDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- const Register default_stub_registers[] = {a0, a1, a2, a3, kReturnRegister0};
-
- data->RestrictAllocatableRegisters(default_stub_registers,
- arraysize(default_stub_registers));
-
- CHECK_LE(static_cast<size_t>(kParameterCount),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
-}
-
-void DynamicCheckMapsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register default_stub_registers[] = {kReturnRegister0, a0, a1, a2, cp};
-
- data->RestrictAllocatableRegisters(default_stub_registers,
- arraysize(default_stub_registers));
-
- CHECK_LE(static_cast<size_t>(kParameterCount),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
-}
-
-void EphemeronKeyBarrierDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- const Register default_stub_registers[] = {a0, a1, a2, a3, kReturnRegister0};
-
- data->RestrictAllocatableRegisters(default_stub_registers,
- arraysize(default_stub_registers));
-
- CHECK_LE(static_cast<size_t>(kParameterCount),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
-}
-
-const Register LoadDescriptor::ReceiverRegister() { return a1; }
-const Register LoadDescriptor::NameRegister() { return a2; }
-const Register LoadDescriptor::SlotRegister() { return a0; }
-
-const Register LoadWithVectorDescriptor::VectorRegister() { return a3; }
-
-const Register
-LoadWithReceiverAndVectorDescriptor::LookupStartObjectRegister() {
- return a4;
-}
-
-const Register StoreDescriptor::ReceiverRegister() { return a1; }
-const Register StoreDescriptor::NameRegister() { return a2; }
-const Register StoreDescriptor::ValueRegister() { return a0; }
-const Register StoreDescriptor::SlotRegister() { return a4; }
-
-const Register StoreWithVectorDescriptor::VectorRegister() { return a3; }
-
-const Register StoreTransitionDescriptor::SlotRegister() { return a4; }
-const Register StoreTransitionDescriptor::VectorRegister() { return a3; }
-const Register StoreTransitionDescriptor::MapRegister() { return a5; }
-
-const Register ApiGetterDescriptor::HolderRegister() { return a0; }
-const Register ApiGetterDescriptor::CallbackRegister() { return a3; }
-
-const Register GrowArrayElementsDescriptor::ObjectRegister() { return a0; }
-const Register GrowArrayElementsDescriptor::KeyRegister() { return a3; }
-
-const Register BaselineLeaveFrameDescriptor::ParamsSizeRegister() {
- // TODO(v8:11421): Implement on this platform.
- UNREACHABLE();
-}
-const Register BaselineLeaveFrameDescriptor::WeightRegister() {
- // TODO(v8:11421): Implement on this platform.
- UNREACHABLE();
-}
-
-// static
-const Register TypeConversionDescriptor::ArgumentRegister() { return a0; }
-
-void TypeofDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {a3};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallTrampolineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // a1: target
- // a0: number of arguments
- Register registers[] = {a1, a0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallVarargsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // a0 : number of arguments (on the stack, not including receiver)
- // a1 : the target to call
- // a4 : arguments list length (untagged)
- // a2 : arguments list (FixedArray)
- Register registers[] = {a1, a0, a4, a2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallForwardVarargsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // a1: the target to call
- // a0: number of arguments
- // a2: start index (to support rest parameters)
- Register registers[] = {a1, a0, a2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallFunctionTemplateDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // a1 : function template info
- // a0 : number of arguments (on the stack, not including receiver)
- Register registers[] = {a1, a0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallWithSpreadDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // a0 : number of arguments (on the stack, not including receiver)
- // a1 : the target to call
- // a2 : the object to spread
- Register registers[] = {a1, a0, a2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallWithArrayLikeDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // a1 : the target to call
- // a2 : the arguments list
- Register registers[] = {a1, a2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructVarargsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // a0 : number of arguments (on the stack, not including receiver)
- // a1 : the target to call
- // a3 : the new target
- // a4 : arguments list length (untagged)
- // a2 : arguments list (FixedArray)
- Register registers[] = {a1, a3, a0, a4, a2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructForwardVarargsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // a1: the target to call
- // a3: new target
- // a0: number of arguments
- // a2: start index (to support rest parameters)
- Register registers[] = {a1, a3, a0, a2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructWithSpreadDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // a0 : number of arguments (on the stack, not including receiver)
- // a1 : the target to call
- // a3 : the new target
- // a2 : the object to spread
- Register registers[] = {a1, a3, a0, a2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructWithArrayLikeDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // a1 : the target to call
- // a3 : the new target
- // a2 : the arguments list
- Register registers[] = {a1, a3, a2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructStubDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // a1: target
- // a3: new target
- // a0: number of arguments
- // a2: allocation site or undefined
- Register registers[] = {a1, a3, a0, a2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void AbortDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {a0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CompareDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {a1, a0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void Compare_BaselineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:11421): Implement on this platform.
- InitializePlatformUnimplemented(data, kParameterCount);
-}
-
-void BinaryOpDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {a1, a0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void BinaryOp_BaselineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:11421): Implement on this platform.
- InitializePlatformUnimplemented(data, kParameterCount);
-}
-
-void ApiCallbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- a1, // kApiFunctionAddress
- a2, // kArgc
- a3, // kCallData
- a0, // kHolder
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void InterpreterDispatchDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister,
- kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void InterpreterPushArgsThenCallDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- a0, // argument count (not including receiver)
- a2, // address of first argument
- a1 // the target callable to be call
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void InterpreterPushArgsThenConstructDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- a0, // argument count (not including receiver)
- a4, // address of the first argument
- a1, // constructor to call
- a3, // new target
- a2, // allocation site feedback if available, undefined otherwise
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ResumeGeneratorDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- v0, // the value to pass to the generator
- a1 // the JSGeneratorObject to resume
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void FrameDropperTrampolineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- a1, // loaded new FP
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void RunMicrotasksEntryDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {a0, a1};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_MIPS64
diff --git a/chromium/v8/src/codegen/mips64/macro-assembler-mips64.cc b/chromium/v8/src/codegen/mips64/macro-assembler-mips64.cc
index 29443a2e58d..a1896624e53 100644
--- a/chromium/v8/src/codegen/mips64/macro-assembler-mips64.cc
+++ b/chromium/v8/src/codegen/mips64/macro-assembler-mips64.cc
@@ -12,6 +12,7 @@
#include "src/codegen/callable.h"
#include "src/codegen/code-factory.h"
#include "src/codegen/external-reference-table.h"
+#include "src/codegen/interface-descriptors-inl.h"
#include "src/codegen/macro-assembler.h"
#include "src/codegen/register-configuration.h"
#include "src/debug/debug.h"
@@ -64,7 +65,7 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
RegList list = kJSCallerSaved & ~exclusions;
bytes += NumRegs(list) * kPointerSize;
- if (fp_mode == kSaveFPRegs) {
+ if (fp_mode == SaveFPRegsMode::kSave) {
bytes += NumRegs(kCallerSavedFPU) * kDoubleSize;
}
@@ -89,7 +90,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
MultiPush(list);
bytes += NumRegs(list) * kPointerSize;
- if (fp_mode == kSaveFPRegs) {
+ if (fp_mode == SaveFPRegsMode::kSave) {
MultiPushFPU(kCallerSavedFPU);
bytes += NumRegs(kCallerSavedFPU) * kDoubleSize;
}
@@ -100,7 +101,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
Register exclusion2, Register exclusion3) {
int bytes = 0;
- if (fp_mode == kSaveFPRegs) {
+ if (fp_mode == SaveFPRegsMode::kSave) {
MultiPopFPU(kCallerSavedFPU);
bytes += NumRegs(kCallerSavedFPU) * kDoubleSize;
}
@@ -177,7 +178,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
Label done;
// Skip barrier if writing a smi.
- if (smi_check == INLINE_SMI_CHECK) {
+ if (smi_check == SmiCheck::kInline) {
JumpIfSmi(value, &done);
}
@@ -186,7 +187,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
DCHECK(IsAligned(offset, kPointerSize));
Daddu(dst, object, Operand(offset - kHeapObjectTag));
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
BlockTrampolinePoolScope block_trampoline_pool(this);
Label ok;
And(t8, dst, Operand(kPointerSize - 1));
@@ -196,13 +197,13 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
}
RecordWrite(object, dst, value, ra_status, save_fp, remembered_set_action,
- OMIT_SMI_CHECK);
+ SmiCheck::kOmit);
bind(&done);
// Clobber clobbered input registers when running with the debug-code flag
// turned on to provoke errors.
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
li(value, Operand(bit_cast<int64_t>(kZapValue + 4)));
li(dst, Operand(bit_cast<int64_t>(kZapValue + 8)));
}
@@ -334,7 +335,7 @@ void MacroAssembler::RecordWrite(Register object, Register address,
DCHECK(!AreAliased(object, address, value, t8));
DCHECK(!AreAliased(object, address, value, t9));
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
Ld(scratch, MemOperand(address));
@@ -342,7 +343,7 @@ void MacroAssembler::RecordWrite(Register object, Register address,
Operand(value));
}
- if ((remembered_set_action == OMIT_REMEMBERED_SET &&
+ if ((remembered_set_action == RememberedSetAction::kOmit &&
!FLAG_incremental_marking) ||
FLAG_disable_write_barriers) {
return;
@@ -352,7 +353,7 @@ void MacroAssembler::RecordWrite(Register object, Register address,
// catch stores of smis and stores into the young generation.
Label done;
- if (smi_check == INLINE_SMI_CHECK) {
+ if (smi_check == SmiCheck::kInline) {
DCHECK_EQ(0, kSmiTag);
JumpIfSmi(value, &done);
}
@@ -377,7 +378,7 @@ void MacroAssembler::RecordWrite(Register object, Register address,
// Clobber clobbered registers when running with the debug-code flag
// turned on to provoke errors.
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
li(address, Operand(bit_cast<int64_t>(kZapValue + 12)));
li(value, Operand(bit_cast<int64_t>(kZapValue + 16)));
}
@@ -4687,14 +4688,6 @@ void TurboAssembler::PushArray(Register array, Register size, Register scratch,
}
}
-void MacroAssembler::MaybeDropFrames() {
- // Check whether we need to drop frames to restart a function on the stack.
- li(a1, ExternalReference::debug_restart_fp_address(isolate()));
- Ld(a1, MemOperand(a1));
- Jump(BUILTIN_CODE(isolate(), FrameDropperTrampoline), RelocInfo::CODE_TARGET,
- ne, a1, Operand(zero_reg));
-}
-
// ---------------------------------------------------------------------------
// Exception handling.
@@ -4887,7 +4880,7 @@ void MacroAssembler::StackOverflowCheck(Register num_args, Register scratch1,
void MacroAssembler::InvokePrologue(Register expected_parameter_count,
Register actual_parameter_count,
- Label* done, InvokeFlag flag) {
+ Label* done, InvokeType type) {
Label regular_invoke;
// a0: actual arguments count
@@ -4999,9 +4992,9 @@ void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
Register expected_parameter_count,
Register actual_parameter_count,
- InvokeFlag flag) {
+ InvokeType type) {
// You can't call a function without a valid frame.
- DCHECK_IMPLIES(flag == CALL_FUNCTION, has_frame());
+ DCHECK_IMPLIES(type == InvokeType::kCall, has_frame());
DCHECK_EQ(function, a1);
DCHECK_IMPLIES(new_target.is_valid(), new_target == a3);
@@ -5015,19 +5008,21 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
}
Label done;
- InvokePrologue(expected_parameter_count, actual_parameter_count, &done, flag);
+ InvokePrologue(expected_parameter_count, actual_parameter_count, &done, type);
// We call indirectly through the code field in the function to
// allow recompilation to take effect without changing any of the
// call sites.
Register code = kJavaScriptCallCodeStartRegister;
Ld(code, FieldMemOperand(function, JSFunction::kCodeOffset));
- if (flag == CALL_FUNCTION) {
- Daddu(code, code, Operand(Code::kHeaderSize - kHeapObjectTag));
- Call(code);
- } else {
- DCHECK(flag == JUMP_FUNCTION);
- Daddu(code, code, Operand(Code::kHeaderSize - kHeapObjectTag));
- Jump(code);
+ switch (type) {
+ case InvokeType::kCall:
+ Daddu(code, code, Operand(Code::kHeaderSize - kHeapObjectTag));
+ Call(code);
+ break;
+ case InvokeType::kJump:
+ Daddu(code, code, Operand(Code::kHeaderSize - kHeapObjectTag));
+ Jump(code);
+ break;
}
// Continue here if InvokePrologue does handle the invocation due to
@@ -5037,9 +5032,9 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
void MacroAssembler::InvokeFunctionWithNewTarget(
Register function, Register new_target, Register actual_parameter_count,
- InvokeFlag flag) {
+ InvokeType type) {
// You can't call a function without a valid frame.
- DCHECK_IMPLIES(flag == CALL_FUNCTION, has_frame());
+ DCHECK_IMPLIES(type == InvokeType::kCall, has_frame());
// Contract with called JS functions requires that function is passed in a1.
DCHECK_EQ(function, a1);
@@ -5053,15 +5048,15 @@ void MacroAssembler::InvokeFunctionWithNewTarget(
SharedFunctionInfo::kFormalParameterCountOffset));
InvokeFunctionCode(a1, new_target, expected_parameter_count,
- actual_parameter_count, flag);
+ actual_parameter_count, type);
}
void MacroAssembler::InvokeFunction(Register function,
Register expected_parameter_count,
Register actual_parameter_count,
- InvokeFlag flag) {
+ InvokeType type) {
// You can't call a function without a valid frame.
- DCHECK_IMPLIES(flag == CALL_FUNCTION, has_frame());
+ DCHECK_IMPLIES(type == InvokeType::kCall, has_frame());
// Contract with called JS functions requires that function is passed in a1.
DCHECK_EQ(function, a1);
@@ -5070,7 +5065,7 @@ void MacroAssembler::InvokeFunction(Register function,
Ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
InvokeFunctionCode(a1, no_reg, expected_parameter_count,
- actual_parameter_count, flag);
+ actual_parameter_count, type);
}
// ---------------------------------------------------------------------------
@@ -5214,8 +5209,8 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
BranchDelaySlot bd,
bool builtin_exit_frame) {
PrepareCEntryFunction(builtin);
- Handle<Code> code = CodeFactory::CEntry(isolate(), 1, kDontSaveFPRegs,
- kArgvOnStack, builtin_exit_frame);
+ Handle<Code> code = CodeFactory::CEntry(isolate(), 1, SaveFPRegsMode::kIgnore,
+ ArgvMode::kStack, builtin_exit_frame);
Jump(code, RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg), bd);
}
@@ -5267,7 +5262,7 @@ void TurboAssembler::DebugBreak() { stop(); }
void TurboAssembler::Assert(Condition cc, AbortReason reason, Register rs,
Operand rt) {
- if (emit_debug_code()) Check(cc, reason, rs, rt);
+ if (FLAG_debug_code) Check(cc, reason, rs, rt);
}
void TurboAssembler::Check(Condition cc, AbortReason reason, Register rs,
@@ -5282,11 +5277,11 @@ void TurboAssembler::Check(Condition cc, AbortReason reason, Register rs,
void TurboAssembler::Abort(AbortReason reason) {
Label abort_start;
bind(&abort_start);
-#ifdef DEBUG
- const char* msg = GetAbortReason(reason);
- RecordComment("Abort message: ");
- RecordComment(msg);
-#endif
+ if (FLAG_code_comments) {
+ const char* msg = GetAbortReason(reason);
+ RecordComment("Abort message: ");
+ RecordComment(msg);
+ }
// Avoid emitting call to builtin if requested.
if (trap_on_abort()) {
@@ -5405,7 +5400,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
// Set up new frame pointer.
daddiu(fp, sp, ExitFrameConstants::kFixedFrameSizeFromFp);
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
Sd(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
@@ -5521,7 +5516,7 @@ int TurboAssembler::ActivationFrameAlignment() {
}
void MacroAssembler::AssertStackIsAligned() {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
const int frame_alignment = ActivationFrameAlignment();
const int frame_alignment_mask = frame_alignment - 1;
@@ -5566,7 +5561,7 @@ void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label,
}
void MacroAssembler::AssertNotSmi(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
STATIC_ASSERT(kSmiTag == 0);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
@@ -5576,7 +5571,7 @@ void MacroAssembler::AssertNotSmi(Register object) {
}
void MacroAssembler::AssertSmi(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
STATIC_ASSERT(kSmiTag == 0);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
@@ -5586,7 +5581,7 @@ void MacroAssembler::AssertSmi(Register object) {
}
void MacroAssembler::AssertConstructor(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
BlockTrampolinePoolScope block_trampoline_pool(this);
STATIC_ASSERT(kSmiTag == 0);
SmiTst(object, t8);
@@ -5601,7 +5596,7 @@ void MacroAssembler::AssertConstructor(Register object) {
}
void MacroAssembler::AssertFunction(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
BlockTrampolinePoolScope block_trampoline_pool(this);
STATIC_ASSERT(kSmiTag == 0);
SmiTst(object, t8);
@@ -5617,7 +5612,7 @@ void MacroAssembler::AssertFunction(Register object) {
}
void MacroAssembler::AssertBoundFunction(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
BlockTrampolinePoolScope block_trampoline_pool(this);
STATIC_ASSERT(kSmiTag == 0);
SmiTst(object, t8);
@@ -5630,7 +5625,7 @@ void MacroAssembler::AssertBoundFunction(Register object) {
}
void MacroAssembler::AssertGeneratorObject(Register object) {
- if (!emit_debug_code()) return;
+ if (!FLAG_debug_code) return;
BlockTrampolinePoolScope block_trampoline_pool(this);
STATIC_ASSERT(kSmiTag == 0);
SmiTst(object, t8);
@@ -5657,7 +5652,7 @@ void MacroAssembler::AssertGeneratorObject(Register object) {
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
Register scratch) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
Label done_checking;
AssertNotSmi(object);
LoadRoot(scratch, RootIndex::kUndefinedValue);
@@ -5949,7 +5944,7 @@ void TurboAssembler::CallCFunctionHelper(Register function,
// PrepareCallCFunction. The C function must be called via t9, for mips ABI.
#if V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
int frame_alignment = base::OS::ActivationFrameAlignment();
int frame_alignment_mask = frame_alignment - 1;
if (frame_alignment > kPointerSize) {
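Note: the macro-assembler hunks above migrate V8's codegen options from bare enumerators (kSaveFPRegs, INLINE_SMI_CHECK, OMIT_REMEMBERED_SET, CALL_FUNCTION) to scoped enum classes, and swap the emit_debug_code() accessor for the FLAG_debug_code flag. A minimal sketch of the scoped-enum shapes these call sites now assume; the real declarations live in shared V8 codegen headers that this patch does not reproduce, so the exact definitions below are an assumption:

    // Sketch only; lists the enumerators actually used by the hunks above.
    enum class SaveFPRegsMode { kIgnore, kSave };
    enum class SmiCheck { kOmit, kInline };
    enum class RememberedSetAction { kOmit, kEmit };
    enum class InvokeType { kCall, kJump };
    enum class ArgvMode { kStack /* other enumerators elided */ };

    // Old style compared against bare enumerators:
    //   if (fp_mode == kSaveFPRegs) { ... }
    // New style qualifies them with the enum class name:
    //   if (fp_mode == SaveFPRegsMode::kSave) { ... }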
diff --git a/chromium/v8/src/codegen/mips64/macro-assembler-mips64.h b/chromium/v8/src/codegen/mips64/macro-assembler-mips64.h
index 756b594edb7..054f3345d1b 100644
--- a/chromium/v8/src/codegen/mips64/macro-assembler-mips64.h
+++ b/chromium/v8/src/codegen/mips64/macro-assembler-mips64.h
@@ -55,8 +55,6 @@ enum LiFlags {
ADDRESS_LOAD = 2
};
-enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
-enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
enum RAStatus { kRAHasNotBeenSaved, kRAHasBeenSaved };
Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
@@ -994,8 +992,8 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void RecordWriteField(
Register object, int offset, Register value, Register scratch,
RAStatus ra_status, SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
+ RememberedSetAction remembered_set_action = RememberedSetAction::kEmit,
+ SmiCheck smi_check = SmiCheck::kInline);
// For a given |object| notify the garbage collector that the slot |address|
// has been written. |value| is the object being stored. The value and
@@ -1003,8 +1001,8 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void RecordWrite(
Register object, Register address, Register value, RAStatus ra_status,
SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
+ RememberedSetAction remembered_set_action = RememberedSetAction::kEmit,
+ SmiCheck smi_check = SmiCheck::kInline);
void Pref(int32_t hint, const MemOperand& rs);
@@ -1072,7 +1070,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Invoke the JavaScript function code by either calling or jumping.
void InvokeFunctionCode(Register function, Register new_target,
Register expected_parameter_count,
- Register actual_parameter_count, InvokeFlag flag);
+ Register actual_parameter_count, InvokeType type);
// On function call, call into the debugger if necessary.
void CheckDebugHook(Register fun, Register new_target,
@@ -1083,12 +1081,9 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// current context to the context in the function before invoking.
void InvokeFunctionWithNewTarget(Register function, Register new_target,
Register actual_parameter_count,
- InvokeFlag flag);
+ InvokeType type);
void InvokeFunction(Register function, Register expected_parameter_count,
- Register actual_parameter_count, InvokeFlag flag);
-
- // Frame restart support.
- void MaybeDropFrames();
+ Register actual_parameter_count, InvokeType type);
// Exception handling.
@@ -1112,18 +1107,18 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Call a runtime routine.
void CallRuntime(const Runtime::Function* f, int num_arguments,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs);
+ SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore);
// Convenience function: Same as above, but takes the fid instead.
void CallRuntime(Runtime::FunctionId fid,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+ SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) {
const Runtime::Function* function = Runtime::FunctionForId(fid);
CallRuntime(function, function->nargs, save_doubles);
}
// Convenience function: Same as above, but takes the fid instead.
void CallRuntime(Runtime::FunctionId fid, int num_arguments,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+ SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) {
CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
}
@@ -1230,7 +1225,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Helper functions for generating invokes.
void InvokePrologue(Register expected_parameter_count,
Register actual_parameter_count, Label* done,
- InvokeFlag flag);
+ InvokeType type);
// Compute memory operands for safepoint stack slots.
static int SafepointRegisterStackIndex(int reg_code);
diff --git a/chromium/v8/src/codegen/optimized-compilation-info.cc b/chromium/v8/src/codegen/optimized-compilation-info.cc
index f6fd5862fdc..4d5c7a1d574 100644
--- a/chromium/v8/src/codegen/optimized-compilation-info.cc
+++ b/chromium/v8/src/codegen/optimized-compilation-info.cc
@@ -23,12 +23,16 @@ namespace internal {
OptimizedCompilationInfo::OptimizedCompilationInfo(
Zone* zone, Isolate* isolate, Handle<SharedFunctionInfo> shared,
- Handle<JSFunction> closure, CodeKind code_kind)
+ Handle<JSFunction> closure, CodeKind code_kind, BytecodeOffset osr_offset,
+ JavaScriptFrame* osr_frame)
: code_kind_(code_kind),
+ osr_offset_(osr_offset),
+ osr_frame_(osr_frame),
zone_(zone),
optimization_id_(isolate->NextOptimizationId()) {
DCHECK_EQ(*shared, closure->shared());
DCHECK(shared->is_compiled());
+ DCHECK_IMPLIES(is_osr(), IsOptimizing());
bytecode_array_ = handle(shared->GetBytecodeArray(isolate), isolate);
shared_info_ = shared;
closure_ = closure;
@@ -64,8 +68,6 @@ bool OptimizedCompilationInfo::FlagSetIsValid(Flag flag) const {
switch (flag) {
case kPoisonRegisterArguments:
return untrusted_code_mitigations();
- case kFunctionContextSpecializing:
- return !IsNativeContextIndependent();
default:
return true;
}
@@ -86,18 +88,22 @@ bool OptimizedCompilationInfo::FlagGetIsValid(Flag flag) const {
void OptimizedCompilationInfo::ConfigureFlags() {
if (FLAG_untrusted_code_mitigations) set_untrusted_code_mitigations();
+ if (FLAG_turbo_inline_js_wasm_calls) set_inline_js_wasm_calls();
+
+ if (!is_osr() && (IsTurboprop() || FLAG_concurrent_inlining)) {
+ set_concurrent_inlining();
+ }
switch (code_kind_) {
case CodeKind::TURBOFAN:
if (FLAG_function_context_specialization) {
set_function_context_specializing();
}
+ if (FLAG_turbo_splitting) set_splitting();
V8_FALLTHROUGH;
case CodeKind::TURBOPROP:
- case CodeKind::NATIVE_CONTEXT_INDEPENDENT:
set_called_with_code_start_register();
set_switch_jump_table();
- if (FLAG_turbo_splitting) set_splitting();
if (FLAG_untrusted_code_mitigations) set_poison_register_arguments();
// TODO(yangguo): Disable this in case of debugging for crbug.com/826613
if (FLAG_analyze_environment_liveness) set_analyze_environment_liveness();
diff --git a/chromium/v8/src/codegen/optimized-compilation-info.h b/chromium/v8/src/codegen/optimized-compilation-info.h
index 20386cbbee0..b5ad1c98162 100644
--- a/chromium/v8/src/codegen/optimized-compilation-info.h
+++ b/chromium/v8/src/codegen/optimized-compilation-info.h
@@ -70,7 +70,9 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
V(TraceTurboAllocation, trace_turbo_allocation, 16) \
V(TraceHeapBroker, trace_heap_broker, 17) \
V(WasmRuntimeExceptionSupport, wasm_runtime_exception_support, 18) \
- V(ConcurrentInlining, concurrent_inlining, 19)
+ V(ConcurrentInlining, concurrent_inlining, 19) \
+ V(DiscardResultForTesting, discard_result_for_testing, 20) \
+ V(InlineJSWasmCalls, inline_js_wasm_calls, 21)
enum Flag {
#define DEF_ENUM(Camel, Lower, Bit) k##Camel = 1 << Bit,
@@ -102,7 +104,15 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
// Construct a compilation info for optimized compilation.
OptimizedCompilationInfo(Zone* zone, Isolate* isolate,
Handle<SharedFunctionInfo> shared,
- Handle<JSFunction> closure, CodeKind code_kind);
+ Handle<JSFunction> closure, CodeKind code_kind,
+ BytecodeOffset osr_offset,
+ JavaScriptFrame* osr_frame);
+ // For testing.
+ OptimizedCompilationInfo(Zone* zone, Isolate* isolate,
+ Handle<SharedFunctionInfo> shared,
+ Handle<JSFunction> closure, CodeKind code_kind)
+ : OptimizedCompilationInfo(zone, isolate, shared, closure, code_kind,
+ BytecodeOffset::None(), nullptr) {}
// Construct a compilation info for stub compilation, Wasm, and testing.
OptimizedCompilationInfo(Vector<const char> debug_name, Zone* zone,
CodeKind code_kind);
@@ -160,21 +170,11 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
bool IsOptimizing() const {
return CodeKindIsOptimizedJSFunction(code_kind());
}
- bool IsNativeContextIndependent() const {
- return code_kind() == CodeKind::NATIVE_CONTEXT_INDEPENDENT;
- }
bool IsTurboprop() const { return code_kind() == CodeKind::TURBOPROP; }
#if V8_ENABLE_WEBASSEMBLY
bool IsWasm() const { return code_kind() == CodeKind::WASM_FUNCTION; }
#endif // V8_ENABLE_WEBASSEMBLY
- void SetOptimizingForOsr(BytecodeOffset osr_offset,
- JavaScriptFrame* osr_frame) {
- DCHECK(IsOptimizing());
- osr_offset_ = osr_offset;
- osr_frame_ = osr_frame;
- }
-
void set_persistent_handles(
std::unique_ptr<PersistentHandles> persistent_handles) {
DCHECK_NULL(ph_);
@@ -293,7 +293,9 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
#endif // V8_ENABLE_WEBASSEMBLY
// Entry point when compiling for OSR, {BytecodeOffset::None} otherwise.
- BytecodeOffset osr_offset_ = BytecodeOffset::None();
+ const BytecodeOffset osr_offset_ = BytecodeOffset::None();
+ // The current OSR frame for specialization or {nullptr}.
+ JavaScriptFrame* const osr_frame_ = nullptr;
// The zone from which the compilation pipeline working on this
// OptimizedCompilationInfo allocates.
@@ -309,9 +311,6 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
const int optimization_id_;
unsigned inlined_bytecode_size_ = 0;
- // The current OSR frame for specialization or {nullptr}.
- JavaScriptFrame* osr_frame_ = nullptr;
-
Vector<const char> debug_name_;
std::unique_ptr<char[]> trace_turbo_filename_;
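Note: the optimized-compilation-info hunks above fold the OSR entry point into the constructor and delete SetOptimizingForOsr(). A hedged sketch of what a call site looks like after this change; the variable names (zone, isolate, shared, closure, osr_offset, osr_frame) are placeholders, not identifiers taken from this patch:

    // Hypothetical caller; the values are assumed to be in scope in the
    // compiler pipeline.
    OptimizedCompilationInfo info(zone, isolate, shared, closure,
                                  CodeKind::TURBOFAN, osr_offset, osr_frame);

    // Tests keep the short form, which delegates with BytecodeOffset::None()
    // and a null osr_frame (see the added testing constructor above).
    OptimizedCompilationInfo test_info(zone, isolate, shared, closure,
                                       CodeKind::TURBOFAN);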
diff --git a/chromium/v8/src/codegen/ppc/assembler-ppc.cc b/chromium/v8/src/codegen/ppc/assembler-ppc.cc
index 7da9484cce1..437f5f96c61 100644
--- a/chromium/v8/src/codegen/ppc/assembler-ppc.cc
+++ b/chromium/v8/src/codegen/ppc/assembler-ppc.cc
@@ -55,7 +55,11 @@ static unsigned CpuFeaturesImpliedByCompiler() {
}
bool CpuFeatures::SupportsWasmSimd128() {
+#if V8_ENABLE_WEBASSEMBLY
return CpuFeatures::IsSupported(SIMD);
+#else
+ return false;
+#endif // V8_ENABLE_WEBASSEMBLY
}
void CpuFeatures::ProbeImpl(bool cross_compile) {
@@ -1824,6 +1828,12 @@ void Assembler::lxvd(const Simd128Register rt, const MemOperand& src) {
TX);
}
+void Assembler::lxvx(const Simd128Register rt, const MemOperand& src) {
+ int TX = 1;
+ emit(LXVX | rt.code() * B21 | src.ra().code() * B16 | src.rb().code() * B11 |
+ TX);
+}
+
void Assembler::lxsdx(const Simd128Register rt, const MemOperand& src) {
int TX = 1;
emit(LXSDX | rt.code() * B21 | src.ra().code() * B16 | src.rb().code() * B11 |
@@ -1878,18 +1888,18 @@ void Assembler::stxvd(const Simd128Register rt, const MemOperand& dst) {
SX);
}
+void Assembler::stxvx(const Simd128Register rt, const MemOperand& dst) {
+ int SX = 1;
+ emit(STXVX | rt.code() * B21 | dst.ra().code() * B16 | dst.rb().code() * B11 |
+ SX);
+}
+
void Assembler::xxspltib(const Simd128Register rt, const Operand& imm) {
int TX = 1;
CHECK(is_uint8(imm.immediate()));
emit(XXSPLTIB | rt.code() * B21 | imm.immediate() * B11 | TX);
}
-void Assembler::xxbrq(const Simd128Register rt, const Simd128Register rb) {
- int BX = 1;
- int TX = 1;
- emit(XXBRQ | rt.code() * B21 | 31 * B16 | rb.code() * B11 | BX * B1 | TX);
-}
-
// Pseudo instructions.
void Assembler::nop(int type) {
Register reg = r0;
diff --git a/chromium/v8/src/codegen/ppc/assembler-ppc.h b/chromium/v8/src/codegen/ppc/assembler-ppc.h
index d5b37fe59fe..1d7ecf76d7c 100644
--- a/chromium/v8/src/codegen/ppc/assembler-ppc.h
+++ b/chromium/v8/src/codegen/ppc/assembler-ppc.h
@@ -448,6 +448,7 @@ class Assembler : public AssemblerBase {
}
PPC_XX2_OPCODE_A_FORM_LIST(DECLARE_PPC_XX2_INSTRUCTIONS)
+ PPC_XX2_OPCODE_B_FORM_LIST(DECLARE_PPC_XX2_INSTRUCTIONS)
#undef DECLARE_PPC_XX2_INSTRUCTIONS
#define DECLARE_PPC_XX3_INSTRUCTIONS(name, instr_name, instr_value) \
@@ -500,6 +501,9 @@ class Assembler : public AssemblerBase {
PPC_VX_OPCODE_A_FORM_LIST(DECLARE_PPC_VX_INSTRUCTIONS_A_FORM)
PPC_VX_OPCODE_B_FORM_LIST(DECLARE_PPC_VX_INSTRUCTIONS_B_FORM)
PPC_VX_OPCODE_C_FORM_LIST(DECLARE_PPC_VX_INSTRUCTIONS_C_FORM)
+ PPC_VX_OPCODE_D_FORM_LIST(
+ DECLARE_PPC_VX_INSTRUCTIONS_C_FORM) /* OPCODE_D_FORM can use
+ INSTRUCTIONS_C_FORM */
#undef DECLARE_PPC_VX_INSTRUCTIONS_A_FORM
#undef DECLARE_PPC_VX_INSTRUCTIONS_B_FORM
#undef DECLARE_PPC_VX_INSTRUCTIONS_C_FORM
@@ -1028,6 +1032,7 @@ class Assembler : public AssemblerBase {
void mtvsrd(const Simd128Register rt, const Register ra);
void mtvsrdd(const Simd128Register rt, const Register ra, const Register rb);
void lxvd(const Simd128Register rt, const MemOperand& src);
+ void lxvx(const Simd128Register rt, const MemOperand& src);
void lxsdx(const Simd128Register rt, const MemOperand& src);
void lxsibzx(const Simd128Register rt, const MemOperand& src);
void lxsihzx(const Simd128Register rt, const MemOperand& src);
@@ -1037,8 +1042,8 @@ class Assembler : public AssemblerBase {
void stxsihx(const Simd128Register rs, const MemOperand& src);
void stxsiwx(const Simd128Register rs, const MemOperand& src);
void stxvd(const Simd128Register rt, const MemOperand& src);
+ void stxvx(const Simd128Register rt, const MemOperand& src);
void xxspltib(const Simd128Register rt, const Operand& imm);
- void xxbrq(const Simd128Register rt, const Simd128Register rb);
// Pseudo instructions
diff --git a/chromium/v8/src/codegen/ppc/constants-ppc.h b/chromium/v8/src/codegen/ppc/constants-ppc.h
index 5b37a2ee110..56732b7f8b9 100644
--- a/chromium/v8/src/codegen/ppc/constants-ppc.h
+++ b/chromium/v8/src/codegen/ppc/constants-ppc.h
@@ -418,6 +418,10 @@ using Instr = uint32_t;
/* Saturate */ \
V(xvcvdpuxws, XVCVDPUXWS, 0xF0000320)
+#define PPC_XX2_OPCODE_B_FORM_LIST(V) \
+ /* Vector Byte-Reverse Quadword */ \
+ V(xxbrq, XXBRQ, 0xF01F076C)
+
#define PPC_XX2_OPCODE_UNUSED_LIST(V) \
/* VSX Scalar Square Root Double-Precision */ \
V(xssqrtdp, XSSQRTDP, 0xF000012C) \
@@ -520,12 +524,11 @@ using Instr = uint32_t;
/* VSX Vector Test for software Square Root Single-Precision */ \
V(xvtsqrtsp, XVTSQRTSP, 0xF00002A8) \
/* Vector Splat Immediate Byte */ \
- V(xxspltib, XXSPLTIB, 0xF00002D0) \
- /* Vector Byte-Reverse Quadword */ \
- V(xxbrq, XXBRQ, 0xF000076C)
+ V(xxspltib, XXSPLTIB, 0xF00002D0)
#define PPC_XX2_OPCODE_LIST(V) \
PPC_XX2_OPCODE_A_FORM_LIST(V) \
+ PPC_XX2_OPCODE_B_FORM_LIST(V) \
PPC_XX2_OPCODE_UNUSED_LIST(V)
#define PPC_EVX_OPCODE_LIST(V) \
@@ -1983,6 +1986,8 @@ using Instr = uint32_t;
V(lxsspx, LXSSPX, 0x7C000418) \
/* Load VSR Vector Doubleword*2 Indexed */ \
V(lxvd, LXVD, 0x7C000698) \
+ /* Load VSX Vector Indexed */ \
+ V(lxvx, LXVX, 0x7C000218) \
/* Load VSR Vector Doubleword & Splat Indexed */ \
V(lxvdsx, LXVDSX, 0x7C000298) \
/* Load VSR Vector Word*4 Indexed */ \
@@ -2011,6 +2016,8 @@ using Instr = uint32_t;
V(stxsspx, STXSSPX, 0x7C000518) \
/* Store VSR Vector Doubleword*2 Indexed */ \
V(stxvd, STXVD, 0x7C000798) \
+ /* Store VSX Vector Indexed */ \
+ V(stxvx, STXVX, 0x7C000318) \
/* Store VSR Vector Word*4 Indexed */ \
V(stxvw, STXVW, 0x7C000718)
@@ -2430,6 +2437,12 @@ using Instr = uint32_t;
/* Vector Population Count Byte */ \
V(vpopcntb, VPOPCNTB, 0x10000703)
+#define PPC_VX_OPCODE_D_FORM_LIST(V) \
+ /* Vector Negate Word */ \
+ V(vnegw, VNEGW, 0x10060602) \
+ /* Vector Negate Doubleword */ \
+ V(vnegd, VNEGD, 0x10070602)
+
#define PPC_VX_OPCODE_UNUSED_LIST(V) \
/* Decimal Add Modulo */ \
V(bcdadd, BCDADD, 0xF0000400) \
@@ -2586,6 +2599,7 @@ using Instr = uint32_t;
PPC_VX_OPCODE_A_FORM_LIST(V) \
PPC_VX_OPCODE_B_FORM_LIST(V) \
PPC_VX_OPCODE_C_FORM_LIST(V) \
+ PPC_VX_OPCODE_D_FORM_LIST(V) \
PPC_VX_OPCODE_UNUSED_LIST(V)
#define PPC_XS_OPCODE_LIST(V) \
@@ -2919,9 +2933,19 @@ class Instruction {
PPC_VA_OPCODE_LIST(OPCODE_CASES)
return static_cast<Opcode>(opcode);
}
+ // Some VX opcodes have integers hard coded in the middle, handle those
+ // first.
+ opcode = extcode | BitField(20, 16) | BitField(10, 0);
+ switch (opcode) {
+ PPC_VX_OPCODE_D_FORM_LIST(OPCODE_CASES)
+ return static_cast<Opcode>(opcode);
+ }
opcode = extcode | BitField(10, 0);
switch (opcode) {
- PPC_VX_OPCODE_LIST(OPCODE_CASES)
+ PPC_VX_OPCODE_A_FORM_LIST(OPCODE_CASES)
+ PPC_VX_OPCODE_B_FORM_LIST(OPCODE_CASES)
+ PPC_VX_OPCODE_C_FORM_LIST(OPCODE_CASES)
+ PPC_VX_OPCODE_UNUSED_LIST(OPCODE_CASES)
PPC_X_OPCODE_EH_S_FORM_LIST(OPCODE_CASES)
return static_cast<Opcode>(opcode);
}
@@ -2935,9 +2959,17 @@ class Instruction {
PPC_XFX_OPCODE_LIST(OPCODE_CASES)
return static_cast<Opcode>(opcode);
}
+ // Some XX2 opcodes have integers hard coded in the middle, handle those
+ // first.
+ opcode = extcode | BitField(20, 16) | BitField(10, 2);
+ switch (opcode) {
+ PPC_XX2_OPCODE_B_FORM_LIST(OPCODE_CASES)
+ return static_cast<Opcode>(opcode);
+ }
opcode = extcode | BitField(10, 2);
switch (opcode) {
- PPC_XX2_OPCODE_LIST(OPCODE_CASES)
+ PPC_XX2_OPCODE_A_FORM_LIST(OPCODE_CASES)
+ PPC_XX2_OPCODE_UNUSED_LIST(OPCODE_CASES)
return static_cast<Opcode>(opcode);
}
opcode = extcode | BitField(10, 1);
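Note: the decoder changes above first match opcodes that hard-code a value in bits 20..16 (xxbrq, vnegw, vnegd) and only then fall back to the plain extended-opcode match. A quick arithmetic check of the new encodings, as a sketch rather than V8 code; the "base" values are simply the listed constants with bits 20..16 cleared:

    // xxbrq: previous encoding 0xF000076C with 31 (0x1F) folded into bits 20..16.
    static_assert((0xF000076Cu | (31u << 16)) == 0xF01F076Cu, "xxbrq");
    // vnegw/vnegd: extended opcode 0x602 with 6 resp. 7 in bits 20..16.
    static_assert((0x10000602u | (6u << 16)) == 0x10060602u, "vnegw");
    static_assert((0x10000602u | (7u << 16)) == 0x10070602u, "vnegd");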
diff --git a/chromium/v8/src/codegen/ppc/interface-descriptors-ppc-inl.h b/chromium/v8/src/codegen/ppc/interface-descriptors-ppc-inl.h
new file mode 100644
index 00000000000..69529a3ce68
--- /dev/null
+++ b/chromium/v8/src/codegen/ppc/interface-descriptors-ppc-inl.h
@@ -0,0 +1,256 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CODEGEN_PPC_INTERFACE_DESCRIPTORS_PPC_INL_H_
+#define V8_CODEGEN_PPC_INTERFACE_DESCRIPTORS_PPC_INL_H_
+
+#if V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
+
+#include "src/codegen/interface-descriptors.h"
+#include "src/execution/frames.h"
+
+namespace v8 {
+namespace internal {
+
+constexpr auto CallInterfaceDescriptor::DefaultRegisterArray() {
+ auto registers = RegisterArray(r3, r4, r5, r6, r7);
+ STATIC_ASSERT(registers.size() == kMaxBuiltinRegisterParams);
+ return registers;
+}
+
+// static
+constexpr auto RecordWriteDescriptor::registers() {
+ return RegisterArray(r3, r4, r5, r6, r7, kReturnRegister0);
+}
+
+// static
+constexpr auto DynamicCheckMapsDescriptor::registers() {
+ return RegisterArray(r3, r4, r5, r6, cp);
+}
+
+// static
+constexpr auto EphemeronKeyBarrierDescriptor::registers() {
+ return RegisterArray(r3, r4, r5, r6, r7, kReturnRegister0);
+}
+
+// static
+constexpr Register LoadDescriptor::ReceiverRegister() { return r4; }
+// static
+constexpr Register LoadDescriptor::NameRegister() { return r5; }
+// static
+constexpr Register LoadDescriptor::SlotRegister() { return r3; }
+
+// static
+constexpr Register LoadWithVectorDescriptor::VectorRegister() { return r6; }
+
+// static
+constexpr Register
+LoadWithReceiverAndVectorDescriptor::LookupStartObjectRegister() {
+ return r7;
+}
+
+// static
+constexpr Register StoreDescriptor::ReceiverRegister() { return r4; }
+// static
+constexpr Register StoreDescriptor::NameRegister() { return r5; }
+// static
+constexpr Register StoreDescriptor::ValueRegister() { return r3; }
+// static
+constexpr Register StoreDescriptor::SlotRegister() { return r7; }
+
+// static
+constexpr Register StoreWithVectorDescriptor::VectorRegister() { return r6; }
+
+// static
+constexpr Register StoreTransitionDescriptor::MapRegister() { return r8; }
+
+// static
+constexpr Register ApiGetterDescriptor::HolderRegister() { return r3; }
+// static
+constexpr Register ApiGetterDescriptor::CallbackRegister() { return r6; }
+
+// static
+constexpr Register GrowArrayElementsDescriptor::ObjectRegister() { return r3; }
+// static
+constexpr Register GrowArrayElementsDescriptor::KeyRegister() { return r6; }
+
+// static
+constexpr Register BaselineLeaveFrameDescriptor::ParamsSizeRegister() {
+ // TODO(v8:11421): Implement on this platform.
+ return r6;
+}
+// static
+constexpr Register BaselineLeaveFrameDescriptor::WeightRegister() {
+ // TODO(v8:11421): Implement on this platform.
+ return r7;
+}
+
+// static
+// static
+constexpr Register TypeConversionDescriptor::ArgumentRegister() { return r3; }
+
+// static
+constexpr auto TypeofDescriptor::registers() { return RegisterArray(r6); }
+
+// static
+constexpr auto CallTrampolineDescriptor::registers() {
+ // r3 : number of arguments
+ // r4 : the target to call
+ return RegisterArray(r4, r3);
+}
+
+// static
+constexpr auto CallVarargsDescriptor::registers() {
+ // r3 : number of arguments (on the stack, not including receiver)
+ // r4 : the target to call
+ // r7 : arguments list length (untagged)
+ // r5 : arguments list (FixedArray)
+ return RegisterArray(r4, r3, r7, r5);
+}
+
+// static
+constexpr auto CallForwardVarargsDescriptor::registers() {
+ // r3 : number of arguments
+ // r5 : start index (to support rest parameters)
+ // r4 : the target to call
+ return RegisterArray(r4, r3, r5);
+}
+
+// static
+constexpr auto CallFunctionTemplateDescriptor::registers() {
+ // r4 : function template info
+ // r5 : number of arguments (on the stack, not including receiver)
+ return RegisterArray(r4, r5);
+}
+
+// static
+constexpr auto CallWithSpreadDescriptor::registers() {
+ // r3 : number of arguments (on the stack, not including receiver)
+ // r4 : the target to call
+ // r5 : the object to spread
+ return RegisterArray(r4, r3, r5);
+}
+
+// static
+constexpr auto CallWithArrayLikeDescriptor::registers() {
+ // r4 : the target to call
+ // r5 : the arguments list
+ return RegisterArray(r4, r5);
+}
+
+// static
+constexpr auto ConstructVarargsDescriptor::registers() {
+ // r3 : number of arguments (on the stack, not including receiver)
+ // r4 : the target to call
+ // r6 : the new target
+ // r7 : arguments list length (untagged)
+ // r5 : arguments list (FixedArray)
+ return RegisterArray(r4, r6, r3, r7, r5);
+}
+
+// static
+constexpr auto ConstructForwardVarargsDescriptor::registers() {
+ // r3 : number of arguments
+ // r6 : the new target
+ // r5 : start index (to support rest parameters)
+ // r4 : the target to call
+ return RegisterArray(r4, r6, r3, r5);
+}
+
+// static
+constexpr auto ConstructWithSpreadDescriptor::registers() {
+ // r3 : number of arguments (on the stack, not including receiver)
+ // r4 : the target to call
+ // r6 : the new target
+ // r5 : the object to spread
+ return RegisterArray(r4, r6, r3, r5);
+}
+
+// static
+constexpr auto ConstructWithArrayLikeDescriptor::registers() {
+ // r4 : the target to call
+ // r6 : the new target
+ // r5 : the arguments list
+ return RegisterArray(r4, r6, r5);
+}
+
+// static
+constexpr auto ConstructStubDescriptor::registers() {
+ // r3 : number of arguments
+ // r4 : the target to call
+ // r6 : the new target
+ // r5 : allocation site or undefined
+ return RegisterArray(r4, r6, r3, r5);
+}
+
+// static
+constexpr auto AbortDescriptor::registers() { return RegisterArray(r4); }
+
+// static
+constexpr auto CompareDescriptor::registers() { return RegisterArray(r4, r3); }
+
+// static
+constexpr auto Compare_BaselineDescriptor::registers() {
+ // TODO(v8:11421): Implement on this platform.
+ return DefaultRegisterArray();
+}
+
+// static
+constexpr auto BinaryOpDescriptor::registers() { return RegisterArray(r4, r3); }
+
+// static
+constexpr auto BinaryOp_BaselineDescriptor::registers() {
+ // TODO(v8:11421): Implement on this platform.
+ return DefaultRegisterArray();
+}
+
+// static
+constexpr auto ApiCallbackDescriptor::registers() {
+ return RegisterArray(r4, // kApiFunctionAddress
+ r5, // kArgc
+ r6, // kCallData
+ r3); // kHolder
+}
+
+// static
+constexpr auto InterpreterDispatchDescriptor::registers() {
+ return RegisterArray(
+ kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister,
+ kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister);
+}
+
+// static
+constexpr auto InterpreterPushArgsThenCallDescriptor::registers() {
+ return RegisterArray(r3, // argument count (not including receiver)
+ r5, // address of first argument
+ r4); // the target callable to be call
+}
+
+// static
+constexpr auto InterpreterPushArgsThenConstructDescriptor::registers() {
+ return RegisterArray(
+ r3, // argument count (not including receiver)
+ r7, // address of the first argument
+ r4, // constructor to call
+ r6, // new target
+ r5); // allocation site feedback if available, undefined otherwise
+}
+
+// static
+constexpr auto ResumeGeneratorDescriptor::registers() {
+ return RegisterArray(r3, // the value to pass to the generator
+ r4); // the JSGeneratorObject to resume
+}
+
+// static
+constexpr auto RunMicrotasksEntryDescriptor::registers() {
+ return RegisterArray(r3, r4);
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
+
+#endif // V8_CODEGEN_PPC_INTERFACE_DESCRIPTORS_PPC_INL_H_
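Note: the new interface-descriptors-ppc-inl.h above replaces the per-descriptor InitializePlatformSpecific() setup (the file deleted next) with constexpr register accessors. As a hedged illustration of the pattern only, a hypothetical descriptor would now be written as:

    // "MyHypotheticalDescriptor" is illustrative and does not exist in V8.
    // static
    constexpr auto MyHypotheticalDescriptor::registers() {
      // r4 : the target to call
      // r3 : number of arguments
      return RegisterArray(r4, r3);
    }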
diff --git a/chromium/v8/src/codegen/ppc/interface-descriptors-ppc.cc b/chromium/v8/src/codegen/ppc/interface-descriptors-ppc.cc
deleted file mode 100644
index ed304e80fc1..00000000000
--- a/chromium/v8/src/codegen/ppc/interface-descriptors-ppc.cc
+++ /dev/null
@@ -1,306 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
-
-#include "src/codegen/interface-descriptors.h"
-
-#include "src/execution/frames.h"
-
-namespace v8 {
-namespace internal {
-
-const Register CallInterfaceDescriptor::ContextRegister() { return cp; }
-
-void CallInterfaceDescriptor::DefaultInitializePlatformSpecific(
- CallInterfaceDescriptorData* data, int register_parameter_count) {
- const Register default_stub_registers[] = {r3, r4, r5, r6, r7};
- CHECK_LE(static_cast<size_t>(register_parameter_count),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(register_parameter_count,
- default_stub_registers);
-}
-
-void RecordWriteDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- const Register default_stub_registers[] = {r3, r4, r5, r6, r7};
-
- data->RestrictAllocatableRegisters(default_stub_registers,
- arraysize(default_stub_registers));
-
- CHECK_LE(static_cast<size_t>(kParameterCount),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
-}
-
-void DynamicCheckMapsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register default_stub_registers[] = {r3, r4, r5, r6, cp};
-
- data->RestrictAllocatableRegisters(default_stub_registers,
- arraysize(default_stub_registers));
-
- CHECK_LE(static_cast<size_t>(kParameterCount),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
-}
-
-void EphemeronKeyBarrierDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- const Register default_stub_registers[] = {r3, r4, r5, r6, r7};
-
- data->RestrictAllocatableRegisters(default_stub_registers,
- arraysize(default_stub_registers));
-
- CHECK_LE(static_cast<size_t>(kParameterCount),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
-}
-
-const Register LoadDescriptor::ReceiverRegister() { return r4; }
-const Register LoadDescriptor::NameRegister() { return r5; }
-const Register LoadDescriptor::SlotRegister() { return r3; }
-
-const Register LoadWithVectorDescriptor::VectorRegister() { return r6; }
-
-const Register
-LoadWithReceiverAndVectorDescriptor::LookupStartObjectRegister() {
- return r7;
-}
-
-const Register StoreDescriptor::ReceiverRegister() { return r4; }
-const Register StoreDescriptor::NameRegister() { return r5; }
-const Register StoreDescriptor::ValueRegister() { return r3; }
-const Register StoreDescriptor::SlotRegister() { return r7; }
-
-const Register StoreWithVectorDescriptor::VectorRegister() { return r6; }
-
-const Register StoreTransitionDescriptor::SlotRegister() { return r7; }
-const Register StoreTransitionDescriptor::VectorRegister() { return r6; }
-const Register StoreTransitionDescriptor::MapRegister() { return r8; }
-
-const Register ApiGetterDescriptor::HolderRegister() { return r3; }
-const Register ApiGetterDescriptor::CallbackRegister() { return r6; }
-
-const Register GrowArrayElementsDescriptor::ObjectRegister() { return r3; }
-const Register GrowArrayElementsDescriptor::KeyRegister() { return r6; }
-
-const Register BaselineLeaveFrameDescriptor::ParamsSizeRegister() {
- // TODO(v8:11421): Implement on this platform.
- UNREACHABLE();
-}
-const Register BaselineLeaveFrameDescriptor::WeightRegister() {
- // TODO(v8:11421): Implement on this platform.
- UNREACHABLE();
-}
-
-// static
-const Register TypeConversionDescriptor::ArgumentRegister() { return r3; }
-
-void TypeofDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r6};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallTrampolineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // r3 : number of arguments
- // r4 : the target to call
- Register registers[] = {r4, r3};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallVarargsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // r3 : number of arguments (on the stack, not including receiver)
- // r4 : the target to call
- // r7 : arguments list length (untagged)
- // r5 : arguments list (FixedArray)
- Register registers[] = {r4, r3, r7, r5};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallForwardVarargsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // r3 : number of arguments
- // r5 : start index (to support rest parameters)
- // r4 : the target to call
- Register registers[] = {r4, r3, r5};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallFunctionTemplateDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // r4 : function template info
- // r5 : number of arguments (on the stack, not including receiver)
- Register registers[] = {r4, r5};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallWithSpreadDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // r3 : number of arguments (on the stack, not including receiver)
- // r4 : the target to call
- // r5 : the object to spread
- Register registers[] = {r4, r3, r5};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallWithArrayLikeDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // r4 : the target to call
- // r5 : the arguments list
- Register registers[] = {r4, r5};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructVarargsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // r3 : number of arguments (on the stack, not including receiver)
- // r4 : the target to call
- // r6 : the new target
- // r7 : arguments list length (untagged)
- // r5 : arguments list (FixedArray)
- Register registers[] = {r4, r6, r3, r7, r5};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructForwardVarargsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // r3 : number of arguments
- // r6 : the new target
- // r5 : start index (to support rest parameters)
- // r4 : the target to call
- Register registers[] = {r4, r6, r3, r5};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructWithSpreadDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // r3 : number of arguments (on the stack, not including receiver)
- // r4 : the target to call
- // r6 : the new target
- // r5 : the object to spread
- Register registers[] = {r4, r6, r3, r5};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructWithArrayLikeDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // r4 : the target to call
- // r6 : the new target
- // r5 : the arguments list
- Register registers[] = {r4, r6, r5};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructStubDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // r3 : number of arguments
- // r4 : the target to call
- // r6 : the new target
- // r5 : allocation site or undefined
- Register registers[] = {r4, r6, r3, r5};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void AbortDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r4};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CompareDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r4, r3};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void Compare_BaselineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:11421): Implement on this platform.
- InitializePlatformUnimplemented(data, kParameterCount);
-}
-
-void BinaryOpDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r4, r3};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void BinaryOp_BaselineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:11421): Implement on this platform.
- InitializePlatformUnimplemented(data, kParameterCount);
-}
-
-void ApiCallbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- r4, // kApiFunctionAddress
- r5, // kArgc
- r6, // kCallData
- r3, // kHolder
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void InterpreterDispatchDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister,
- kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void InterpreterPushArgsThenCallDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- r3, // argument count (not including receiver)
- r5, // address of first argument
- r4 // the target callable to be call
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void InterpreterPushArgsThenConstructDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- r3, // argument count (not including receiver)
- r7, // address of the first argument
- r4, // constructor to call
- r6, // new target
- r5, // allocation site feedback if available, undefined otherwise
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ResumeGeneratorDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- r3, // the value to pass to the generator
- r4 // the JSGeneratorObject to resume
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void FrameDropperTrampolineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- r4, // loaded new FP
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void RunMicrotasksEntryDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r3, r4};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
diff --git a/chromium/v8/src/codegen/ppc/macro-assembler-ppc.cc b/chromium/v8/src/codegen/ppc/macro-assembler-ppc.cc
index 658a41f381f..e9bce8411f1 100644
--- a/chromium/v8/src/codegen/ppc/macro-assembler-ppc.cc
+++ b/chromium/v8/src/codegen/ppc/macro-assembler-ppc.cc
@@ -12,6 +12,7 @@
#include "src/codegen/callable.h"
#include "src/codegen/code-factory.h"
#include "src/codegen/external-reference-table.h"
+#include "src/codegen/interface-descriptors-inl.h"
#include "src/codegen/macro-assembler.h"
#include "src/codegen/register-configuration.h"
#include "src/debug/debug.h"
@@ -55,7 +56,7 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
RegList list = kJSCallerSaved & ~exclusions;
bytes += NumRegs(list) * kSystemPointerSize;
- if (fp_mode == kSaveFPRegs) {
+ if (fp_mode == SaveFPRegsMode::kSave) {
bytes += kNumCallerSavedDoubles * kDoubleSize;
}
@@ -80,7 +81,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
MultiPush(list);
bytes += NumRegs(list) * kSystemPointerSize;
- if (fp_mode == kSaveFPRegs) {
+ if (fp_mode == SaveFPRegsMode::kSave) {
MultiPushDoubles(kCallerSavedDoubles);
bytes += kNumCallerSavedDoubles * kDoubleSize;
}
@@ -91,7 +92,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
Register exclusion2, Register exclusion3) {
int bytes = 0;
- if (fp_mode == kSaveFPRegs) {
+ if (fp_mode == SaveFPRegsMode::kSave) {
MultiPopDoubles(kCallerSavedDoubles);
bytes += kNumCallerSavedDoubles * kDoubleSize;
}
@@ -133,7 +134,7 @@ void TurboAssembler::LoadFromConstantsTable(Register destination,
}
void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) {
- LoadP(destination, MemOperand(kRootRegister, offset), r0);
+ LoadU64(destination, MemOperand(kRootRegister, offset), r0);
}
void TurboAssembler::LoadRootRegisterOffset(Register destination,
@@ -184,7 +185,7 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
Register scratch = ip;
int offset = code->builtin_index() * kSystemPointerSize +
IsolateData::builtin_entry_table_offset();
- LoadP(scratch, MemOperand(kRootRegister, offset), r0);
+ LoadU64(scratch, MemOperand(kRootRegister, offset), r0);
if (cond != al) b(NegateCondition(cond), &skip, cr);
Jump(scratch);
bind(&skip);
@@ -214,9 +215,9 @@ void TurboAssembler::Jump(const ExternalReference& reference) {
if (ABI_USES_FUNCTION_DESCRIPTORS) {
// AIX uses a function descriptor. When calling C code be
// aware of this descriptor and pick up values from it.
- LoadP(ToRegister(ABI_TOC_REGISTER),
- MemOperand(scratch, kSystemPointerSize));
- LoadP(scratch, MemOperand(scratch, 0));
+ LoadU64(ToRegister(ABI_TOC_REGISTER),
+ MemOperand(scratch, kSystemPointerSize));
+ LoadU64(scratch, MemOperand(scratch, 0));
}
Jump(scratch);
}
@@ -272,7 +273,7 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
Label skip;
int offset = code->builtin_index() * kSystemPointerSize +
IsolateData::builtin_entry_table_offset();
- LoadP(ip, MemOperand(kRootRegister, offset));
+ LoadU64(ip, MemOperand(kRootRegister, offset));
if (cond != al) b(NegateCondition(cond), &skip);
Call(ip);
bind(&skip);
@@ -411,7 +412,7 @@ void TurboAssembler::MultiPop(RegList regs, Register location) {
for (int16_t i = 0; i < Register::kNumRegisters; i++) {
if ((regs & (1 << i)) != 0) {
- LoadP(ToRegister(i), MemOperand(location, stack_offset));
+ LoadU64(ToRegister(i), MemOperand(location, stack_offset));
stack_offset += kSystemPointerSize;
}
}
@@ -442,7 +443,7 @@ void TurboAssembler::MultiPushV128(RegList dregs, Register location) {
Simd128Register dreg = Simd128Register::from_code(i);
stack_offset -= kSimd128Size;
li(ip, Operand(stack_offset));
- StoreSimd128(dreg, MemOperand(location, ip), r0, kScratchSimd128Reg);
+ StoreSimd128(dreg, MemOperand(location, ip));
}
}
}
@@ -467,7 +468,7 @@ void TurboAssembler::MultiPopV128(RegList dregs, Register location) {
if ((dregs & (1 << i)) != 0) {
Simd128Register dreg = Simd128Register::from_code(i);
li(ip, Operand(stack_offset));
- LoadSimd128(dreg, MemOperand(location, ip), r0, kScratchSimd128Reg);
+ LoadSimd128(dreg, MemOperand(location, ip));
stack_offset += kSimd128Size;
}
}
@@ -477,8 +478,8 @@ void TurboAssembler::MultiPopV128(RegList dregs, Register location) {
void TurboAssembler::LoadRoot(Register destination, RootIndex index,
Condition cond) {
DCHECK(cond == al);
- LoadP(destination,
- MemOperand(kRootRegister, RootRegisterOffsetForRootIndex(index)), r0);
+ LoadU64(destination,
+ MemOperand(kRootRegister, RootRegisterOffsetForRootIndex(index)), r0);
}
void TurboAssembler::LoadTaggedPointerField(const Register& destination,
@@ -487,7 +488,7 @@ void TurboAssembler::LoadTaggedPointerField(const Register& destination,
if (COMPRESS_POINTERS_BOOL) {
DecompressTaggedPointer(destination, field_operand);
} else {
- LoadP(destination, field_operand, scratch);
+ LoadU64(destination, field_operand, scratch);
}
}
@@ -497,7 +498,7 @@ void TurboAssembler::LoadAnyTaggedField(const Register& destination,
if (COMPRESS_POINTERS_BOOL) {
DecompressAnyTagged(destination, field_operand);
} else {
- LoadP(destination, field_operand, scratch);
+ LoadU64(destination, field_operand, scratch);
}
}
@@ -505,7 +506,7 @@ void TurboAssembler::SmiUntag(Register dst, const MemOperand& src, RCBit rc) {
if (SmiValuesAre31Bits()) {
lwz(dst, src);
} else {
- LoadP(dst, src);
+ LoadU64(dst, src);
}
SmiUntag(dst, rc);
@@ -550,7 +551,7 @@ void TurboAssembler::DecompressTaggedSigned(Register destination,
void TurboAssembler::DecompressTaggedSigned(Register destination,
MemOperand field_operand) {
RecordComment("[ DecompressTaggedSigned");
- LoadWord(destination, field_operand, r0);
+ LoadU32(destination, field_operand, r0);
RecordComment("]");
}
@@ -565,7 +566,7 @@ void TurboAssembler::DecompressTaggedPointer(Register destination,
void TurboAssembler::DecompressTaggedPointer(Register destination,
MemOperand field_operand) {
RecordComment("[ DecompressTaggedPointer");
- LoadWord(destination, field_operand, r0);
+ LoadU32(destination, field_operand, r0);
add(destination, destination, kRootRegister);
RecordComment("]");
}
@@ -573,7 +574,7 @@ void TurboAssembler::DecompressTaggedPointer(Register destination,
void TurboAssembler::DecompressAnyTagged(Register destination,
MemOperand field_operand) {
RecordComment("[ DecompressAnyTagged");
- LoadWord(destination, field_operand, r0);
+ LoadU32(destination, field_operand, r0);
add(destination, destination, kRootRegister);
RecordComment("]");
}
@@ -597,7 +598,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
Label done;
// Skip barrier if writing a smi.
- if (smi_check == INLINE_SMI_CHECK) {
+ if (smi_check == SmiCheck::kInline) {
JumpIfSmi(value, &done);
}
@@ -606,7 +607,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
DCHECK(IsAligned(offset, kTaggedSize));
Add(dst, object, offset - kHeapObjectTag, r0);
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
Label ok;
andi(r0, dst, Operand(kTaggedSize - 1));
beq(&ok, cr0);
@@ -615,13 +616,13 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
}
RecordWrite(object, dst, value, lr_status, save_fp, remembered_set_action,
- OMIT_SMI_CHECK);
+ SmiCheck::kOmit);
bind(&done);
// Clobber clobbered input registers when running with the debug-code flag
// turned on to provoke errors.
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
mov(value, Operand(bit_cast<intptr_t>(kZapValue + 4)));
mov(dst, Operand(bit_cast<intptr_t>(kZapValue + 8)));
}
@@ -752,13 +753,13 @@ void MacroAssembler::RecordWrite(Register object, Register address,
RememberedSetAction remembered_set_action,
SmiCheck smi_check) {
DCHECK(object != value);
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
LoadTaggedPointerField(r0, MemOperand(address));
cmp(r0, value);
Check(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite);
}
- if ((remembered_set_action == OMIT_REMEMBERED_SET &&
+ if ((remembered_set_action == RememberedSetAction::kOmit &&
!FLAG_incremental_marking) ||
FLAG_disable_write_barriers) {
return;
@@ -768,7 +769,7 @@ void MacroAssembler::RecordWrite(Register object, Register address,
// catch stores of smis and stores into the young generation.
Label done;
- if (smi_check == INLINE_SMI_CHECK) {
+ if (smi_check == SmiCheck::kInline) {
JumpIfSmi(value, &done);
}
@@ -794,7 +795,7 @@ void MacroAssembler::RecordWrite(Register object, Register address,
// Clobber clobbered registers when running with the debug-code flag
// turned on to provoke errors.
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
mov(address, Operand(bit_cast<intptr_t>(kZapValue + 12)));
mov(value, Operand(bit_cast<intptr_t>(kZapValue + 16)));
}
@@ -849,12 +850,12 @@ void TurboAssembler::PushStandardFrame(Register function_reg) {
void TurboAssembler::RestoreFrameStateForTailCall() {
if (FLAG_enable_embedded_constant_pool) {
- LoadP(kConstantPoolRegister,
- MemOperand(fp, StandardFrameConstants::kConstantPoolOffset));
+ LoadU64(kConstantPoolRegister,
+ MemOperand(fp, StandardFrameConstants::kConstantPoolOffset));
set_constant_pool_available(false);
}
- LoadP(r0, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
- LoadP(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ LoadU64(r0, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
+ LoadU64(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
mtlr(r0);
}
@@ -1174,11 +1175,11 @@ int TurboAssembler::LeaveFrame(StackFrame::Type type, int stack_adjustment) {
// Drop the execution stack down to the frame pointer and restore
// the caller's state.
int frame_ends;
- LoadP(r0, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
- LoadP(ip, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ LoadU64(r0, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
+ LoadU64(ip, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
if (FLAG_enable_embedded_constant_pool) {
- LoadP(kConstantPoolRegister,
- MemOperand(fp, StandardFrameConstants::kConstantPoolOffset));
+ LoadU64(kConstantPoolRegister,
+ MemOperand(fp, StandardFrameConstants::kConstantPoolOffset));
}
mtlr(r0);
frame_ends = pc_offset();
@@ -1221,7 +1222,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
// Reserve room for saved entry sp.
subi(sp, fp, Operand(ExitFrameConstants::kFixedFrameSizeFromFp));
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
li(r8, Operand::Zero());
StoreP(r8, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
@@ -1305,7 +1306,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
// Restore current context from top and clear it in debug mode.
Move(ip,
ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
- LoadP(cp, MemOperand(ip));
+ LoadU64(cp, MemOperand(ip));
#ifdef DEBUG
mov(r6, Operand(Context::kInvalidContext));
@@ -1393,7 +1394,7 @@ void MacroAssembler::LoadStackLimit(Register destination, StackLimitKind kind) {
intptr_t offset =
TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
CHECK(is_int32(offset));
- LoadP(destination, MemOperand(kRootRegister, offset), r0);
+ LoadU64(destination, MemOperand(kRootRegister, offset), r0);
}
void MacroAssembler::StackOverflowCheck(Register num_args, Register scratch,
@@ -1413,7 +1414,7 @@ void MacroAssembler::StackOverflowCheck(Register num_args, Register scratch,
void MacroAssembler::InvokePrologue(Register expected_parameter_count,
Register actual_parameter_count,
- Label* done, InvokeFlag flag) {
+ Label* done, InvokeType type) {
Label regular_invoke;
// r3: actual arguments count
@@ -1528,9 +1529,9 @@ void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
Register expected_parameter_count,
Register actual_parameter_count,
- InvokeFlag flag) {
+ InvokeType type) {
// You can't call a function without a valid frame.
- DCHECK_IMPLIES(flag == CALL_FUNCTION, has_frame());
+ DCHECK_IMPLIES(type == InvokeType::kCall, has_frame());
DCHECK_EQ(function, r4);
DCHECK_IMPLIES(new_target.is_valid(), new_target == r6);
@@ -1544,18 +1545,20 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
}
Label done;
- InvokePrologue(expected_parameter_count, actual_parameter_count, &done, flag);
+ InvokePrologue(expected_parameter_count, actual_parameter_count, &done, type);
// We call indirectly through the code field in the function to
// allow recompilation to take effect without changing any of the
// call sites.
Register code = kJavaScriptCallCodeStartRegister;
LoadTaggedPointerField(code,
FieldMemOperand(function, JSFunction::kCodeOffset));
- if (flag == CALL_FUNCTION) {
- CallCodeObject(code);
- } else {
- DCHECK(flag == JUMP_FUNCTION);
- JumpCodeObject(code);
+ switch (type) {
+ case InvokeType::kCall:
+ CallCodeObject(code);
+ break;
+ case InvokeType::kJump:
+ JumpCodeObject(code);
+ break;
}
// Continue here if InvokePrologue does handle the invocation due to
@@ -1565,9 +1568,9 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
void MacroAssembler::InvokeFunctionWithNewTarget(
Register fun, Register new_target, Register actual_parameter_count,
- InvokeFlag flag) {
+ InvokeType type) {
// You can't call a function without a valid frame.
- DCHECK_IMPLIES(flag == CALL_FUNCTION, has_frame());
+ DCHECK_IMPLIES(type == InvokeType::kCall, has_frame());
// Contract with called JS functions requires that function is passed in r4.
DCHECK_EQ(fun, r4);
@@ -1583,15 +1586,15 @@ void MacroAssembler::InvokeFunctionWithNewTarget(
temp_reg, SharedFunctionInfo::kFormalParameterCountOffset));
InvokeFunctionCode(fun, new_target, expected_reg, actual_parameter_count,
- flag);
+ type);
}
void MacroAssembler::InvokeFunction(Register function,
Register expected_parameter_count,
Register actual_parameter_count,
- InvokeFlag flag) {
+ InvokeType type) {
// You can't call a function without a valid frame.
- DCHECK_IMPLIES(flag == CALL_FUNCTION, has_frame());
+ DCHECK_IMPLIES(type == InvokeType::kCall, has_frame());
// Contract with called JS functions requires that function is passed in r4.
DCHECK_EQ(function, r4);
@@ -1600,18 +1603,7 @@ void MacroAssembler::InvokeFunction(Register function,
LoadTaggedPointerField(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
InvokeFunctionCode(r4, no_reg, expected_parameter_count,
- actual_parameter_count, flag);
-}
-
-void MacroAssembler::MaybeDropFrames() {
- // Check whether we need to drop frames to restart a function on the stack.
- ExternalReference restart_fp =
- ExternalReference::debug_restart_fp_address(isolate());
- Move(r4, restart_fp);
- LoadP(r4, MemOperand(r4));
- cmpi(r4, Operand::Zero());
- Jump(BUILTIN_CODE(isolate(), FrameDropperTrampoline), RelocInfo::CODE_TARGET,
- ne);
+ actual_parameter_count, type);
}
void MacroAssembler::PushStackHandler() {
@@ -1625,7 +1617,7 @@ void MacroAssembler::PushStackHandler() {
// Preserve r4-r8.
Move(r3,
ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate()));
- LoadP(r0, MemOperand(r3));
+ LoadU64(r0, MemOperand(r3));
push(r0);
// Set this new handler as the current one.
@@ -1806,7 +1798,7 @@ void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
Call(BUILTIN_CODE(isolate, DoubleToI), RelocInfo::CODE_TARGET);
}
- LoadP(result, MemOperand(sp));
+ LoadU64(result, MemOperand(sp));
addi(sp, sp, Operand(kDoubleSize));
pop(r0);
mtlr(r0);
@@ -1873,8 +1865,8 @@ void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
bool builtin_exit_frame) {
Move(r4, builtin);
- Handle<Code> code = CodeFactory::CEntry(isolate(), 1, kDontSaveFPRegs,
- kArgvOnStack, builtin_exit_frame);
+ Handle<Code> code = CodeFactory::CEntry(isolate(), 1, SaveFPRegsMode::kIgnore,
+ ArgvMode::kStack, builtin_exit_frame);
Jump(code, RelocInfo::CODE_TARGET);
}
@@ -1921,7 +1913,7 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
}
void TurboAssembler::Assert(Condition cond, AbortReason reason, CRegister cr) {
- if (emit_debug_code()) Check(cond, reason, cr);
+ if (FLAG_debug_code) Check(cond, reason, cr);
}
void TurboAssembler::Check(Condition cond, AbortReason reason, CRegister cr) {
@@ -1935,11 +1927,11 @@ void TurboAssembler::Check(Condition cond, AbortReason reason, CRegister cr) {
void TurboAssembler::Abort(AbortReason reason) {
Label abort_start;
bind(&abort_start);
-#ifdef DEBUG
- const char* msg = GetAbortReason(reason);
- RecordComment("Abort message: ");
- RecordComment(msg);
-#endif
+ if (FLAG_code_comments) {
+ const char* msg = GetAbortReason(reason);
+ RecordComment("Abort message: ");
+ RecordComment(msg);
+ }
// Avoid emitting call to builtin if requested.
if (trap_on_abort()) {
@@ -1984,7 +1976,7 @@ void MacroAssembler::LoadNativeContextSlot(Register dst, int index) {
}
void MacroAssembler::AssertNotSmi(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
STATIC_ASSERT(kSmiTag == 0);
TestIfSmi(object, r0);
Check(ne, AbortReason::kOperandIsASmi, cr0);
@@ -1992,7 +1984,7 @@ void MacroAssembler::AssertNotSmi(Register object) {
}
void MacroAssembler::AssertSmi(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
STATIC_ASSERT(kSmiTag == 0);
TestIfSmi(object, r0);
Check(eq, AbortReason::kOperandIsNotASmi, cr0);
@@ -2000,7 +1992,7 @@ void MacroAssembler::AssertSmi(Register object) {
}
void MacroAssembler::AssertConstructor(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
STATIC_ASSERT(kSmiTag == 0);
TestIfSmi(object, r0);
Check(ne, AbortReason::kOperandIsASmiAndNotAConstructor, cr0);
@@ -2014,7 +2006,7 @@ void MacroAssembler::AssertConstructor(Register object) {
}
void MacroAssembler::AssertFunction(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
STATIC_ASSERT(kSmiTag == 0);
TestIfSmi(object, r0);
Check(ne, AbortReason::kOperandIsASmiAndNotAFunction, cr0);
@@ -2028,7 +2020,7 @@ void MacroAssembler::AssertFunction(Register object) {
}
void MacroAssembler::AssertBoundFunction(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
STATIC_ASSERT(kSmiTag == 0);
TestIfSmi(object, r0);
Check(ne, AbortReason::kOperandIsASmiAndNotABoundFunction, cr0);
@@ -2040,7 +2032,7 @@ void MacroAssembler::AssertBoundFunction(Register object) {
}
void MacroAssembler::AssertGeneratorObject(Register object) {
- if (!emit_debug_code()) return;
+ if (!FLAG_debug_code) return;
TestIfSmi(object, r0);
Check(ne, AbortReason::kOperandIsASmiAndNotAGeneratorObject, cr0);
@@ -2070,7 +2062,7 @@ void MacroAssembler::AssertGeneratorObject(Register object) {
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
Register scratch) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
Label done_checking;
AssertNotSmi(object);
CompareRoot(object, RootIndex::kUndefinedValue);
@@ -2217,9 +2209,9 @@ void TurboAssembler::CallCFunctionHelper(Register function,
if (ABI_USES_FUNCTION_DESCRIPTORS && has_function_descriptor) {
// AIX/PPC64BE Linux uses a function descriptor. When calling C code be
// aware of this descriptor and pick up values from it
- LoadP(ToRegister(ABI_TOC_REGISTER),
- MemOperand(function, kSystemPointerSize));
- LoadP(ip, MemOperand(function, 0));
+ LoadU64(ToRegister(ABI_TOC_REGISTER),
+ MemOperand(function, kSystemPointerSize));
+ LoadU64(ip, MemOperand(function, 0));
dest = ip;
} else if (ABI_CALL_VIA_IP) {
    // pLinux and Simulator, not AIX
@@ -2251,7 +2243,7 @@ void TurboAssembler::CallCFunctionHelper(Register function,
CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
int stack_space = kNumRequiredStackFrameSlots + stack_passed_arguments;
if (ActivationFrameAlignment() > kSystemPointerSize) {
- LoadP(sp, MemOperand(sp, stack_space * kSystemPointerSize));
+ LoadU64(sp, MemOperand(sp, stack_space * kSystemPointerSize));
} else {
addi(sp, sp, Operand(stack_space * kSystemPointerSize));
}
@@ -2263,7 +2255,7 @@ void TurboAssembler::CheckPageFlag(
int mask, Condition cc, Label* condition_met) {
DCHECK(cc == ne || cc == eq);
ClearRightImm(scratch, object, Operand(kPageSizeBits));
- LoadP(scratch, MemOperand(scratch, BasicMemoryChunk::kFlagsOffset));
+ LoadU64(scratch, MemOperand(scratch, BasicMemoryChunk::kFlagsOffset));
mov(r0, Operand(mask));
and_(r0, scratch, r0, SetRC);
@@ -2703,8 +2695,8 @@ void MacroAssembler::AndSmiLiteral(Register dst, Register src, Smi smi,
}
// Load a "pointer" sized value from the memory location
-void TurboAssembler::LoadP(Register dst, const MemOperand& mem,
- Register scratch) {
+void TurboAssembler::LoadU64(Register dst, const MemOperand& mem,
+ Register scratch) {
DCHECK_EQ(mem.rb(), no_reg);
int offset = mem.offset();
int misaligned = (offset & 3);
@@ -2797,43 +2789,41 @@ void TurboAssembler::StorePU(Register src, const MemOperand& mem,
}
}
-void TurboAssembler::LoadWordArith(Register dst, const MemOperand& mem,
- Register scratch) {
+void TurboAssembler::LoadS32(Register dst, const MemOperand& mem,
+ Register scratch) {
int offset = mem.offset();
if (!is_int16(offset)) {
- DCHECK(scratch != no_reg);
+ CHECK(scratch != no_reg);
mov(scratch, Operand(offset));
lwax(dst, MemOperand(mem.ra(), scratch));
} else {
-#if V8_TARGET_ARCH_PPC64
int misaligned = (offset & 3);
if (misaligned) {
// adjust base to conform to offset alignment requirements
// Todo: enhance to use scratch if dst is unsuitable
- DCHECK(dst != r0);
+ CHECK(dst != r0);
addi(dst, mem.ra(), Operand((offset & 3) - 4));
lwa(dst, MemOperand(dst, (offset & ~3) + 4));
} else {
lwa(dst, mem);
}
-#else
- lwz(dst, mem);
-#endif
}
}
// Variable length depending on whether offset fits into immediate field
// MemOperand currently only supports d-form
-void TurboAssembler::LoadWord(Register dst, const MemOperand& mem,
- Register scratch) {
+void TurboAssembler::LoadU32(Register dst, const MemOperand& mem,
+ Register scratch) {
Register base = mem.ra();
int offset = mem.offset();
if (!is_int16(offset)) {
- LoadIntLiteral(scratch, offset);
+ CHECK(scratch != no_reg);
+ mov(scratch, Operand(offset));
lwzx(dst, MemOperand(base, scratch));
} else {
+    // lwz can handle a misaligned offset
lwz(dst, mem);
}
}
@@ -2992,22 +2982,8 @@ void TurboAssembler::LoadSingleU(DoubleRegister dst, const MemOperand& mem,
}
}
-void TurboAssembler::LoadSimd128(Simd128Register dst, const MemOperand& mem,
- Register ScratchReg,
- Simd128Register ScratchDoubleReg) {
- // lvx needs the stack to be 16 byte aligned.
- // We first use lxvd/stxvd to copy the content on an aligned address. lxvd
- // itself reverses the lanes so it cannot be used as is.
- lxvd(ScratchDoubleReg, mem);
- mr(ScratchReg, sp);
- ClearRightImm(
- sp, sp,
- Operand(base::bits::WhichPowerOfTwo(16))); // equivalent to &= -16
- addi(sp, sp, Operand(-16));
- stxvd(ScratchDoubleReg, MemOperand(r0, sp));
- // Load it with correct lane ordering.
- lvx(dst, MemOperand(r0, sp));
- mr(sp, ScratchReg);
+void TurboAssembler::LoadSimd128(Simd128Register dst, const MemOperand& mem) {
+ lxvx(dst, mem);
}
void TurboAssembler::StoreDouble(DoubleRegister src, const MemOperand& mem,
@@ -3062,21 +3038,8 @@ void TurboAssembler::StoreSingleU(DoubleRegister src, const MemOperand& mem,
}
}
-void TurboAssembler::StoreSimd128(Simd128Register src, const MemOperand& mem,
- Register ScratchReg,
- Simd128Register ScratchDoubleReg) {
- // stvx needs the stack to be 16 byte aligned.
- // We use lxvd/stxvd to store the content on an aligned address. stxvd
- // itself reverses the lanes so it cannot be used as is.
- mr(ScratchReg, sp);
- ClearRightImm(
- sp, sp,
- Operand(base::bits::WhichPowerOfTwo(16))); // equivalent to &= -16
- addi(sp, sp, Operand(-16));
- stvx(src, MemOperand(r0, sp));
- lxvd(ScratchDoubleReg, MemOperand(r0, sp));
- mr(sp, ScratchReg);
- stxvd(ScratchDoubleReg, mem);
+void TurboAssembler::StoreSimd128(Simd128Register src, const MemOperand& mem) {
+ stxvx(src, mem);
}
Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
@@ -3115,7 +3078,7 @@ void TurboAssembler::SwapP(Register src, MemOperand dst, Register scratch) {
DCHECK(!AreAliased(src, dst.rb(), scratch));
DCHECK(!AreAliased(src, scratch));
mr(scratch, src);
- LoadP(src, dst, r0);
+ LoadU64(src, dst, r0);
StoreP(scratch, dst, r0);
}
@@ -3137,14 +3100,14 @@ void TurboAssembler::SwapP(MemOperand src, MemOperand dst, Register scratch_0,
src = dst;
dst = temp;
}
- LoadP(scratch_1, dst, scratch_0);
- LoadP(scratch_0, src);
+ LoadU64(scratch_1, dst, scratch_0);
+ LoadU64(scratch_0, src);
StoreP(scratch_1, src);
StoreP(scratch_0, dst, scratch_1);
} else {
- LoadP(scratch_1, dst, scratch_0);
+ LoadU64(scratch_1, dst, scratch_0);
push(scratch_1);
- LoadP(scratch_0, src, scratch_1);
+ LoadU64(scratch_0, src, scratch_1);
StoreP(scratch_0, dst, scratch_1);
pop(scratch_1);
StoreP(scratch_1, src, scratch_0);
@@ -3218,13 +3181,13 @@ void TurboAssembler::SwapSimd128(Simd128Register src, MemOperand dst,
DCHECK(src != scratch);
// push v0, to be used as scratch
addi(sp, sp, Operand(-kSimd128Size));
- StoreSimd128(v0, MemOperand(r0, sp), r0, scratch);
+ StoreSimd128(v0, MemOperand(r0, sp));
mov(ip, Operand(dst.offset()));
- LoadSimd128(v0, MemOperand(dst.ra(), ip), r0, scratch);
- StoreSimd128(src, MemOperand(dst.ra(), ip), r0, scratch);
+ LoadSimd128(v0, MemOperand(dst.ra(), ip));
+ StoreSimd128(src, MemOperand(dst.ra(), ip));
vor(src, v0, v0);
// restore v0
- LoadSimd128(v0, MemOperand(r0, sp), ip, scratch);
+ LoadSimd128(v0, MemOperand(r0, sp));
addi(sp, sp, Operand(kSimd128Size));
}
@@ -3232,23 +3195,23 @@ void TurboAssembler::SwapSimd128(MemOperand src, MemOperand dst,
Simd128Register scratch) {
// push v0 and v1, to be used as scratch
addi(sp, sp, Operand(2 * -kSimd128Size));
- StoreSimd128(v0, MemOperand(r0, sp), ip, scratch);
+ StoreSimd128(v0, MemOperand(r0, sp));
li(ip, Operand(kSimd128Size));
- StoreSimd128(v1, MemOperand(ip, sp), r0, scratch);
+ StoreSimd128(v1, MemOperand(ip, sp));
mov(ip, Operand(src.offset()));
- LoadSimd128(v0, MemOperand(src.ra(), ip), r0, scratch);
+ LoadSimd128(v0, MemOperand(src.ra(), ip));
mov(ip, Operand(dst.offset()));
- LoadSimd128(v1, MemOperand(dst.ra(), ip), r0, scratch);
+ LoadSimd128(v1, MemOperand(dst.ra(), ip));
- StoreSimd128(v0, MemOperand(dst.ra(), ip), r0, scratch);
+ StoreSimd128(v0, MemOperand(dst.ra(), ip));
mov(ip, Operand(src.offset()));
- StoreSimd128(v1, MemOperand(src.ra(), ip), r0, scratch);
+ StoreSimd128(v1, MemOperand(src.ra(), ip));
// restore v0 and v1
- LoadSimd128(v0, MemOperand(r0, sp), ip, scratch);
+ LoadSimd128(v0, MemOperand(r0, sp));
li(ip, Operand(kSimd128Size));
- LoadSimd128(v1, MemOperand(ip, sp), r0, scratch);
+ LoadSimd128(v1, MemOperand(ip, sp));
addi(sp, sp, Operand(2 * kSimd128Size));
}
@@ -3313,7 +3276,7 @@ void TurboAssembler::LoadCodeObjectEntry(Register destination,
// Check whether the Code object is an off-heap trampoline. If so, call its
// (off-heap) entry point directly without going through the (on-heap)
// trampoline. Otherwise, just call the Code object as always.
- LoadWordArith(scratch, FieldMemOperand(code_object, Code::kFlagsOffset));
+ LoadS32(scratch, FieldMemOperand(code_object, Code::kFlagsOffset));
mov(r0, Operand(Code::IsOffHeapTrampoline::kMask));
and_(r0, scratch, r0, SetRC);
bne(&if_code_is_off_heap, cr0);
@@ -3326,13 +3289,12 @@ void TurboAssembler::LoadCodeObjectEntry(Register destination,
// An off-heap trampoline, the entry point is loaded from the builtin entry
// table.
bind(&if_code_is_off_heap);
- LoadWordArith(scratch,
- FieldMemOperand(code_object, Code::kBuiltinIndexOffset));
+ LoadS32(scratch, FieldMemOperand(code_object, Code::kBuiltinIndexOffset));
ShiftLeftImm(destination, scratch, Operand(kSystemPointerSizeLog2));
add(destination, destination, kRootRegister);
- LoadP(destination,
- MemOperand(destination, IsolateData::builtin_entry_table_offset()),
- r0);
+ LoadU64(destination,
+ MemOperand(destination, IsolateData::builtin_entry_table_offset()),
+ r0);
bind(&out);
} else {
@@ -3366,8 +3328,9 @@ void TurboAssembler::StoreReturnAddressAndCall(Register target) {
if (ABI_USES_FUNCTION_DESCRIPTORS) {
// AIX/PPC64BE Linux uses a function descriptor. When calling C code be
// aware of this descriptor and pick up values from it
- LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(target, kSystemPointerSize));
- LoadP(ip, MemOperand(target, 0));
+ LoadU64(ToRegister(ABI_TOC_REGISTER),
+ MemOperand(target, kSystemPointerSize));
+ LoadU64(ip, MemOperand(target, 0));
dest = ip;
} else if (ABI_CALL_VIA_IP && dest != ip) {
Move(ip, target);
@@ -3388,8 +3351,8 @@ void TurboAssembler::CallForDeoptimization(Builtins::Name target, int,
Label* exit, DeoptimizeKind kind,
Label* ret, Label*) {
BlockTrampolinePoolScope block_trampoline_pool(this);
- LoadP(ip, MemOperand(kRootRegister,
- IsolateData::builtin_entry_slot_offset(target)));
+ LoadU64(ip, MemOperand(kRootRegister,
+ IsolateData::builtin_entry_slot_offset(target)));
Call(ip);
DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
(kind == DeoptimizeKind::kLazy)
diff --git a/chromium/v8/src/codegen/ppc/macro-assembler-ppc.h b/chromium/v8/src/codegen/ppc/macro-assembler-ppc.h
index 1d8f3a388d4..f657f90f760 100644
--- a/chromium/v8/src/codegen/ppc/macro-assembler-ppc.h
+++ b/chromium/v8/src/codegen/ppc/macro-assembler-ppc.h
@@ -28,8 +28,6 @@ inline MemOperand FieldMemOperand(Register object, int offset) {
return MemOperand(object, offset - kHeapObjectTag);
}
-enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
-enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
@@ -149,10 +147,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
}
// These exist to provide portability between 32 and 64bit
- void LoadP(Register dst, const MemOperand& mem, Register scratch = no_reg);
+ void LoadU64(Register dst, const MemOperand& mem, Register scratch = no_reg);
void LoadPU(Register dst, const MemOperand& mem, Register scratch = no_reg);
- void LoadWordArith(Register dst, const MemOperand& mem,
- Register scratch = no_reg);
+ void LoadS32(Register dst, const MemOperand& mem, Register scratch = no_reg);
void StoreP(Register src, const MemOperand& mem, Register scratch = no_reg);
void StorePU(Register src, const MemOperand& mem, Register scratch = no_reg);
@@ -161,8 +158,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void LoadFloat32(DoubleRegister dst, const MemOperand& mem,
Register scratch = no_reg);
void LoadDoubleLiteral(DoubleRegister result, Double value, Register scratch);
- void LoadSimd128(Simd128Register dst, const MemOperand& mem,
- Register ScratchReg, Simd128Register ScratchDoubleReg);
+ void LoadSimd128(Simd128Register dst, const MemOperand& mem);
// load a literal signed int value <value> to GPR <dst>
void LoadIntLiteral(Register dst, int value);
@@ -185,8 +181,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
Register scratch = no_reg);
void StoreSingleU(DoubleRegister src, const MemOperand& mem,
Register scratch = no_reg);
- void StoreSimd128(Simd128Register src, const MemOperand& mem,
- Register ScratchReg, Simd128Register ScratchDoubleReg);
+ void StoreSimd128(Simd128Register src, const MemOperand& mem);
void Cmpi(Register src1, const Operand& src2, Register scratch,
CRegister cr = cr7);
@@ -253,36 +248,36 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Pop two registers. Pops rightmost register first (from lower address).
void Pop(Register src1, Register src2) {
- LoadP(src2, MemOperand(sp, 0));
- LoadP(src1, MemOperand(sp, kSystemPointerSize));
+ LoadU64(src2, MemOperand(sp, 0));
+ LoadU64(src1, MemOperand(sp, kSystemPointerSize));
addi(sp, sp, Operand(2 * kSystemPointerSize));
}
// Pop three registers. Pops rightmost register first (from lower address).
void Pop(Register src1, Register src2, Register src3) {
- LoadP(src3, MemOperand(sp, 0));
- LoadP(src2, MemOperand(sp, kSystemPointerSize));
- LoadP(src1, MemOperand(sp, 2 * kSystemPointerSize));
+ LoadU64(src3, MemOperand(sp, 0));
+ LoadU64(src2, MemOperand(sp, kSystemPointerSize));
+ LoadU64(src1, MemOperand(sp, 2 * kSystemPointerSize));
addi(sp, sp, Operand(3 * kSystemPointerSize));
}
// Pop four registers. Pops rightmost register first (from lower address).
void Pop(Register src1, Register src2, Register src3, Register src4) {
- LoadP(src4, MemOperand(sp, 0));
- LoadP(src3, MemOperand(sp, kSystemPointerSize));
- LoadP(src2, MemOperand(sp, 2 * kSystemPointerSize));
- LoadP(src1, MemOperand(sp, 3 * kSystemPointerSize));
+ LoadU64(src4, MemOperand(sp, 0));
+ LoadU64(src3, MemOperand(sp, kSystemPointerSize));
+ LoadU64(src2, MemOperand(sp, 2 * kSystemPointerSize));
+ LoadU64(src1, MemOperand(sp, 3 * kSystemPointerSize));
addi(sp, sp, Operand(4 * kSystemPointerSize));
}
// Pop five registers. Pops rightmost register first (from lower address).
void Pop(Register src1, Register src2, Register src3, Register src4,
Register src5) {
- LoadP(src5, MemOperand(sp, 0));
- LoadP(src4, MemOperand(sp, kSystemPointerSize));
- LoadP(src3, MemOperand(sp, 2 * kSystemPointerSize));
- LoadP(src2, MemOperand(sp, 3 * kSystemPointerSize));
- LoadP(src1, MemOperand(sp, 4 * kSystemPointerSize));
+ LoadU64(src5, MemOperand(sp, 0));
+ LoadU64(src4, MemOperand(sp, kSystemPointerSize));
+ LoadU64(src3, MemOperand(sp, 2 * kSystemPointerSize));
+ LoadU64(src2, MemOperand(sp, 3 * kSystemPointerSize));
+ LoadU64(src1, MemOperand(sp, 4 * kSystemPointerSize));
addi(sp, sp, Operand(5 * kSystemPointerSize));
}
@@ -716,7 +711,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void DecompressAnyTagged(Register destination, MemOperand field_operand);
void DecompressAnyTagged(Register destination, Register source);
- void LoadWord(Register dst, const MemOperand& mem, Register scratch);
+ void LoadU32(Register dst, const MemOperand& mem, Register scratch);
void StoreWord(Register src, const MemOperand& mem, Register scratch);
private:
@@ -743,7 +738,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// TODO(victorgomes): Remove this function once we stick with the reversed
// arguments order.
void LoadReceiver(Register dest, Register argc) {
- LoadP(dest, MemOperand(sp, 0));
+ LoadU64(dest, MemOperand(sp, 0));
}
void StoreReceiver(Register rec, Register argc, Register scratch) {
@@ -761,8 +756,8 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void RecordWriteField(
Register object, int offset, Register value, Register scratch,
LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
+ RememberedSetAction remembered_set_action = RememberedSetAction::kEmit,
+ SmiCheck smi_check = SmiCheck::kInline);
// For a given |object| notify the garbage collector that the slot |address|
// has been written. |value| is the object being stored. The value and
@@ -770,8 +765,8 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void RecordWrite(
Register object, Register address, Register value,
LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
+ RememberedSetAction remembered_set_action = RememberedSetAction::kEmit,
+ SmiCheck smi_check = SmiCheck::kInline);
// Enter exit frame.
// stack_space - extra stack space, used for parameters before call to C.
@@ -837,7 +832,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Invoke the JavaScript function code by either calling or jumping.
void InvokeFunctionCode(Register function, Register new_target,
Register expected_parameter_count,
- Register actual_parameter_count, InvokeFlag flag);
+ Register actual_parameter_count, InvokeType type);
// On function call, call into the debugger if necessary.
void CheckDebugHook(Register fun, Register new_target,
@@ -848,12 +843,9 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// current context to the context in the function before invoking.
void InvokeFunctionWithNewTarget(Register function, Register new_target,
Register actual_parameter_count,
- InvokeFlag flag);
+ InvokeType type);
void InvokeFunction(Register function, Register expected_parameter_count,
- Register actual_parameter_count, InvokeFlag flag);
-
- // Frame restart support
- void MaybeDropFrames();
+ Register actual_parameter_count, InvokeType type);
// Exception handling
@@ -926,22 +918,22 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Call a runtime routine.
void CallRuntime(const Runtime::Function* f, int num_arguments,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs);
+ SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore);
void CallRuntimeSaveDoubles(Runtime::FunctionId fid) {
const Runtime::Function* function = Runtime::FunctionForId(fid);
- CallRuntime(function, function->nargs, kSaveFPRegs);
+ CallRuntime(function, function->nargs, SaveFPRegsMode::kSave);
}
// Convenience function: Same as above, but takes the fid instead.
void CallRuntime(Runtime::FunctionId fid,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+ SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) {
const Runtime::Function* function = Runtime::FunctionForId(fid);
CallRuntime(function, function->nargs, save_doubles);
}
// Convenience function: Same as above, but takes the fid instead.
void CallRuntime(Runtime::FunctionId fid, int num_arguments,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+ SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) {
CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
}
@@ -1053,7 +1045,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Helper functions for generating invokes.
void InvokePrologue(Register expected_parameter_count,
Register actual_parameter_count, Label* done,
- InvokeFlag flag);
+ InvokeType type);
DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler);
};
diff --git a/chromium/v8/src/codegen/register-arch.h b/chromium/v8/src/codegen/register-arch.h
index 3936ee80cc2..eb4cdb8789b 100644
--- a/chromium/v8/src/codegen/register-arch.h
+++ b/chromium/v8/src/codegen/register-arch.h
@@ -41,6 +41,27 @@ constexpr bool ShouldPadArguments(int argument_count) {
return ArgumentPaddingSlots(argument_count) != 0;
}
+#ifdef DEBUG
+struct CountIfValidRegisterFunctor {
+ template <typename RegType>
+ constexpr int operator()(int count, RegType reg) const {
+ return count + (reg.is_valid() ? 1 : 0);
+ }
+};
+
+template <typename RegType, typename... RegTypes,
+ // All arguments must be either Register or DoubleRegister.
+ typename = typename std::enable_if<
+ base::is_same<Register, RegType, RegTypes...>::value ||
+ base::is_same<DoubleRegister, RegType, RegTypes...>::value>::type>
+inline constexpr bool AreAliased(RegType first_reg, RegTypes... regs) {
+ int num_different_regs = NumRegs(RegType::ListOf(first_reg, regs...));
+ int num_given_regs =
+ base::fold(CountIfValidRegisterFunctor{}, 0, first_reg, regs...);
+ return num_different_regs < num_given_regs;
+}
+#endif
+
} // namespace internal
} // namespace v8
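The debug-only AreAliased added above counts valid registers with a fold and compares that with the number of distinct register codes in the list. A standalone sketch of the same idea using a toy register type (illustrative names, not the V8 API):

    #include <bitset>
    #include <initializer_list>

    struct Reg {
      int code;  // negative means "no register"
      bool valid() const { return code >= 0; }
    };

    // True if any two valid registers in the list share a code.
    inline bool AreAliasedToy(std::initializer_list<Reg> regs) {
      std::bitset<64> seen;
      int valid_count = 0;
      for (Reg r : regs) {
        if (!r.valid()) continue;
        ++valid_count;
        seen.set(r.code);
      }
      // Fewer distinct codes than valid registers means a collision.
      return static_cast<int>(seen.count()) < valid_count;
    }

    // AreAliasedToy({{3}, {4}, {3}})  -> true
    // AreAliasedToy({{3}, {-1}, {4}}) -> false (invalid registers are ignored)

The real helper gets the distinct count from NumRegs(RegType::ListOf(...)) and the valid count from base::fold over CountIfValidRegisterFunctor, but the comparison is the same.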
diff --git a/chromium/v8/src/codegen/register.h b/chromium/v8/src/codegen/register.h
index 57f3a1c62ab..49f67ceb1d3 100644
--- a/chromium/v8/src/codegen/register.h
+++ b/chromium/v8/src/codegen/register.h
@@ -33,7 +33,7 @@ class RegisterBase {
static constexpr SubType no_reg() { return SubType{kCode_no_reg}; }
static constexpr SubType from_code(int code) {
- CONSTEXPR_DCHECK(base::IsInRange(code, 0, kNumRegisters - 1));
+ DCHECK(base::IsInRange(code, 0, kNumRegisters - 1));
return SubType{code};
}
@@ -45,7 +45,7 @@ class RegisterBase {
constexpr bool is_valid() const { return reg_code_ != kCode_no_reg; }
constexpr int code() const {
- CONSTEXPR_DCHECK(is_valid());
+ DCHECK(is_valid());
return reg_code_;
}
diff --git a/chromium/v8/src/codegen/reloc-info.cc b/chromium/v8/src/codegen/reloc-info.cc
index 753b34bdbf5..4781e7609be 100644
--- a/chromium/v8/src/codegen/reloc-info.cc
+++ b/chromium/v8/src/codegen/reloc-info.cc
@@ -4,6 +4,7 @@
#include "src/codegen/reloc-info.h"
+#include "src/base/vlq.h"
#include "src/codegen/assembler-inl.h"
#include "src/codegen/code-reference.h"
#include "src/codegen/external-reference-encoder.h"
@@ -56,11 +57,10 @@ const char* const RelocInfo::kFillerCommentString = "DEOPTIMIZATION PADDING";
// the following record in the usual way. The long pc jump record has variable
// length:
// pc-jump: [PC_JUMP] 11
-// [7 bits data] 0
+// 1 [7 bits data]
// ...
-// [7 bits data] 1
-// (Bits 6..31 of pc delta, with leading zeroes
-// dropped, and last non-zero chunk tagged with 1.)
+// 0 [7 bits data]
+// (Bits 6..31 of pc delta, encoded with VLQ.)
const int kTagBits = 2;
const int kTagMask = (1 << kTagBits) - 1;
@@ -75,12 +75,6 @@ const int kSmallPCDeltaBits = kBitsPerByte - kTagBits;
const int kSmallPCDeltaMask = (1 << kSmallPCDeltaBits) - 1;
const int RelocInfo::kMaxSmallPCDelta = kSmallPCDeltaMask;
-const int kChunkBits = 7;
-const int kChunkMask = (1 << kChunkBits) - 1;
-const int kLastChunkTagBits = 1;
-const int kLastChunkTagMask = 1;
-const int kLastChunkTag = 1;
-
uint32_t RelocInfoWriter::WriteLongPCJump(uint32_t pc_delta) {
// Return if the pc_delta can fit in kSmallPCDeltaBits bits.
// Otherwise write a variable length PC jump for the bits that do
@@ -89,13 +83,12 @@ uint32_t RelocInfoWriter::WriteLongPCJump(uint32_t pc_delta) {
WriteMode(RelocInfo::PC_JUMP);
uint32_t pc_jump = pc_delta >> kSmallPCDeltaBits;
DCHECK_GT(pc_jump, 0);
- // Write kChunkBits size chunks of the pc_jump.
- for (; pc_jump > 0; pc_jump = pc_jump >> kChunkBits) {
- byte b = pc_jump & kChunkMask;
- *--pos_ = b << kLastChunkTagBits;
- }
- // Tag the last chunk so it can be identified.
- *pos_ = *pos_ | kLastChunkTag;
+ base::VLQEncodeUnsigned(
+ [this](byte byte) {
+ *--pos_ = byte;
+ return pos_;
+ },
+ pc_jump);
// Return the remaining kSmallPCDeltaBits of the pc_delta.
return pc_delta & kSmallPCDeltaMask;
}
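WriteLongPCJump now delegates the upper bits of the pc delta to the shared VLQ helpers instead of the hand-rolled chunk loop. A minimal standalone round-trip in the spirit of the new comment (7 data bits per byte, top bit set while more bytes follow); the exact bit layout of src/base/vlq.h is not reproduced here and may differ in detail:

    #include <cstdint>
    #include <vector>

    // Encode: least-significant 7-bit group first, MSB set on all but the last byte.
    inline void VLQEncodeToy(std::vector<uint8_t>* out, uint32_t value) {
      while (value > 0x7F) {
        out->push_back(static_cast<uint8_t>((value & 0x7F) | 0x80));
        value >>= 7;
      }
      out->push_back(static_cast<uint8_t>(value));  // final byte: top bit clear
    }

    // Decode bytes in the order the encoder produced them.
    inline uint32_t VLQDecodeToy(const uint8_t* p) {
      uint32_t result = 0;
      int shift = 0;
      uint8_t b;
      do {
        b = *p++;
        result |= static_cast<uint32_t>(b & 0x7F) << shift;
        shift += 7;
      } while (b & 0x80);
      return result;
    }

In the writer the byte sink is the lambda that stores through *--pos_, and the matching reader below pulls bytes back with the inverse callback, so the helpers never need to know which direction the reloc stream grows.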
@@ -205,14 +198,8 @@ void RelocIterator::AdvanceReadData() {
void RelocIterator::AdvanceReadLongPCJump() {
// Read the 32-kSmallPCDeltaBits most significant bits of the
- // pc jump in kChunkBits bit chunks and shift them into place.
- // Stop when the last chunk is encountered.
- uint32_t pc_jump = 0;
- for (int i = 0; i < kIntSize; i++) {
- byte pc_jump_part = *--pos_;
- pc_jump |= (pc_jump_part >> kLastChunkTagBits) << i * kChunkBits;
- if ((pc_jump_part & kLastChunkTagMask) == 1) break;
- }
+ // pc jump as a VLQ encoded integer.
+ uint32_t pc_jump = base::VLQDecodeUnsigned([this] { return *--pos_; });
// The least significant kSmallPCDeltaBits bits will be added
// later.
rinfo_.pc_ += pc_jump << kSmallPCDeltaBits;
@@ -450,7 +437,7 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
return "unknown relocation type";
}
-void RelocInfo::Print(Isolate* isolate, std::ostream& os) { // NOLINT
+void RelocInfo::Print(Isolate* isolate, std::ostream& os) {
os << reinterpret_cast<const void*>(pc_) << " " << RelocModeName(rmode_);
if (rmode_ == DEOPT_SCRIPT_OFFSET || rmode_ == DEOPT_INLINING_ID) {
os << " (" << data() << ")";
diff --git a/chromium/v8/src/codegen/reloc-info.h b/chromium/v8/src/codegen/reloc-info.h
index bef433e10b1..e8b3c0b98bb 100644
--- a/chromium/v8/src/codegen/reloc-info.h
+++ b/chromium/v8/src/codegen/reloc-info.h
@@ -328,8 +328,8 @@ class RelocInfo {
#ifdef ENABLE_DISASSEMBLER
// Printing
static const char* RelocModeName(Mode rmode);
- void Print(Isolate* isolate, std::ostream& os); // NOLINT
-#endif // ENABLE_DISASSEMBLER
+ void Print(Isolate* isolate, std::ostream& os);
+#endif // ENABLE_DISASSEMBLER
#ifdef VERIFY_HEAP
void Verify(Isolate* isolate);
#endif
diff --git a/chromium/v8/src/codegen/riscv64/assembler-riscv64-inl.h b/chromium/v8/src/codegen/riscv64/assembler-riscv64-inl.h
index 40bd56d15b5..d301a00bf4e 100644
--- a/chromium/v8/src/codegen/riscv64/assembler-riscv64-inl.h
+++ b/chromium/v8/src/codegen/riscv64/assembler-riscv64-inl.h
@@ -63,11 +63,15 @@ void RelocInfo::apply(intptr_t delta) {
if (IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_)) {
// Absolute code pointer inside code object moves with the code object.
Assembler::RelocateInternalReference(rmode_, pc_, delta);
+ } else {
+ DCHECK(IsRelativeCodeTarget(rmode_));
+ Assembler::RelocateRelativeReference(rmode_, pc_, delta);
}
}
Address RelocInfo::target_address() {
- DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_));
+ DCHECK(IsCodeTargetMode(rmode_) || IsRuntimeEntry(rmode_) ||
+ IsWasmCall(rmode_));
return Assembler::target_address_at(pc_, constant_pool_);
}
@@ -133,9 +137,13 @@ HeapObject RelocInfo::target_object_no_host(Isolate* isolate) {
}
Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
- DCHECK(IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_));
- return Handle<HeapObject>(reinterpret_cast<Address*>(
- Assembler::target_address_at(pc_, constant_pool_)));
+ if (IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_)) {
+ return Handle<HeapObject>(reinterpret_cast<Address*>(
+ Assembler::target_address_at(pc_, constant_pool_)));
+ } else {
+ DCHECK(IsRelativeCodeTarget(rmode_));
+ return origin->relative_code_target_object_handle_at(pc_);
+ }
}
void RelocInfo::set_target_object(Heap* heap, HeapObject target,
@@ -163,11 +171,11 @@ void RelocInfo::set_target_external_reference(
}
Address RelocInfo::target_internal_reference() {
- if (rmode_ == INTERNAL_REFERENCE) {
+ if (IsInternalReference(rmode_)) {
return Memory<Address>(pc_);
} else {
// Encoded internal references are j/jal instructions.
- DCHECK(rmode_ == INTERNAL_REFERENCE_ENCODED);
+ DCHECK(IsInternalReferenceEncoded(rmode_));
DCHECK(Assembler::IsLui(Assembler::instr_at(pc_ + 0 * kInstrSize)));
Address address = Assembler::target_address_at(pc_);
return address;
@@ -175,10 +183,20 @@ Address RelocInfo::target_internal_reference() {
}
Address RelocInfo::target_internal_reference_address() {
- DCHECK(rmode_ == INTERNAL_REFERENCE || rmode_ == INTERNAL_REFERENCE_ENCODED);
+ DCHECK(IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_));
return pc_;
}
+Handle<Code> Assembler::relative_code_target_object_handle_at(
+ Address pc) const {
+ Instr instr1 = Assembler::instr_at(pc);
+ Instr instr2 = Assembler::instr_at(pc + kInstrSize);
+ DCHECK(IsAuipc(instr1));
+ DCHECK(IsJalr(instr2));
+ int32_t code_target_index = BrachlongOffset(instr1, instr2);
+ return GetCodeTarget(code_target_index);
+}
+
Address RelocInfo::target_runtime_entry(Assembler* origin) {
DCHECK(IsRuntimeEntry(rmode_));
return target_address();
diff --git a/chromium/v8/src/codegen/riscv64/assembler-riscv64.cc b/chromium/v8/src/codegen/riscv64/assembler-riscv64.cc
index 914ea26f9fe..35c56ccdf53 100644
--- a/chromium/v8/src/codegen/riscv64/assembler-riscv64.cc
+++ b/chromium/v8/src/codegen/riscv64/assembler-riscv64.cc
@@ -128,7 +128,8 @@ Register ToRegister(int num) {
const int RelocInfo::kApplyMask =
RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
- RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED);
+ RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED) |
+ RelocInfo::ModeMask(RelocInfo::RELATIVE_CODE_TARGET);
bool RelocInfo::IsCodedSpecially() {
// The deserializer needs to know whether a pointer is specially coded. Being
@@ -454,6 +455,16 @@ static inline Instr SetAuipcOffset(int32_t offset, Instr instr) {
return instr;
}
+static inline Instr SetJalrOffset(int32_t offset, Instr instr) {
+ DCHECK(Assembler::IsJalr(instr));
+ DCHECK(is_int12(offset));
+ instr &= ~kImm12Mask;
+ int32_t imm12 = offset << kImm12Shift;
+ DCHECK(Assembler::IsJalr(instr | (imm12 & kImm12Mask)));
+ DCHECK(Assembler::JalrOffset(instr | (imm12 & kImm12Mask)) == offset);
+ return instr | (imm12 & kImm12Mask);
+}
+
static inline Instr SetJalOffset(int32_t pos, int32_t target_pos, Instr instr) {
DCHECK(Assembler::IsJal(instr));
int32_t imm = target_pos - pos;
@@ -689,17 +700,36 @@ int Assembler::CJumpOffset(Instr instr) {
int Assembler::BrachlongOffset(Instr auipc, Instr instr_I) {
DCHECK(reinterpret_cast<Instruction*>(&instr_I)->InstructionType() ==
InstructionBase::kIType);
- const int kImm19_0Mask = ((1 << 20) - 1);
- int32_t imm_auipc = auipc & (kImm19_0Mask << 12);
- int32_t imm_12 = instr_I >> 20;
- int32_t offset = imm_12 + imm_auipc;
+ DCHECK(IsAuipc(auipc));
+ int32_t imm_auipc = AuipcOffset(auipc);
+ int32_t imm12 = (instr_I & kImm12Mask) >> 20;
+ int32_t offset = imm12 + imm_auipc;
return offset;
}
+int Assembler::PatchBranchlongOffset(Address pc, Instr instr_auipc,
+ Instr instr_jalr, int32_t offset) {
+ DCHECK(IsAuipc(instr_auipc));
+ DCHECK(IsJalr(instr_jalr));
+ int32_t Hi20 = (((int32_t)offset + 0x800) >> 12);
+ int32_t Lo12 = (int32_t)offset << 20 >> 20;
+ CHECK(is_int32(offset));
+ instr_at_put(pc, SetAuipcOffset(Hi20, instr_auipc));
+ instr_at_put(pc + 4, SetJalrOffset(Lo12, instr_jalr));
+ DCHECK(offset ==
+ BrachlongOffset(Assembler::instr_at(pc), Assembler::instr_at(pc + 4)));
+ return 2;
+}
+
int Assembler::LdOffset(Instr instr) {
DCHECK(IsLd(instr));
int32_t imm12 = (instr & kImm12Mask) >> 20;
- imm12 = imm12 << 12 >> 12;
+ return imm12;
+}
+
+int Assembler::JalrOffset(Instr instr) {
+ DCHECK(IsJalr(instr));
+ int32_t imm12 = (instr & kImm12Mask) >> 20;
return imm12;
}
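PatchBranchlongOffset splits a signed 32-bit pc-relative offset across an auipc/jalr pair: adding 0x800 before shifting rounds the upper 20 bits so that the sign-extended low 12 bits can be added back exactly. A plain-integer sketch of that identity (no assembler involved):

    #include <cassert>
    #include <cstdint>

    // Split a 32-bit offset into auipc's Hi20 and jalr's sign-extended Lo12.
    inline void SplitBranchlong(int32_t offset, int32_t* hi20, int32_t* lo12) {
      // Round so the low part stays within [-2048, 2047].
      *hi20 = static_cast<int32_t>((static_cast<int64_t>(offset) + 0x800) >> 12);
      *lo12 = static_cast<int32_t>(offset - (static_cast<int64_t>(*hi20) << 12));
      // auipc contributes hi20 << 12 relative to pc; jalr then adds lo12.
      assert(static_cast<int64_t>(*hi20) * 4096 + *lo12 == offset);
    }

target_address_at below applies the inverse for an auipc+jalr pair: the target is simply pc + AuipcOffset(auipc) + JalrOffset(jalr).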
@@ -717,7 +747,7 @@ bool Assembler::MustUseReg(RelocInfo::Mode rmode) {
}
void Assembler::disassembleInstr(Instr instr) {
- if (!FLAG_debug_riscv) return;
+ if (!FLAG_riscv_debug) return;
disasm::NameConverter converter;
disasm::Disassembler disasm(converter);
EmbeddedVector<char, 128> disasm_buffer;
@@ -2567,9 +2597,25 @@ void Assembler::AdjustBaseAndOffset(MemOperand* src, Register scratch,
// Must not overwrite the register 'base' while loading 'offset'.
DCHECK(src->rm() != scratch);
- RV_li(scratch, src->offset());
- add(scratch, scratch, src->rm());
- src->offset_ = 0;
+ constexpr int32_t kMinOffsetForSimpleAdjustment = 0x7F8;
+ constexpr int32_t kMaxOffsetForSimpleAdjustment =
+ 2 * kMinOffsetForSimpleAdjustment;
+ if (0 <= src->offset() && src->offset() <= kMaxOffsetForSimpleAdjustment) {
+ addi(scratch, src->rm(), kMinOffsetForSimpleAdjustment);
+ src->offset_ -= kMinOffsetForSimpleAdjustment;
+ } else if (-kMaxOffsetForSimpleAdjustment <= src->offset() &&
+ src->offset() < 0) {
+ addi(scratch, src->rm(), -kMinOffsetForSimpleAdjustment);
+ src->offset_ += kMinOffsetForSimpleAdjustment;
+ } else if (access_type == OffsetAccessType::SINGLE_ACCESS) {
+ RV_li(scratch, (static_cast<int64_t>(src->offset()) + 0x800) >> 12 << 12);
+ add(scratch, scratch, src->rm());
+ src->offset_ = src->offset() << 20 >> 20;
+ } else {
+ RV_li(scratch, src->offset());
+ add(scratch, scratch, src->rm());
+ src->offset_ = 0;
+ }
src->rm_ = scratch;
}
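AdjustBaseAndOffset now prefers a single addi on the base when the offset is within ±2·0x7F8, so the residual offset still fits a signed 12-bit immediate; larger single-access offsets use an auipc-style Hi/Lo split, and only the remaining cases materialize the full offset with RV_li (that last fallback is omitted from the sketch below). A plain-integer sketch of the rewrite, assuming the same constants as the patch:

    #include <cassert>
    #include <cstdint>

    // Returns the constant folded into the base register and updates *offset so
    // that it fits a signed 12-bit load/store immediate.
    inline int64_t AdjustOffsetToy(int64_t* offset) {
      constexpr int64_t kSimple = 0x7F8;  // kMinOffsetForSimpleAdjustment
      int64_t base_adjustment;
      if (0 <= *offset && *offset <= 2 * kSimple) {
        base_adjustment = kSimple;        // one addi, remainder stays small
      } else if (-2 * kSimple <= *offset && *offset < 0) {
        base_adjustment = -kSimple;
      } else {
        // Round to a 4 KiB boundary so the remainder lands in [-2048, 2047].
        base_adjustment = ((*offset + 0x800) >> 12) << 12;
      }
      *offset -= base_adjustment;
      assert(-2048 <= *offset && *offset <= 2047);
      return base_adjustment;
    }

The ±0x7F8 fast path matters because addi itself only takes a 12-bit immediate, so the base adjustment has to be encodable in a single instruction.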
@@ -2596,6 +2642,22 @@ int Assembler::RelocateInternalReference(RelocInfo::Mode rmode, Address pc,
}
}
+void Assembler::RelocateRelativeReference(RelocInfo::Mode rmode, Address pc,
+ intptr_t pc_delta) {
+ Instr instr = instr_at(pc);
+ Instr instr1 = instr_at(pc + 1 * kInstrSize);
+ DCHECK(RelocInfo::IsRelativeCodeTarget(rmode));
+ if (IsAuipc(instr) && IsJalr(instr1)) {
+ int32_t imm;
+ imm = BrachlongOffset(instr, instr1);
+ imm -= pc_delta;
+ PatchBranchlongOffset(pc, instr, instr1, imm);
+ return;
+ } else {
+ UNREACHABLE();
+ }
+}
+
void Assembler::GrowBuffer() {
DEBUG_PRINTF("GrowBuffer: %p -> ", buffer_start_);
// Compute new buffer size.
@@ -2766,12 +2828,23 @@ void Assembler::set_target_address_at(Address pc, Address constant_pool,
ICacheFlushMode icache_flush_mode) {
Instr* instr = reinterpret_cast<Instr*>(pc);
if (IsAuipc(*instr)) {
- DCHECK(IsLd(*reinterpret_cast<Instr*>(pc + 4)));
- int32_t Hi20 = AuipcOffset(*instr);
- int32_t Lo12 = LdOffset(*reinterpret_cast<Instr*>(pc + 4));
- Memory<Address>(pc + Hi20 + Lo12) = target;
- if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- FlushInstructionCache(pc + Hi20 + Lo12, 2 * kInstrSize);
+ if (IsLd(*reinterpret_cast<Instr*>(pc + 4))) {
+ int32_t Hi20 = AuipcOffset(*instr);
+ int32_t Lo12 = LdOffset(*reinterpret_cast<Instr*>(pc + 4));
+ Memory<Address>(pc + Hi20 + Lo12) = target;
+ if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
+ FlushInstructionCache(pc + Hi20 + Lo12, 2 * kInstrSize);
+ }
+ } else {
+ DCHECK(IsJalr(*reinterpret_cast<Instr*>(pc + 4)));
+ int64_t imm = (int64_t)target - (int64_t)pc;
+ Instr instr = instr_at(pc);
+ Instr instr1 = instr_at(pc + 1 * kInstrSize);
+ DCHECK(is_int32(imm));
+ int num = PatchBranchlongOffset(pc, instr, instr1, (int32_t)imm);
+ if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
+ FlushInstructionCache(pc, num * kInstrSize);
+ }
}
} else {
set_target_address_at(pc, target, icache_flush_mode);
@@ -2781,10 +2854,17 @@ void Assembler::set_target_address_at(Address pc, Address constant_pool,
Address Assembler::target_address_at(Address pc, Address constant_pool) {
Instr* instr = reinterpret_cast<Instr*>(pc);
if (IsAuipc(*instr)) {
- DCHECK(IsLd(*reinterpret_cast<Instr*>(pc + 4)));
- int32_t Hi20 = AuipcOffset(*instr);
- int32_t Lo12 = LdOffset(*reinterpret_cast<Instr*>(pc + 4));
- return Memory<Address>(pc + Hi20 + Lo12);
+ if (IsLd(*reinterpret_cast<Instr*>(pc + 4))) {
+ int32_t Hi20 = AuipcOffset(*instr);
+ int32_t Lo12 = LdOffset(*reinterpret_cast<Instr*>(pc + 4));
+ return Memory<Address>(pc + Hi20 + Lo12);
+ } else {
+ DCHECK(IsJalr(*reinterpret_cast<Instr*>(pc + 4)));
+ int32_t Hi20 = AuipcOffset(*instr);
+ int32_t Lo12 = JalrOffset(*reinterpret_cast<Instr*>(pc + 4));
+ return pc + Hi20 + Lo12;
+ }
+
} else {
return target_address_at(pc);
}
@@ -2890,17 +2970,17 @@ bool UseScratchRegisterScope::hasAvailable() const { return *available_ != 0; }
bool Assembler::IsConstantPoolAt(Instruction* instr) {
// The constant pool marker is made of two instructions. These instructions
// will never be emitted by the JIT, so checking for the first one is enough:
- // 0: ld x0, t3, #offset
+ // 0: ld x0, x0, #offset
Instr instr_value = *reinterpret_cast<Instr*>(instr);
-
- bool result = IsLd(instr_value) && (instr->RdValue() == kRegCode_zero_reg);
- // It is still worth asserting the marker is complete.
- // 4: j 0
+ bool result = IsLd(instr_value) && (instr->Rs1Value() == kRegCode_zero_reg) &&
+ (instr->RdValue() == kRegCode_zero_reg);
#ifdef DEBUG
- Instruction* instr_fllowing = instr + kInstrSize;
- DCHECK(!result || (IsJal(*reinterpret_cast<Instr*>(instr_fllowing)) &&
- instr_fllowing->Imm20JValue() == 0 &&
- instr_fllowing->RdValue() == kRegCode_zero_reg));
+ // It is still worth asserting the marker is complete.
+ // 1: j 0x0
+ Instruction* instr_following = instr + kInstrSize;
+ DCHECK(!result || (IsJal(*reinterpret_cast<Instr*>(instr_following)) &&
+ instr_following->Imm20JValue() == 0 &&
+ instr_following->RdValue() == kRegCode_zero_reg));
#endif
return result;
}
@@ -2941,9 +3021,9 @@ void ConstantPool::EmitPrologue(Alignment require_alignment) {
int ConstantPool::PrologueSize(Jump require_jump) const {
// Prologue is:
- // j over ;; if require_jump
- // ld x0, t3, #pool_size
- // j xzr
+ // j over ;; if require_jump
+ // ld x0, x0, #pool_size
+ // j 0x0
int prologue_size = require_jump == Jump::kRequired ? kInstrSize : 0;
prologue_size += 2 * kInstrSize;
return prologue_size;
@@ -2954,7 +3034,7 @@ void ConstantPool::SetLoadOffsetToConstPoolEntry(int load_offset,
const ConstantPoolKey& key) {
Instr instr_auipc = assm_->instr_at(load_offset);
Instr instr_ld = assm_->instr_at(load_offset + 4);
- // Instruction to patch must be 'ld t3, t3, offset' with offset == kInstrSize.
+ // Instruction to patch must be 'ld rd, offset(rd)' with 'offset == 0'.
DCHECK(assm_->IsAuipc(instr_auipc));
DCHECK(assm_->IsLd(instr_ld));
DCHECK_EQ(assm_->LdOffset(instr_ld), 0);
diff --git a/chromium/v8/src/codegen/riscv64/assembler-riscv64.h b/chromium/v8/src/codegen/riscv64/assembler-riscv64.h
index 1dcf4e0aae1..ff66351d6a4 100644
--- a/chromium/v8/src/codegen/riscv64/assembler-riscv64.h
+++ b/chromium/v8/src/codegen/riscv64/assembler-riscv64.h
@@ -53,7 +53,7 @@ namespace v8 {
namespace internal {
#define DEBUG_PRINTF(...) \
- if (FLAG_debug_riscv) { \
+ if (FLAG_riscv_debug) { \
printf(__VA_ARGS__); \
}
@@ -160,6 +160,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
virtual ~Assembler() { CHECK(constpool_.IsEmpty()); }
+ void AbortedCodeGeneration() { constpool_.Clear(); }
// GetCode emits any pending (non-emitted) code and fills the descriptor desc.
static constexpr int kNoHandlerTable = 0;
static constexpr SafepointTableBuilder* kNoSafepointTable = nullptr;
@@ -208,11 +209,14 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Get offset from instr.
int BranchOffset(Instr instr);
- int BrachlongOffset(Instr auipc, Instr jalr);
+ static int BrachlongOffset(Instr auipc, Instr jalr);
+ static int PatchBranchlongOffset(Address pc, Instr auipc, Instr instr_I,
+ int32_t offset);
int JumpOffset(Instr instr);
int CJumpOffset(Instr instr);
static int LdOffset(Instr instr);
static int AuipcOffset(Instr instr);
+ static int JalrOffset(Instr instr);
// Returns the branch offset to the given label from the current code
// position. Links the label to the current position if it is still unbound.
@@ -800,6 +804,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
static int RelocateInternalReference(RelocInfo::Mode rmode, Address pc,
intptr_t pc_delta);
+ static void RelocateRelativeReference(RelocInfo::Mode rmode, Address pc,
+ intptr_t pc_delta);
// Writes a single byte or word of data in the code stream. Used for
// inline tables, e.g., jump-tables.
@@ -862,8 +868,40 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
static bool IsLd(Instr instr);
void CheckTrampolinePool();
+ // Get the code target object for a pc-relative call or jump.
+ V8_INLINE Handle<Code> relative_code_target_object_handle_at(
+ Address pc_) const;
+
inline int UnboundLabelsCount() { return unbound_labels_count_; }
+ using BlockPoolsScope = BlockTrampolinePoolScope;
+
+ void RecordConstPool(int size);
+
+ void ForceConstantPoolEmissionWithoutJump() {
+ constpool_.Check(Emission::kForced, Jump::kOmitted);
+ }
+ void ForceConstantPoolEmissionWithJump() {
+ constpool_.Check(Emission::kForced, Jump::kRequired);
+ }
+ // Check if the const pool needs to be emitted while pretending that {margin}
+ // more bytes of instructions have already been emitted.
+ void EmitConstPoolWithJumpIfNeeded(size_t margin = 0) {
+ constpool_.Check(Emission::kIfNeeded, Jump::kRequired, margin);
+ }
+
+ void EmitConstPoolWithoutJumpIfNeeded(size_t margin = 0) {
+ constpool_.Check(Emission::kIfNeeded, Jump::kOmitted, margin);
+ }
+
+ void RecordEntry(uint32_t data, RelocInfo::Mode rmode) {
+ constpool_.RecordEntry(data, rmode);
+ }
+
+ void RecordEntry(uint64_t data, RelocInfo::Mode rmode) {
+ constpool_.RecordEntry(data, rmode);
+ }
+
protected:
// Readable constants for base and offset adjustment helper, these indicate if
// aside from offset, another value like offset + 4 should fit into int16.
@@ -949,34 +987,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
}
}
- using BlockPoolsScope = BlockTrampolinePoolScope;
-
- void RecordConstPool(int size);
-
- void ForceConstantPoolEmissionWithoutJump() {
- constpool_.Check(Emission::kForced, Jump::kOmitted);
- }
- void ForceConstantPoolEmissionWithJump() {
- constpool_.Check(Emission::kForced, Jump::kRequired);
- }
- // Check if the const pool needs to be emitted while pretending that {margin}
- // more bytes of instructions have already been emitted.
- void EmitConstPoolWithJumpIfNeeded(size_t margin = 0) {
- constpool_.Check(Emission::kIfNeeded, Jump::kRequired, margin);
- }
-
- void EmitConstPoolWithoutJumpIfNeeded(size_t margin = 0) {
- constpool_.Check(Emission::kIfNeeded, Jump::kOmitted, margin);
- }
-
- void RecordEntry(uint32_t data, RelocInfo::Mode rmode) {
- constpool_.RecordEntry(data, rmode);
- }
-
- void RecordEntry(uint64_t data, RelocInfo::Mode rmode) {
- constpool_.RecordEntry(data, rmode);
- }
-
private:
// Avoid overflows for displacements etc.
static const int kMaximalBufferSize = 512 * MB;
@@ -1231,6 +1241,16 @@ class V8_EXPORT_PRIVATE UseScratchRegisterScope {
Register Acquire();
bool hasAvailable() const;
+ void Include(const RegList& list) { *available_ |= list; }
+ void Exclude(const RegList& list) { *available_ &= ~list; }
+ void Include(const Register& reg1, const Register& reg2 = no_reg) {
+ RegList list(reg1.bit() | reg2.bit());
+ Include(list);
+ }
+ void Exclude(const Register& reg1, const Register& reg2 = no_reg) {
+ RegList list(reg1.bit() | reg2.bit());
+ Exclude(list);
+ }
private:
RegList* available_;
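The new Include/Exclude calls let surrounding code temporarily widen or narrow the pool of scratch registers a scope may hand out. A hedged usage sketch (the register choices and the enclosing context are illustrative, not mandated by the port):

    // Somewhere inside a code generator that owns an Assembler* assm:
    {
      UseScratchRegisterScope temps(assm);
      temps.Include(t5, t6);           // currently free, lend them to the scope
      temps.Exclude(t3);               // holds a live value, keep it off the list
      Register tmp = temps.Acquire();  // hands back one of the available registers
      // ... emit code that clobbers tmp ...
    }  // scope ends; the previous availability set is restored, as on other ports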
diff --git a/chromium/v8/src/codegen/riscv64/constants-riscv64.cc b/chromium/v8/src/codegen/riscv64/constants-riscv64.cc
index 045488bf7fa..d2709dc2c7c 100644
--- a/chromium/v8/src/codegen/riscv64/constants-riscv64.cc
+++ b/chromium/v8/src/codegen/riscv64/constants-riscv64.cc
@@ -106,8 +106,11 @@ int FPURegisters::Number(const char* name) {
}
InstructionBase::Type InstructionBase::InstructionType() const {
+ if (IsIllegalInstruction()) {
+ return kUnsupported;
+ }
// RV64C Instruction
- if (IsShortInstruction()) {
+ if (FLAG_riscv_c_extension && IsShortInstruction()) {
switch (InstructionBits() & kRvcOpcodeMask) {
case RO_C_ADDI4SPN:
return kCIWType;
diff --git a/chromium/v8/src/codegen/riscv64/constants-riscv64.h b/chromium/v8/src/codegen/riscv64/constants-riscv64.h
index 3b5ffff6dac..c8f54d8f7f9 100644
--- a/chromium/v8/src/codegen/riscv64/constants-riscv64.h
+++ b/chromium/v8/src/codegen/riscv64/constants-riscv64.h
@@ -8,6 +8,7 @@
#include "src/base/logging.h"
#include "src/base/macros.h"
#include "src/common/globals.h"
+#include "src/flags/flags.h"
// UNIMPLEMENTED_ macro for RISCV.
#ifdef DEBUG
@@ -55,8 +56,7 @@ const uint32_t kLessSignificantWordInDoublewordOffset = 4;
namespace v8 {
namespace internal {
-// TODO(sigurds): Change this value once we use relative jumps.
-constexpr size_t kMaxPCRelativeCodeRangeInMB = 0;
+constexpr size_t kMaxPCRelativeCodeRangeInMB = 4096;
// -----------------------------------------------------------------------------
// Registers and FPURegisters.
@@ -727,18 +727,25 @@ class InstructionBase {
kUnsupported = -1
};
+ inline bool IsIllegalInstruction() const {
+    uint16_t FirstHalfWord = *reinterpret_cast<const uint16_t*>(this);
+ return FirstHalfWord == 0;
+ }
+
inline bool IsShortInstruction() const {
uint8_t FirstByte = *reinterpret_cast<const uint8_t*>(this);
return (FirstByte & 0x03) <= C2;
}
inline uint8_t InstructionSize() const {
- return this->IsShortInstruction() ? kShortInstrSize : kInstrSize;
+ return (FLAG_riscv_c_extension && this->IsShortInstruction())
+ ? kShortInstrSize
+ : kInstrSize;
}
// Get the raw instruction bits.
inline Instr InstructionBits() const {
- if (this->IsShortInstruction()) {
+ if (FLAG_riscv_c_extension && this->IsShortInstruction()) {
return 0x0000FFFF & (*reinterpret_cast<const ShortInstr*>(this));
}
return *reinterpret_cast<const Instr*>(this);
diff --git a/chromium/v8/src/codegen/riscv64/interface-descriptors-riscv64-inl.h b/chromium/v8/src/codegen/riscv64/interface-descriptors-riscv64-inl.h
new file mode 100644
index 00000000000..4a8bb0d9ee6
--- /dev/null
+++ b/chromium/v8/src/codegen/riscv64/interface-descriptors-riscv64-inl.h
@@ -0,0 +1,265 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CODEGEN_RISCV64_INTERFACE_DESCRIPTORS_RISCV64_INL_H_
+#define V8_CODEGEN_RISCV64_INTERFACE_DESCRIPTORS_RISCV64_INL_H_
+
+#if V8_TARGET_ARCH_RISCV64
+
+#include "src/base/template-utils.h"
+#include "src/codegen/interface-descriptors.h"
+#include "src/execution/frames.h"
+
+namespace v8 {
+namespace internal {
+
+constexpr auto CallInterfaceDescriptor::DefaultRegisterArray() {
+ auto registers = RegisterArray(a0, a1, a2, a3, a4);
+ STATIC_ASSERT(registers.size() == kMaxBuiltinRegisterParams);
+ return registers;
+}
+
+// static
+constexpr auto RecordWriteDescriptor::registers() {
+ return RegisterArray(a0, a1, a2, a3, kReturnRegister0);
+}
+
+// static
+constexpr auto DynamicCheckMapsDescriptor::registers() {
+ return RegisterArray(kReturnRegister0, a1, a2, a3, cp);
+}
+
+// static
+constexpr auto EphemeronKeyBarrierDescriptor::registers() {
+ return RegisterArray(a0, a1, a2, a3, kReturnRegister0);
+}
+
+// static
+constexpr Register LoadDescriptor::ReceiverRegister() { return a1; }
+// static
+constexpr Register LoadDescriptor::NameRegister() { return a2; }
+// static
+constexpr Register LoadDescriptor::SlotRegister() { return a0; }
+
+// static
+constexpr Register LoadWithVectorDescriptor::VectorRegister() { return a3; }
+
+// static
+constexpr Register
+LoadWithReceiverAndVectorDescriptor::LookupStartObjectRegister() {
+ return a4;
+}
+
+// static
+constexpr Register StoreDescriptor::ReceiverRegister() { return a1; }
+// static
+constexpr Register StoreDescriptor::NameRegister() { return a2; }
+// static
+constexpr Register StoreDescriptor::ValueRegister() { return a0; }
+// static
+constexpr Register StoreDescriptor::SlotRegister() { return a4; }
+
+// static
+constexpr Register StoreWithVectorDescriptor::VectorRegister() { return a3; }
+
+// static
+constexpr Register StoreTransitionDescriptor::MapRegister() { return a5; }
+
+// static
+constexpr Register ApiGetterDescriptor::HolderRegister() { return a0; }
+// static
+constexpr Register ApiGetterDescriptor::CallbackRegister() { return a3; }
+
+// static
+constexpr Register GrowArrayElementsDescriptor::ObjectRegister() { return a0; }
+// static
+constexpr Register GrowArrayElementsDescriptor::KeyRegister() { return a3; }
+
+// static
+constexpr Register BaselineLeaveFrameDescriptor::ParamsSizeRegister() {
+ return a2;
+}
+// static
+constexpr Register BaselineLeaveFrameDescriptor::WeightRegister() { return a3; }
+
+// static
+constexpr Register TypeConversionDescriptor::ArgumentRegister() { return a0; }
+
+// static
+constexpr auto TypeofDescriptor::registers() { return RegisterArray(a3); }
+
+// static
+constexpr auto CallTrampolineDescriptor::registers() {
+ // a1: target
+ // a0: number of arguments
+ return RegisterArray(a1, a0);
+}
+
+// static
+constexpr auto CallVarargsDescriptor::registers() {
+ // a0 : number of arguments (on the stack, not including receiver)
+ // a1 : the target to call
+ // a4 : arguments list length (untagged)
+ // a2 : arguments list (FixedArray)
+ return RegisterArray(a1, a0, a4, a2);
+}
+
+// static
+constexpr auto CallForwardVarargsDescriptor::registers() {
+ // a1: target
+ // a0: number of arguments
+  // a2: start index (to support rest parameters)
+ return RegisterArray(a1, a0, a2);
+}
+
+// static
+constexpr auto CallFunctionTemplateDescriptor::registers() {
+ // a1 : function template info
+ // a0 : number of arguments (on the stack, not including receiver)
+ return RegisterArray(a1, a0);
+}
+
+// static
+constexpr auto CallWithSpreadDescriptor::registers() {
+ // a0 : number of arguments (on the stack, not including receiver)
+ // a1 : the target to call
+ // a2 : the object to spread
+ return RegisterArray(a1, a0, a2);
+}
+
+// static
+constexpr auto CallWithArrayLikeDescriptor::registers() {
+ // a1 : the target to call
+ // a2 : the arguments list
+ return RegisterArray(a1, a2);
+}
+
+// static
+constexpr auto ConstructVarargsDescriptor::registers() {
+ // a0 : number of arguments (on the stack, not including receiver)
+ // a1 : the target to call
+ // a3 : the new target
+ // a4 : arguments list length (untagged)
+ // a2 : arguments list (FixedArray)
+ return RegisterArray(a1, a3, a0, a4, a2);
+}
+
+// static
+constexpr auto ConstructForwardVarargsDescriptor::registers() {
+ // a3: new target
+ // a1: target
+ // a0: number of arguments
+  // a2: start index (to support rest parameters)
+ return RegisterArray(a1, a3, a0, a2);
+}
+
+// static
+constexpr auto ConstructWithSpreadDescriptor::registers() {
+ // a0 : number of arguments (on the stack, not including receiver)
+ // a1 : the target to call
+ // a3 : the new target
+ // a2 : the object to spread
+ return RegisterArray(a1, a3, a0, a2);
+}
+
+// static
+constexpr auto ConstructWithArrayLikeDescriptor::registers() {
+ // a1 : the target to call
+ // a3 : the new target
+ // a2 : the arguments list
+ return RegisterArray(a1, a3, a2);
+}
+
+// static
+constexpr auto ConstructStubDescriptor::registers() {
+ // a3: new target
+ // a1: target
+ // a0: number of arguments
+ // a2: allocation site or undefined
+ return RegisterArray(a1, a3, a0, a2);
+}
+
+// static
+constexpr auto AbortDescriptor::registers() { return RegisterArray(a0); }
+
+// static
+constexpr auto CompareDescriptor::registers() {
+ // a1: left operand
+ // a0: right operand
+ return RegisterArray(a1, a0);
+}
+
+// static
+constexpr auto Compare_BaselineDescriptor::registers() {
+ // a1: left operand
+ // a0: right operand
+ // a2: feedback slot
+ return RegisterArray(a1, a0, a2);
+}
+
+// static
+constexpr auto BinaryOpDescriptor::registers() {
+ // a1: left operand
+ // a0: right operand
+ return RegisterArray(a1, a0);
+}
+
+// static
+constexpr auto BinaryOp_BaselineDescriptor::registers() {
+ // a1: left operand
+ // a0: right operand
+ // a2: feedback slot
+ return RegisterArray(a1, a0, a2);
+}
+
+// static
+constexpr auto ApiCallbackDescriptor::registers() {
+ return RegisterArray(a1, // kApiFunctionAddress
+ a2, // kArgc
+ a3, // kCallData
+ a0); // kHolder
+}
+
+// static
+constexpr auto InterpreterDispatchDescriptor::registers() {
+ return RegisterArray(
+ kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister,
+ kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister);
+}
+
+// static
+constexpr auto InterpreterPushArgsThenCallDescriptor::registers() {
+ return RegisterArray(a0, // argument count (not including receiver)
+ a2, // address of first argument
+                       a1);  // the target callable to be called
+}
+
+// static
+constexpr auto InterpreterPushArgsThenConstructDescriptor::registers() {
+ return RegisterArray(
+ a0, // argument count (not including receiver)
+ a4, // address of the first argument
+ a1, // constructor to call
+ a3, // new target
+ a2); // allocation site feedback if available, undefined otherwise
+}
+
+// static
+constexpr auto ResumeGeneratorDescriptor::registers() {
+ return RegisterArray(a0, // the value to pass to the generator
+ a1); // the JSGeneratorObject to resume
+}
+
+// static
+constexpr auto RunMicrotasksEntryDescriptor::registers() {
+ return RegisterArray(a0, a1);
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TARGET_ARCH_RISCV64
+
+#endif // V8_CODEGEN_RISCV64_INTERFACE_DESCRIPTORS_RISCV64_INL_H_
diff --git a/chromium/v8/src/codegen/riscv64/interface-descriptors-riscv64.cc b/chromium/v8/src/codegen/riscv64/interface-descriptors-riscv64.cc
deleted file mode 100644
index 23953097cd1..00000000000
--- a/chromium/v8/src/codegen/riscv64/interface-descriptors-riscv64.cc
+++ /dev/null
@@ -1,313 +0,0 @@
-// Copyright 2021 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_RISCV64
-
-#include "src/codegen/interface-descriptors.h"
-#include "src/execution/frames.h"
-
-namespace v8 {
-namespace internal {
-
-const Register CallInterfaceDescriptor::ContextRegister() { return cp; }
-
-void CallInterfaceDescriptor::DefaultInitializePlatformSpecific(
- CallInterfaceDescriptorData* data, int register_parameter_count) {
- const Register default_stub_registers[] = {a0, a1, a2, a3, a4};
- CHECK_LE(static_cast<size_t>(register_parameter_count),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(register_parameter_count,
- default_stub_registers);
-}
-
-void WasmI32AtomicWait32Descriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- const Register default_stub_registers[] = {a0, a1, a2, a3};
- CHECK_EQ(static_cast<size_t>(kParameterCount),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
-}
-
-void WasmI64AtomicWait32Descriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- const Register default_stub_registers[] = {a0, a1, a2, a3, a4};
- CHECK_EQ(static_cast<size_t>(kParameterCount - kStackArgumentsCount),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(kParameterCount - kStackArgumentsCount,
- default_stub_registers);
-}
-
-void RecordWriteDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- const Register default_stub_registers[] = {a0, a1, a2, a3, kReturnRegister0};
-
- data->RestrictAllocatableRegisters(default_stub_registers,
- arraysize(default_stub_registers));
-
- CHECK_LE(static_cast<size_t>(kParameterCount),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
-}
-
-void EphemeronKeyBarrierDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- const Register default_stub_registers[] = {a0, a1, a2, a3, kReturnRegister0};
-
- data->RestrictAllocatableRegisters(default_stub_registers,
- arraysize(default_stub_registers));
-
- CHECK_LE(static_cast<size_t>(kParameterCount),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
-}
-
-void DynamicCheckMapsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register default_stub_registers[] = {kReturnRegister0, a1, a2, a3, cp};
-
- data->RestrictAllocatableRegisters(default_stub_registers,
- arraysize(default_stub_registers));
-
- CHECK_LE(static_cast<size_t>(kParameterCount),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
-}
-
-const Register LoadDescriptor::ReceiverRegister() { return a1; }
-const Register LoadDescriptor::NameRegister() { return a2; }
-const Register LoadDescriptor::SlotRegister() { return a0; }
-
-const Register LoadWithVectorDescriptor::VectorRegister() { return a3; }
-
-const Register
-LoadWithReceiverAndVectorDescriptor::LookupStartObjectRegister() {
- return a4;
-}
-
-const Register StoreDescriptor::ReceiverRegister() { return a1; }
-const Register StoreDescriptor::NameRegister() { return a2; }
-const Register StoreDescriptor::ValueRegister() { return a0; }
-const Register StoreDescriptor::SlotRegister() { return a4; }
-
-const Register StoreWithVectorDescriptor::VectorRegister() { return a3; }
-
-const Register StoreTransitionDescriptor::SlotRegister() { return a4; }
-const Register StoreTransitionDescriptor::VectorRegister() { return a3; }
-const Register StoreTransitionDescriptor::MapRegister() { return a5; }
-
-const Register ApiGetterDescriptor::HolderRegister() { return a0; }
-const Register ApiGetterDescriptor::CallbackRegister() { return a3; }
-
-const Register GrowArrayElementsDescriptor::ObjectRegister() { return a0; }
-const Register GrowArrayElementsDescriptor::KeyRegister() { return a3; }
-
-// static
-const Register TypeConversionDescriptor::ArgumentRegister() { return a0; }
-
-void TypeofDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {a3};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallTrampolineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // a1: target
- // a0: number of arguments
- Register registers[] = {a1, a0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallVarargsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // a0 : number of arguments (on the stack, not including receiver)
- // a1 : the target to call
- // a4 : arguments list length (untagged)
- // a2 : arguments list (FixedArray)
- Register registers[] = {a1, a0, a4, a2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallForwardVarargsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // a1: the target to call
- // a0: number of arguments
- // a2: start index (to support rest parameters)
- Register registers[] = {a1, a0, a2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallFunctionTemplateDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // a1 : function template info
- // a0 : number of arguments (on the stack, not including receiver)
- Register registers[] = {a1, a0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallWithSpreadDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // a0 : number of arguments (on the stack, not including receiver)
- // a1 : the target to call
- // a2 : the object to spread
- Register registers[] = {a1, a0, a2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallWithArrayLikeDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // a1 : the target to call
- // a2 : the arguments list
- Register registers[] = {a1, a2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructVarargsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // a0 : number of arguments (on the stack, not including receiver)
- // a1 : the target to call
- // a3 : the new target
- // a4 : arguments list length (untagged)
- // a2 : arguments list (FixedArray)
- Register registers[] = {a1, a3, a0, a4, a2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructForwardVarargsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // a1: the target to call
- // a3: new target
- // a0: number of arguments
- // a2: start index (to support rest parameters)
- Register registers[] = {a1, a3, a0, a2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructWithSpreadDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // a0 : number of arguments (on the stack, not including receiver)
- // a1 : the target to call
- // a3 : the new target
- // a2 : the object to spread
- Register registers[] = {a1, a3, a0, a2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructWithArrayLikeDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // a1 : the target to call
- // a3 : the new target
- // a2 : the arguments list
- Register registers[] = {a1, a3, a2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructStubDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // a1: target
- // a3: new target
- // a0: number of arguments
- // a2: allocation site or undefined
- Register registers[] = {a1, a3, a0, a2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void AbortDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {a0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CompareDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {a1, a0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void BinaryOpDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {a1, a0};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ApiCallbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- a1, // kApiFunctionAddress
- a2, // kArgc
- a3, // kCallData
- a0, // kHolder
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void InterpreterDispatchDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister,
- kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void InterpreterPushArgsThenCallDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- a0, // argument count (not including receiver)
- a2, // address of first argument
- a1 // the target callable to be call
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void InterpreterPushArgsThenConstructDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- a0, // argument count (not including receiver)
- a4, // address of the first argument
- a1, // constructor to call
- a3, // new target
- a2, // allocation site feedback if available, undefined otherwise
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ResumeGeneratorDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- a0, // the value to pass to the generator
- a1 // the JSGeneratorObject to resume
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void BinaryOp_BaselineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:11421): Implement on this platform.
- InitializePlatformUnimplemented(data, kParameterCount);
-}
-
-void Compare_BaselineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:11421): Implement on this platform.
- InitializePlatformUnimplemented(data, kParameterCount);
-}
-
-void FrameDropperTrampolineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- a1, // loaded new FP
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void RunMicrotasksEntryDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {a0, a1};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_RISCV64
diff --git a/chromium/v8/src/codegen/riscv64/macro-assembler-riscv64.cc b/chromium/v8/src/codegen/riscv64/macro-assembler-riscv64.cc
index ff798da0e9f..801a74f5698 100644
--- a/chromium/v8/src/codegen/riscv64/macro-assembler-riscv64.cc
+++ b/chromium/v8/src/codegen/riscv64/macro-assembler-riscv64.cc
@@ -12,6 +12,7 @@
#include "src/codegen/callable.h"
#include "src/codegen/code-factory.h"
#include "src/codegen/external-reference-table.h"
+#include "src/codegen/interface-descriptors-inl.h"
#include "src/codegen/macro-assembler.h"
#include "src/codegen/register-configuration.h"
#include "src/debug/debug.h"
@@ -61,7 +62,7 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
RegList list = kJSCallerSaved & ~exclusions;
bytes += NumRegs(list) * kPointerSize;
- if (fp_mode == kSaveFPRegs) {
+ if (fp_mode == SaveFPRegsMode::kSave) {
bytes += NumRegs(kCallerSavedFPU) * kDoubleSize;
}
@@ -86,7 +87,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
MultiPush(list);
bytes += NumRegs(list) * kPointerSize;
- if (fp_mode == kSaveFPRegs) {
+ if (fp_mode == SaveFPRegsMode::kSave) {
MultiPushFPU(kCallerSavedFPU);
bytes += NumRegs(kCallerSavedFPU) * kDoubleSize;
}
@@ -97,7 +98,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
Register exclusion2, Register exclusion3) {
int bytes = 0;
- if (fp_mode == kSaveFPRegs) {
+ if (fp_mode == SaveFPRegsMode::kSave) {
MultiPopFPU(kCallerSavedFPU);
bytes += NumRegs(kCallerSavedFPU) * kDoubleSize;
}
@@ -175,7 +176,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
Label done;
// Skip barrier if writing a smi.
- if (smi_check == INLINE_SMI_CHECK) {
+ if (smi_check == SmiCheck::kInline) {
JumpIfSmi(value, &done);
}
@@ -184,7 +185,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
DCHECK(IsAligned(offset, kPointerSize));
Add64(dst, object, Operand(offset - kHeapObjectTag));
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
BlockTrampolinePoolScope block_trampoline_pool(this);
@@ -197,13 +198,13 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
}
RecordWrite(object, dst, value, ra_status, save_fp, remembered_set_action,
- OMIT_SMI_CHECK);
+ SmiCheck::kOmit);
bind(&done);
// Clobber clobbered input registers when running with the debug-code flag
// turned on to provoke errors.
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
li(value, Operand(bit_cast<int64_t>(kZapValue + 4)));
li(dst, Operand(bit_cast<int64_t>(kZapValue + 8)));
}
@@ -336,14 +337,14 @@ void MacroAssembler::RecordWrite(Register object, Register address,
SaveFPRegsMode fp_mode,
RememberedSetAction remembered_set_action,
SmiCheck smi_check) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
DCHECK(!AreAliased(object, address, value, kScratchReg));
Ld(kScratchReg, MemOperand(address));
Assert(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite,
kScratchReg, Operand(value));
}
- if ((remembered_set_action == OMIT_REMEMBERED_SET &&
+ if ((remembered_set_action == RememberedSetAction::kOmit &&
!FLAG_incremental_marking) ||
FLAG_disable_write_barriers) {
return;
@@ -353,7 +354,7 @@ void MacroAssembler::RecordWrite(Register object, Register address,
// catch stores of smis and stores into the young generation.
Label done;
- if (smi_check == INLINE_SMI_CHECK) {
+ if (smi_check == SmiCheck::kInline) {
DCHECK_EQ(0, kSmiTag);
JumpIfSmi(value, &done);
}
@@ -378,7 +379,7 @@ void MacroAssembler::RecordWrite(Register object, Register address,
// Clobber clobbered registers when running with the debug-code flag
// turned on to provoke errors.
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
li(address, Operand(bit_cast<int64_t>(kZapValue + 12)));
li(value, Operand(bit_cast<int64_t>(kZapValue + 16)));
}
@@ -393,6 +394,10 @@ void TurboAssembler::Add32(Register rd, Register rs, const Operand& rt) {
} else {
if (is_int12(rt.immediate()) && !MustUseReg(rt.rmode())) {
addiw(rd, rs, static_cast<int32_t>(rt.immediate()));
+ } else if ((-4096 <= rt.immediate() && rt.immediate() <= -2049) ||
+ (2048 <= rt.immediate() && rt.immediate() <= 4094)) {
+ addiw(rd, rs, rt.immediate() / 2);
+ addiw(rd, rd, rt.immediate() - (rt.immediate() / 2));
} else {
// li handles the relocation.
UseScratchRegisterScope temps(this);
@@ -409,6 +414,10 @@ void TurboAssembler::Add64(Register rd, Register rs, const Operand& rt) {
} else {
if (is_int12(rt.immediate()) && !MustUseReg(rt.rmode())) {
addi(rd, rs, static_cast<int32_t>(rt.immediate()));
+ } else if ((-4096 <= rt.immediate() && rt.immediate() <= -2049) ||
+ (2048 <= rt.immediate() && rt.immediate() <= 4094)) {
+ addi(rd, rs, rt.immediate() / 2);
+ addi(rd, rd, rt.immediate() - (rt.immediate() / 2));
} else {
// li handles the relocation.
UseScratchRegisterScope temps(this);
@@ -429,6 +438,10 @@ void TurboAssembler::Sub32(Register rd, Register rs, const Operand& rt) {
addiw(rd, rs,
static_cast<int32_t>(
-rt.immediate())); // No subiw instr, use addiw(x, y, -imm).
+ } else if ((-4096 <= -rt.immediate() && -rt.immediate() <= -2049) ||
+ (2048 <= -rt.immediate() && -rt.immediate() <= 4094)) {
+ addiw(rd, rs, -rt.immediate() / 2);
+ addiw(rd, rd, -rt.immediate() - (-rt.immediate() / 2));
} else {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
@@ -452,6 +465,10 @@ void TurboAssembler::Sub64(Register rd, Register rs, const Operand& rt) {
addi(rd, rs,
static_cast<int32_t>(
-rt.immediate())); // No subi instr, use addi(x, y, -imm).
+ } else if ((-4096 <= -rt.immediate() && -rt.immediate() <= -2049) ||
+ (2048 <= -rt.immediate() && -rt.immediate() <= 4094)) {
+ addi(rd, rs, -rt.immediate() / 2);
+ addi(rd, rd, -rt.immediate() - (-rt.immediate() / 2));
} else {
int li_count = InstrCountForLi64Bit(rt.immediate());
int li_neg_count = InstrCountForLi64Bit(-rt.immediate());
@@ -884,6 +901,7 @@ void TurboAssembler::Sll64(Register rd, Register rs, const Operand& rt) {
void TurboAssembler::Ror(Register rd, Register rs, const Operand& rt) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
+ BlockTrampolinePoolScope block_trampoline_pool(this);
if (rt.is_reg()) {
negw(scratch, rt.rm());
sllw(scratch, rs, scratch);
@@ -908,6 +926,7 @@ void TurboAssembler::Ror(Register rd, Register rs, const Operand& rt) {
void TurboAssembler::Dror(Register rd, Register rs, const Operand& rt) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
+ BlockTrampolinePoolScope block_trampoline_pool(this);
if (rt.is_reg()) {
negw(scratch, rt.rm());
sll(scratch, rs, scratch);
@@ -928,9 +947,10 @@ void TurboAssembler::Dror(Register rd, Register rs, const Operand& rt) {
}
void TurboAssembler::CalcScaledAddress(Register rd, Register rt, Register rs,
- uint8_t sa, Register scratch) {
+ uint8_t sa) {
DCHECK(sa >= 1 && sa <= 31);
- Register tmp = rd == rt ? scratch : rd;
+ UseScratchRegisterScope temps(this);
+ Register tmp = rd == rt ? temps.Acquire() : rd;
DCHECK(tmp != rt);
slli(tmp, rs, sa);
Add64(rd, rt, tmp);
@@ -1215,8 +1235,9 @@ void TurboAssembler::Uld(Register rd, const MemOperand& rs) {
 // Load consecutive 32-bit word pair in 64-bit reg. and put first word in low
// bits,
// second word in high bits.
-void MacroAssembler::LoadWordPair(Register rd, const MemOperand& rs,
- Register scratch) {
+void MacroAssembler::LoadWordPair(Register rd, const MemOperand& rs) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
Lwu(rd, rs);
Lw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
slli(scratch, scratch, 32);
@@ -1228,8 +1249,9 @@ void TurboAssembler::Usd(Register rd, const MemOperand& rs) {
}
 // Do 64-bit store as two consecutive 32-bit stores to an unaligned address.
-void MacroAssembler::StoreWordPair(Register rd, const MemOperand& rs,
- Register scratch) {
+void MacroAssembler::StoreWordPair(Register rd, const MemOperand& rs) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
Sw(rd, rs);
srai(scratch, rd, 32);
Sw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
@@ -1464,7 +1486,7 @@ void TurboAssembler::li(Register rd, Operand j, LiFlags mode) {
UseScratchRegisterScope temps(this);
int count = li_estimate(j.immediate(), temps.hasAvailable());
int reverse_count = li_estimate(~j.immediate(), temps.hasAvailable());
- if (!FLAG_disable_riscv_constant_pool && count >= 4 && reverse_count >= 4) {
+ if (FLAG_riscv_constant_pool && count >= 4 && reverse_count >= 4) {
     // Ld an Address from a constant pool.
RecordEntry((uint64_t)j.immediate(), j.rmode());
auipc(rd, 0);
@@ -1864,6 +1886,28 @@ void TurboAssembler::RoundHelper(FPURegister dst, FPURegister src,
fmv_s(dst, src);
}
}
+ {
+ Label not_NaN;
+ UseScratchRegisterScope temps2(this);
+ Register scratch = temps2.Acquire();
+ // According to the wasm spec
+ // (https://webassembly.github.io/spec/core/exec/numerics.html#aux-nans)
+ // if input is canonical NaN, then output is canonical NaN, and if input is
+  // any other NaN, then output is any NaN with the most significant bit of
+  // the payload set to 1. In RISC-V, feq_d will set scratch to 0 if src is a
+  // NaN. If src is not a NaN, branch to the label and do nothing, but if it
+  // is, fmin_d will set dst to the canonical NaN.
+ if (std::is_same<F, double>::value) {
+ feq_d(scratch, src, src);
+ bnez(scratch, &not_NaN);
+ fmin_d(dst, src, src);
+ } else {
+ feq_s(scratch, src, src);
+ bnez(scratch, &not_NaN);
+ fmin_s(dst, src, src);
+ }
+ bind(&not_NaN);
+ }
// If real exponent (i.e., t6 - kFloatExponentBias) is greater than
// kFloat32MantissaBits, it means the floating-point value has no fractional
@@ -2030,8 +2074,8 @@ void TurboAssembler::CompareF64(Register rd, FPUCondition cc, FPURegister cmp1,
}
}
-void TurboAssembler::CompareIsNanF32(Register rd, FPURegister cmp1,
- FPURegister cmp2) {
+void TurboAssembler::CompareIsNotNanF32(Register rd, FPURegister cmp1,
+ FPURegister cmp2) {
UseScratchRegisterScope temps(this);
BlockTrampolinePoolScope block_trampoline_pool(this);
Register scratch = temps.Acquire();
@@ -2039,11 +2083,10 @@ void TurboAssembler::CompareIsNanF32(Register rd, FPURegister cmp1,
feq_s(rd, cmp1, cmp1); // rd <- !isNan(cmp1)
feq_s(scratch, cmp2, cmp2); // scratch <- !isNaN(cmp2)
And(rd, rd, scratch); // rd <- !isNan(cmp1) && !isNan(cmp2)
- Xor(rd, rd, 1); // rd <- isNan(cmp1) || isNan(cmp2)
}
-void TurboAssembler::CompareIsNanF64(Register rd, FPURegister cmp1,
- FPURegister cmp2) {
+void TurboAssembler::CompareIsNotNanF64(Register rd, FPURegister cmp1,
+ FPURegister cmp2) {
UseScratchRegisterScope temps(this);
BlockTrampolinePoolScope block_trampoline_pool(this);
Register scratch = temps.Acquire();
@@ -2051,7 +2094,18 @@ void TurboAssembler::CompareIsNanF64(Register rd, FPURegister cmp1,
feq_d(rd, cmp1, cmp1); // rd <- !isNan(cmp1)
feq_d(scratch, cmp2, cmp2); // scratch <- !isNaN(cmp2)
And(rd, rd, scratch); // rd <- !isNan(cmp1) && !isNan(cmp2)
- Xor(rd, rd, 1); // rd <- isNan(cmp1) || isNan(cmp2)
+}
+
+void TurboAssembler::CompareIsNanF32(Register rd, FPURegister cmp1,
+ FPURegister cmp2) {
+ CompareIsNotNanF32(rd, cmp1, cmp2); // rd <- !isNan(cmp1) && !isNan(cmp2)
+ Xor(rd, rd, 1); // rd <- isNan(cmp1) || isNan(cmp2)
+}
+
+void TurboAssembler::CompareIsNanF64(Register rd, FPURegister cmp1,
+ FPURegister cmp2) {
+ CompareIsNotNanF64(rd, cmp1, cmp2); // rd <- !isNan(cmp1) && !isNan(cmp2)
+ Xor(rd, rd, 1); // rd <- isNan(cmp1) || isNan(cmp2)
}
void TurboAssembler::BranchTrueShortF(Register rs, Label* target) {
@@ -2949,9 +3003,20 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
bool target_is_isolate_independent_builtin =
isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
Builtins::IsIsolateIndependent(builtin_index);
-
- if (root_array_available_ && options().isolate_independent_code &&
- target_is_isolate_independent_builtin) {
+ if (target_is_isolate_independent_builtin &&
+ options().use_pc_relative_calls_and_jumps) {
+ int32_t code_target_index = AddCodeTarget(code);
+ Label skip;
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ if (cond != al) {
+ Branch(&skip, NegateCondition(cond), rs, rt);
+ }
+ RecordRelocInfo(RelocInfo::RELATIVE_CODE_TARGET);
+ GenPCRelativeJump(t6, code_target_index);
+ bind(&skip);
+ return;
+ } else if (root_array_available_ && options().isolate_independent_code &&
+ target_is_isolate_independent_builtin) {
int offset = code->builtin_index() * kSystemPointerSize +
IsolateData::builtin_entry_table_offset();
Ld(t6, MemOperand(kRootRegister, offset));
@@ -3017,8 +3082,22 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
bool target_is_isolate_independent_builtin =
isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
Builtins::IsIsolateIndependent(builtin_index);
- if (root_array_available_ && options().isolate_independent_code &&
- target_is_isolate_independent_builtin) {
+ if (target_is_isolate_independent_builtin &&
+ options().use_pc_relative_calls_and_jumps) {
+ int32_t code_target_index = AddCodeTarget(code);
+ Label skip;
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ RecordCommentForOffHeapTrampoline(builtin_index);
+ if (cond != al) {
+ Branch(&skip, NegateCondition(cond), rs, rt);
+ }
+ RecordRelocInfo(RelocInfo::RELATIVE_CODE_TARGET);
+ GenPCRelativeJumpAndLink(t6, code_target_index);
+ bind(&skip);
+ RecordComment("]");
+ return;
+ } else if (root_array_available_ && options().isolate_independent_code &&
+ target_is_isolate_independent_builtin) {
int offset = code->builtin_index() * kSystemPointerSize +
IsolateData::builtin_entry_table_offset();
LoadRootRelative(t6, offset);
@@ -3059,6 +3138,46 @@ void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
Call(builtin_index);
}
+void TurboAssembler::CallBuiltin(int builtin_index) {
+ DCHECK(Builtins::IsBuiltinId(builtin_index));
+ RecordCommentForOffHeapTrampoline(builtin_index);
+ CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
+ EmbeddedData d = EmbeddedData::FromBlob(isolate());
+ Address entry = d.InstructionStartOfBuiltin(builtin_index);
+ if (options().short_builtin_calls) {
+ Call(entry, RelocInfo::RUNTIME_ENTRY);
+ } else {
+ Call(entry, RelocInfo::OFF_HEAP_TARGET);
+ }
+ RecordComment("]");
+}
+
+void TurboAssembler::TailCallBuiltin(int builtin_index) {
+ DCHECK(Builtins::IsBuiltinId(builtin_index));
+ RecordCommentForOffHeapTrampoline(builtin_index);
+ CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
+ EmbeddedData d = EmbeddedData::FromBlob(isolate());
+ Address entry = d.InstructionStartOfBuiltin(builtin_index);
+ if (options().short_builtin_calls) {
+ Jump(entry, RelocInfo::RUNTIME_ENTRY);
+ } else {
+ Jump(entry, RelocInfo::OFF_HEAP_TARGET);
+ }
+ RecordComment("]");
+}
+
+void TurboAssembler::LoadEntryFromBuiltinIndex(Builtins::Name builtin_index,
+ Register destination) {
+ Ld(destination, EntryFromBuiltinIndexAsOperand(builtin_index));
+}
+
+MemOperand TurboAssembler::EntryFromBuiltinIndexAsOperand(
+ Builtins::Name builtin_index) {
+ DCHECK(root_array_available());
+ return MemOperand(kRootRegister,
+ IsolateData::builtin_entry_slot_offset(builtin_index));
+}
+
void TurboAssembler::PatchAndJump(Address target) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
@@ -3115,16 +3234,28 @@ void TurboAssembler::Ret(Condition cond, Register rs, const Operand& rt) {
}
}
+void TurboAssembler::GenPCRelativeJump(Register rd, int64_t imm32) {
+ DCHECK(is_int32(imm32));
+ int32_t Hi20 = (((int32_t)imm32 + 0x800) >> 12);
+ int32_t Lo12 = (int32_t)imm32 << 20 >> 20;
+ auipc(rd, Hi20); // Read PC + Hi20 into scratch.
+ jr(rd, Lo12); // jump PC + Hi20 + Lo12
+}
+
+void TurboAssembler::GenPCRelativeJumpAndLink(Register rd, int64_t imm32) {
+ DCHECK(is_int32(imm32));
+ int32_t Hi20 = (((int32_t)imm32 + 0x800) >> 12);
+ int32_t Lo12 = (int32_t)imm32 << 20 >> 20;
+ auipc(rd, Hi20); // Read PC + Hi20 into scratch.
+ jalr(rd, Lo12); // jump PC + Hi20 + Lo12
+}
+
void TurboAssembler::BranchLong(Label* L) {
// Generate position independent long branch.
BlockTrampolinePoolScope block_trampoline_pool(this);
int64_t imm64;
imm64 = branch_long_offset(L);
- DCHECK(is_int32(imm64));
- int32_t Hi20 = (((int32_t)imm64 + 0x800) >> 12);
- int32_t Lo12 = (int32_t)imm64 << 20 >> 20;
- auipc(t6, Hi20); // Read PC + Hi20 into scratch.
- jr(t6, Lo12); // jump PC + Hi20 + Lo12
+ GenPCRelativeJump(t6, imm64);
EmitConstPoolWithJumpIfNeeded();
}
@@ -3133,11 +3264,7 @@ void TurboAssembler::BranchAndLinkLong(Label* L) {
BlockTrampolinePoolScope block_trampoline_pool(this);
int64_t imm64;
imm64 = branch_long_offset(L);
- DCHECK(is_int32(imm64));
- int32_t Hi20 = (((int32_t)imm64 + 0x800) >> 12);
- int32_t Lo12 = (int32_t)imm64 << 20 >> 20;
- auipc(t6, Hi20); // Read PC + Hi20 into scratch.
- jalr(t6, Lo12); // jump PC + Hi20 + Lo12 and read PC + 4 to ra
+ GenPCRelativeJumpAndLink(t6, imm64);
}
void TurboAssembler::DropAndRet(int drop) {
@@ -3251,14 +3378,6 @@ void TurboAssembler::Push(Handle<HeapObject> handle) {
push(scratch);
}
-void MacroAssembler::MaybeDropFrames() {
- // Check whether we need to drop frames to restart a function on the stack.
- li(a1, ExternalReference::debug_restart_fp_address(isolate()));
- Ld(a1, MemOperand(a1));
- Jump(BUILTIN_CODE(isolate(), FrameDropperTrampoline), RelocInfo::CODE_TARGET,
- ne, a1, Operand(zero_reg));
-}
-
// ---------------------------------------------------------------------------
// Exception handling.
@@ -3294,16 +3413,10 @@ void MacroAssembler::PopStackHandler() {
void TurboAssembler::FPUCanonicalizeNaN(const DoubleRegister dst,
const DoubleRegister src) {
- UseScratchRegisterScope temps(this);
- Register scratch = temps.Acquire();
- Label NotNaN;
-
- fmv_d(dst, src);
- feq_d(scratch, src, src);
- bne(scratch, zero_reg, &NotNaN);
- RV_li(scratch, 0x7ff8000000000000ULL); // This is the canonical NaN
- fmv_d_x(dst, scratch);
- bind(&NotNaN);
+ // Subtracting 0.0 preserves all inputs except for signalling NaNs, which
+ // become quiet NaNs. We use fsub rather than fadd because fsub preserves -0.0
+ // inputs: -0.0 + 0.0 = 0.0, but -0.0 - 0.0 = -0.0.
+ fsub_d(dst, src, kDoubleRegZero);
}
void TurboAssembler::MovFromFloatResult(const DoubleRegister dst) {
@@ -3414,7 +3527,7 @@ void MacroAssembler::StackOverflowCheck(Register num_args, Register scratch1,
void MacroAssembler::InvokePrologue(Register expected_parameter_count,
Register actual_parameter_count,
- Label* done, InvokeFlag flag) {
+ Label* done, InvokeType type) {
Label regular_invoke;
// a0: actual arguments count
@@ -3524,9 +3637,9 @@ void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
Register expected_parameter_count,
Register actual_parameter_count,
- InvokeFlag flag) {
+ InvokeType type) {
// You can't call a function without a valid frame.
- DCHECK_IMPLIES(flag == CALL_FUNCTION, has_frame());
+ DCHECK_IMPLIES(type == InvokeType::kCall, has_frame());
DCHECK_EQ(function, a1);
DCHECK_IMPLIES(new_target.is_valid(), new_target == a3);
@@ -3540,17 +3653,19 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
}
Label done;
- InvokePrologue(expected_parameter_count, actual_parameter_count, &done, flag);
+ InvokePrologue(expected_parameter_count, actual_parameter_count, &done, type);
// We call indirectly through the code field in the function to
// allow recompilation to take effect without changing any of the
// call sites.
Register code = kJavaScriptCallCodeStartRegister;
Ld(code, FieldMemOperand(function, JSFunction::kCodeOffset));
- if (flag == CALL_FUNCTION) {
- CallCodeObject(code);
- } else {
- DCHECK(flag == JUMP_FUNCTION);
- JumpCodeObject(code);
+ switch (type) {
+ case InvokeType::kCall:
+ CallCodeObject(code);
+ break;
+ case InvokeType::kJump:
+ JumpCodeObject(code);
+ break;
}
// Continue here if InvokePrologue does handle the invocation due to
@@ -3560,9 +3675,9 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
void MacroAssembler::InvokeFunctionWithNewTarget(
Register function, Register new_target, Register actual_parameter_count,
- InvokeFlag flag) {
+ InvokeType type) {
// You can't call a function without a valid frame.
- DCHECK_IMPLIES(flag == CALL_FUNCTION, has_frame());
+ DCHECK_IMPLIES(type == InvokeType::kCall, has_frame());
// Contract with called JS functions requires that function is passed in a1.
DCHECK_EQ(function, a1);
@@ -3576,15 +3691,15 @@ void MacroAssembler::InvokeFunctionWithNewTarget(
SharedFunctionInfo::kFormalParameterCountOffset));
InvokeFunctionCode(a1, new_target, expected_parameter_count,
- actual_parameter_count, flag);
+ actual_parameter_count, type);
}
void MacroAssembler::InvokeFunction(Register function,
Register expected_parameter_count,
Register actual_parameter_count,
- InvokeFlag flag) {
+ InvokeType type) {
// You can't call a function without a valid frame.
- DCHECK_IMPLIES(flag == CALL_FUNCTION, has_frame());
+ DCHECK_IMPLIES(type == InvokeType::kCall, has_frame());
// Contract with called JS functions requires that function is passed in a1.
DCHECK_EQ(function, a1);
@@ -3593,7 +3708,7 @@ void MacroAssembler::InvokeFunction(Register function,
Ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
InvokeFunctionCode(a1, no_reg, expected_parameter_count,
- actual_parameter_count, flag);
+ actual_parameter_count, type);
}
// ---------------------------------------------------------------------------
@@ -3734,15 +3849,15 @@ void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
bool builtin_exit_frame) {
PrepareCEntryFunction(builtin);
- Handle<Code> code = CodeFactory::CEntry(isolate(), 1, kDontSaveFPRegs,
- kArgvOnStack, builtin_exit_frame);
+ Handle<Code> code = CodeFactory::CEntry(isolate(), 1, SaveFPRegsMode::kIgnore,
+ ArgvMode::kStack, builtin_exit_frame);
Jump(code, RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg));
}
void MacroAssembler::JumpToInstructionStream(Address entry) {
   // Ld an Address from a constant pool.
// Record a value into constant pool.
- if (FLAG_disable_riscv_constant_pool) {
+ if (!FLAG_riscv_constant_pool) {
li(kOffHeapTrampolineRegister, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
} else {
RecordEntry(entry, RelocInfo::OFF_HEAP_TARGET);
@@ -3795,7 +3910,7 @@ void TurboAssembler::DebugBreak() { stop(); }
void TurboAssembler::Assert(Condition cc, AbortReason reason, Register rs,
Operand rt) {
- if (emit_debug_code()) Check(cc, reason, rs, rt);
+ if (FLAG_debug_code) Check(cc, reason, rs, rt);
}
void TurboAssembler::Check(Condition cc, AbortReason reason, Register rs,
@@ -3810,11 +3925,11 @@ void TurboAssembler::Check(Condition cc, AbortReason reason, Register rs,
void TurboAssembler::Abort(AbortReason reason) {
Label abort_start;
bind(&abort_start);
-#ifdef DEBUG
- const char* msg = GetAbortReason(reason);
- RecordComment("Abort message: ");
- RecordComment(msg);
-#endif
+ if (FLAG_code_comments) {
+ const char* msg = GetAbortReason(reason);
+ RecordComment("Abort message: ");
+ RecordComment(msg);
+ }
// Avoid emitting call to builtin if requested.
if (trap_on_abort()) {
@@ -3882,19 +3997,12 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
BlockTrampolinePoolScope block_trampoline_pool(this);
- int stack_offset = -3 * kPointerSize;
- const int fp_offset = 1 * kPointerSize;
- addi(sp, sp, stack_offset);
- stack_offset = -stack_offset - kPointerSize;
- Sd(ra, MemOperand(sp, stack_offset));
- stack_offset -= kPointerSize;
- Sd(fp, MemOperand(sp, stack_offset));
- stack_offset -= kPointerSize;
- li(scratch, Operand(StackFrame::TypeToMarker(type)));
- Sd(scratch, MemOperand(sp, stack_offset));
- // Adjust FP to point to saved FP.
- DCHECK_EQ(stack_offset, 0);
- Add64(fp, sp, Operand(fp_offset));
+ Push(ra, fp);
+ Move(fp, sp);
+ if (!StackFrame::IsJavaScript(type)) {
+ li(scratch, Operand(StackFrame::TypeToMarker(type)));
+ Push(scratch);
+ }
}
void TurboAssembler::LeaveFrame(StackFrame::Type type) {
@@ -3935,7 +4043,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
// Set up new frame pointer.
addi(fp, sp, ExitFrameConstants::kFixedFrameSizeFromFp);
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
Sd(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
@@ -4011,11 +4119,12 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
Ld(cp, MemOperand(scratch));
-#ifdef DEBUG
- li(scratch,
- ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
- Sd(a3, MemOperand(scratch));
-#endif
+ if (FLAG_debug_code) {
+ UseScratchRegisterScope temp(this);
+ Register scratch2 = temp.Acquire();
+ li(scratch2, Operand(Context::kInvalidContext));
+ Sd(scratch2, MemOperand(scratch));
+ }
// Pop the arguments, restore registers, and return.
mv(sp, fp); // Respect ABI stack constraint.
@@ -4026,7 +4135,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
if (argument_count_is_length) {
add(sp, sp, argument_count);
} else {
- CalcScaledAddress(sp, sp, argument_count, kPointerSizeLog2, scratch);
+ CalcScaledAddress(sp, sp, argument_count, kPointerSizeLog2);
}
}
@@ -4054,7 +4163,7 @@ int TurboAssembler::ActivationFrameAlignment() {
}
void MacroAssembler::AssertStackIsAligned() {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
const int frame_alignment = ActivationFrameAlignment();
const int frame_alignment_mask = frame_alignment - 1;
@@ -4084,22 +4193,24 @@ void TurboAssembler::SmiUntag(Register dst, const MemOperand& src) {
}
}
-void TurboAssembler::JumpIfSmi(Register value, Label* smi_label,
- Register scratch) {
+void TurboAssembler::JumpIfSmi(Register value, Label* smi_label) {
DCHECK_EQ(0, kSmiTag);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
andi(scratch, value, kSmiTagMask);
Branch(smi_label, eq, scratch, Operand(zero_reg));
}
-void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label,
- Register scratch) {
+void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
DCHECK_EQ(0, kSmiTag);
andi(scratch, value, kSmiTagMask);
Branch(not_smi_label, ne, scratch, Operand(zero_reg));
}
void MacroAssembler::AssertNotSmi(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
STATIC_ASSERT(kSmiTag == 0);
DCHECK(object != kScratchReg);
andi(kScratchReg, object, kSmiTagMask);
@@ -4108,7 +4219,7 @@ void MacroAssembler::AssertNotSmi(Register object) {
}
void MacroAssembler::AssertSmi(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
STATIC_ASSERT(kSmiTag == 0);
DCHECK(object != kScratchReg);
andi(kScratchReg, object, kSmiTagMask);
@@ -4117,7 +4228,7 @@ void MacroAssembler::AssertSmi(Register object) {
}
void MacroAssembler::AssertConstructor(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
DCHECK(object != kScratchReg);
BlockTrampolinePoolScope block_trampoline_pool(this);
STATIC_ASSERT(kSmiTag == 0);
@@ -4134,7 +4245,7 @@ void MacroAssembler::AssertConstructor(Register object) {
}
void MacroAssembler::AssertFunction(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
BlockTrampolinePoolScope block_trampoline_pool(this);
STATIC_ASSERT(kSmiTag == 0);
DCHECK(object != kScratchReg);
@@ -4151,7 +4262,7 @@ void MacroAssembler::AssertFunction(Register object) {
}
void MacroAssembler::AssertBoundFunction(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
BlockTrampolinePoolScope block_trampoline_pool(this);
STATIC_ASSERT(kSmiTag == 0);
DCHECK(object != kScratchReg);
@@ -4165,7 +4276,7 @@ void MacroAssembler::AssertBoundFunction(Register object) {
}
void MacroAssembler::AssertGeneratorObject(Register object) {
- if (!emit_debug_code()) return;
+ if (!FLAG_debug_code) return;
BlockTrampolinePoolScope block_trampoline_pool(this);
STATIC_ASSERT(kSmiTag == 0);
DCHECK(object != kScratchReg);
@@ -4193,7 +4304,7 @@ void MacroAssembler::AssertGeneratorObject(Register object) {
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
Register scratch) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
Label done_checking;
AssertNotSmi(object);
LoadRoot(scratch, RootIndex::kUndefinedValue);
@@ -4229,11 +4340,11 @@ void TurboAssembler::FloatMinMaxHelper(FPURegister dst, FPURegister src1,
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
if (std::is_same<float, F_TYPE>::value) {
- CompareIsNanF32(scratch, src1, src2);
+ CompareIsNotNanF32(scratch, src1, src2);
} else {
- CompareIsNanF64(scratch, src1, src2);
+ CompareIsNotNanF64(scratch, src1, src2);
}
- BranchTrueF(scratch, &nan);
+ BranchFalseF(scratch, &nan);
if (kind == MaxMinKind::kMax) {
if (std::is_same<float, F_TYPE>::value) {
@@ -4330,11 +4441,9 @@ void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
void TurboAssembler::CallCFunction(ExternalReference function,
int num_reg_arguments,
int num_double_arguments) {
- UseScratchRegisterScope temps(this);
- Register scratch = temps.Acquire();
BlockTrampolinePoolScope block_trampoline_pool(this);
- li(scratch, function);
- CallCFunctionHelper(scratch, num_reg_arguments, num_double_arguments);
+ li(t6, function);
+ CallCFunctionHelper(t6, num_reg_arguments, num_double_arguments);
}
void TurboAssembler::CallCFunction(Register function, int num_reg_arguments,
@@ -4363,7 +4472,7 @@ void TurboAssembler::CallCFunctionHelper(Register function,
// PrepareCallCFunction.
#if V8_HOST_ARCH_RISCV64
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
int frame_alignment = base::OS::ActivationFrameAlignment();
int frame_alignment_mask = frame_alignment - 1;
if (frame_alignment > kPointerSize) {
@@ -4387,12 +4496,9 @@ void TurboAssembler::CallCFunctionHelper(Register function,
// allow preemption, so the return address in the link register
// stays correct.
{
- UseScratchRegisterScope temps(this);
- Register func_scratch = temps.Acquire();
- BlockTrampolinePoolScope block_trampoline_pool(this);
- if (function != func_scratch) {
- mv(func_scratch, function);
- function = func_scratch;
+ if (function != t6) {
+ mv(t6, function);
+ function = t6;
}
// Save the frame pointer and PC so that the stack layout remains
@@ -4401,7 +4507,6 @@ void TurboAssembler::CallCFunctionHelper(Register function,
// 't' registers are caller-saved so this is safe as a scratch register.
Register pc_scratch = t1;
Register scratch = t2;
- DCHECK(!AreAliased(pc_scratch, scratch, function));
auipc(pc_scratch, 0);
// TODO(RISCV): Does this need an offset? It seems like this should be the
@@ -4494,12 +4599,10 @@ void TurboAssembler::ResetSpeculationPoisonRegister() {
void TurboAssembler::CallForDeoptimization(Builtins::Name target, int,
Label* exit, DeoptimizeKind kind,
Label* ret, Label*) {
- UseScratchRegisterScope temps(this);
BlockTrampolinePoolScope block_trampoline_pool(this);
- Register scratch = temps.Acquire();
- Ld(scratch,
+ Ld(t6,
MemOperand(kRootRegister, IsolateData::builtin_entry_slot_offset(target)));
- Call(scratch);
+ Call(t6);
DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
(kind == DeoptimizeKind::kLazy)
? Deoptimizer::kLazyDeoptExitSize
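
The GenPCRelativeJump / GenPCRelativeJumpAndLink helpers factored out of
BranchLong and BranchAndLinkLong above split a 32-bit PC-relative offset into
an auipc upper part and a sign-extended 12-bit lower part. A minimal
standalone sketch of that arithmetic (plain C++, made-up offsets, no V8
types), showing why the upper part is biased by 0x800 before the shift:

#include <cassert>
#include <cstdint>

// Same values the assembler computes with shifts:
//   Hi20 = (imm + 0x800) >> 12, Lo12 = sign-extended low 12 bits of imm.
// jr/jalr sign-extend Lo12, so Hi20 is rounded up by 0x800 whenever the low
// 12 bits of imm would be read back as a negative value.
static void Split(int32_t imm, int32_t* hi20, int32_t* lo12) {
  *hi20 = (imm + 0x800) >> 12;
  *lo12 = imm - *hi20 * 4096;
}

int main() {
  for (int32_t imm : {0x12345, 0x12945, -0x6789, 2047, -2048}) {
    int32_t hi20, lo12;
    Split(imm, &hi20, &lo12);
    assert(lo12 >= -2048 && lo12 <= 2047);  // fits the 12-bit field
    assert(static_cast<int64_t>(hi20) * 4096 + lo12 == imm);  // lands on pc + imm
  }
  return 0;
}
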
diff --git a/chromium/v8/src/codegen/riscv64/macro-assembler-riscv64.h b/chromium/v8/src/codegen/riscv64/macro-assembler-riscv64.h
index b260f1c2009..81e55656066 100644
--- a/chromium/v8/src/codegen/riscv64/macro-assembler-riscv64.h
+++ b/chromium/v8/src/codegen/riscv64/macro-assembler-riscv64.h
@@ -12,6 +12,7 @@
#include "src/codegen/assembler.h"
#include "src/codegen/riscv64/assembler-riscv64.h"
#include "src/common/globals.h"
+#include "src/objects/tagged-index.h"
namespace v8 {
namespace internal {
@@ -53,8 +54,6 @@ enum LiFlags {
ADDRESS_LOAD = 2
};
-enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
-enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
enum RAStatus { kRAHasNotBeenSaved, kRAHasBeenSaved };
Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
@@ -166,6 +165,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Compare double, if any operand is NaN, result is false except for NE
void CompareF64(Register rd, FPUCondition cc, FPURegister cmp1,
FPURegister cmp2);
+ void CompareIsNotNanF32(Register rd, FPURegister cmp1, FPURegister cmp2);
+ void CompareIsNotNanF64(Register rd, FPURegister cmp1, FPURegister cmp2);
void CompareIsNanF32(Register rd, FPURegister cmp1, FPURegister cmp2);
void CompareIsNanF64(Register rd, FPURegister cmp1, FPURegister cmp2);
@@ -187,6 +188,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
li(rd, Operand(j), mode);
}
+ inline void Move(Register output, MemOperand operand) { Ld(output, operand); }
+
void li(Register dst, Handle<HeapObject> value, LiFlags mode = OPTIMIZE_SIZE);
void li(Register dst, ExternalReference value, LiFlags mode = OPTIMIZE_SIZE);
void li(Register dst, const StringConstantBase* string,
@@ -197,6 +200,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void LoadRootRegisterOffset(Register destination, intptr_t offset) override;
void LoadRootRelative(Register destination, int32_t offset) override;
+ inline void GenPCRelativeJump(Register rd, int64_t imm32);
+ inline void GenPCRelativeJumpAndLink(Register rd, int64_t imm32);
// Jump, Call, and Ret pseudo instructions implementing inter-working.
#define COND_ARGS \
Condition cond = al, Register rs = zero_reg, \
@@ -223,7 +228,20 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Load the builtin given by the Smi in |builtin_index| into the same
// register.
void LoadEntryFromBuiltinIndex(Register builtin_index);
+ void LoadEntryFromBuiltinIndex(Builtins::Name builtin_index,
+ Register destination);
+ MemOperand EntryFromBuiltinIndexAsOperand(Builtins::Name builtin_index);
void CallBuiltinByIndex(Register builtin_index) override;
+ void CallBuiltin(Builtins::Name builtin) {
+ // TODO(11527): drop the int overload in favour of the Builtins::Name one.
+ return CallBuiltin(static_cast<int>(builtin));
+ }
+ void CallBuiltin(int builtin_index);
+ void TailCallBuiltin(Builtins::Name builtin) {
+ // TODO(11527): drop the int overload in favour of the Builtins::Name one.
+ return TailCallBuiltin(static_cast<int>(builtin));
+ }
+ void TailCallBuiltin(int builtin_index);
void LoadCodeObjectEntry(Register destination, Register code_object) override;
void CallCodeObject(Register code_object) override;
@@ -799,7 +817,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Ceil_s_s(FPURegister fd, FPURegister fs, FPURegister fpu_scratch);
   // Jump if the register contains a smi.
- void JumpIfSmi(Register value, Label* smi_label, Register scratch = t3);
+ void JumpIfSmi(Register value, Label* smi_label);
void JumpIfEqual(Register a, int32_t b, Label* dest) {
Branch(dest, eq, a, Operand(b));
@@ -816,8 +834,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
static int ActivationFrameAlignment();
   // Calculate the scaled address (rd) as rt + (rs << sa)
- void CalcScaledAddress(Register rd, Register rs, Register rt, uint8_t sa,
- Register scratch = t3);
+ void CalcScaledAddress(Register rd, Register rs, Register rt, uint8_t sa);
// Compute the start of the generated instruction stream from the current PC.
// This is an alternative to embedding the {CodeObject} handle as a reference.
@@ -953,8 +970,8 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void RecordWriteField(
Register object, int offset, Register value, Register scratch,
RAStatus ra_status, SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
+ RememberedSetAction remembered_set_action = RememberedSetAction::kEmit,
+ SmiCheck smi_check = SmiCheck::kInline);
// For a given |object| notify the garbage collector that the slot |address|
// has been written. |value| is the object being stored. The value and
@@ -962,16 +979,16 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void RecordWrite(
Register object, Register address, Register value, RAStatus ra_status,
SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
+ RememberedSetAction remembered_set_action = RememberedSetAction::kEmit,
+ SmiCheck smi_check = SmiCheck::kInline);
// void Pref(int32_t hint, const MemOperand& rs);
// ---------------------------------------------------------------------------
// Pseudo-instructions.
- void LoadWordPair(Register rd, const MemOperand& rs, Register scratch = t3);
- void StoreWordPair(Register rd, const MemOperand& rs, Register scratch = t3);
+ void LoadWordPair(Register rd, const MemOperand& rs);
+ void StoreWordPair(Register rd, const MemOperand& rs);
void Madd_s(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft);
void Madd_d(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft);
@@ -1011,7 +1028,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Invoke the JavaScript function code by either calling or jumping.
void InvokeFunctionCode(Register function, Register new_target,
Register expected_parameter_count,
- Register actual_parameter_count, InvokeFlag flag);
+ Register actual_parameter_count, InvokeType type);
// On function call, call into the debugger if necessary.
void CheckDebugHook(Register fun, Register new_target,
@@ -1022,12 +1039,9 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// current context to the context in the function before invoking.
void InvokeFunctionWithNewTarget(Register function, Register new_target,
Register actual_parameter_count,
- InvokeFlag flag);
+ InvokeType type);
void InvokeFunction(Register function, Register expected_parameter_count,
- Register actual_parameter_count, InvokeFlag flag);
-
- // Frame restart support.
- void MaybeDropFrames();
+ Register actual_parameter_count, InvokeType type);
// Exception handling.
@@ -1051,18 +1065,18 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Call a runtime routine.
void CallRuntime(const Runtime::Function* f, int num_arguments,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs);
+ SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore);
// Convenience function: Same as above, but takes the fid instead.
void CallRuntime(Runtime::FunctionId fid,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+ SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) {
const Runtime::Function* function = Runtime::FunctionForId(fid);
CallRuntime(function, function->nargs, save_doubles);
}
// Convenience function: Same as above, but takes the fid instead.
void CallRuntime(Runtime::FunctionId fid, int num_arguments,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+ SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) {
CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
}
@@ -1131,8 +1145,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
}
// Jump if the register contains a non-smi.
- void JumpIfNotSmi(Register value, Label* not_smi_label,
- Register scratch = t3);
+ void JumpIfNotSmi(Register value, Label* not_smi_label);
// Abort execution if argument is a smi, enabled via --debug-code.
void AssertNotSmi(Register object);
@@ -1170,7 +1183,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Helper functions for generating invokes.
void InvokePrologue(Register expected_parameter_count,
Register actual_parameter_count, Label* done,
- InvokeFlag flag);
+ InvokeType type);
// Compute memory operands for safepoint stack slots.
static int SafepointRegisterStackIndex(int reg_code);
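
The Add32/Add64/Sub32/Sub64 hunks in macro-assembler-riscv64.cc above avoid a
scratch register for immediates just outside the signed 12-bit addi range by
splitting them across two addi instructions. A standalone sketch of that split
(plain C++, exhaustive over the affected range; addi itself is not modeled):

#include <cassert>
#include <cstdint>

// addi takes a signed 12-bit immediate: [-2048, 2047]. Values in
// [-4096, -2049] and [2048, 4094] can still be added with two addi
// instructions: addi rd, rs, imm / 2; addi rd, rd, imm - imm / 2.
// 4095 is excluded because it would need 2047 + 2048, and 2048 does not fit.
static bool FitsAddi(int64_t v) { return v >= -2048 && v <= 2047; }

int main() {
  for (int64_t imm = -4096; imm <= 4094; ++imm) {
    if (FitsAddi(imm)) continue;   // a single addi handles these
    int64_t first = imm / 2;       // C++ division truncates toward zero
    int64_t second = imm - first;
    assert(FitsAddi(first) && FitsAddi(second));
    assert(first + second == imm);  // the two addi's add up to imm
  }
  return 0;
}
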
diff --git a/chromium/v8/src/codegen/riscv64/register-riscv64.h b/chromium/v8/src/codegen/riscv64/register-riscv64.h
index b97594becda..4aacad611dd 100644
--- a/chromium/v8/src/codegen/riscv64/register-riscv64.h
+++ b/chromium/v8/src/codegen/riscv64/register-riscv64.h
@@ -337,7 +337,7 @@ constexpr Register kJavaScriptCallTargetRegister = kJSFunctionRegister;
constexpr Register kJavaScriptCallNewTargetRegister = a3;
constexpr Register kJavaScriptCallExtraArg1Register = a2;
-constexpr Register kOffHeapTrampolineRegister = t3;
+constexpr Register kOffHeapTrampolineRegister = t6;
constexpr Register kRuntimeCallFunctionRegister = a1;
constexpr Register kRuntimeCallArgCountRegister = a0;
constexpr Register kRuntimeCallArgvRegister = a2;
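
The FPUCanonicalizeNaN rewrite in macro-assembler-riscv64.cc above replaces
the explicit NaN check with a single fsub_d against kDoubleRegZero. The
comment's reasoning rests on two IEEE-754 facts: arithmetic quiets signalling
NaNs, and subtracting +0.0, unlike adding it, keeps a -0.0 input negative. A
tiny standalone check of the zero-sign half of that argument (plain C++; the
NaN-quieting half depends on the FPU and is not asserted here):

#include <cassert>
#include <cmath>

int main() {
  double neg_zero = -0.0;
  // fadd would lose the sign: under round-to-nearest, -0.0 + 0.0 == +0.0.
  assert(!std::signbit(neg_zero + 0.0));
  // fsub keeps it: -0.0 - 0.0 == -0.0, so subtracting zero canonicalizes NaNs
  // while leaving non-NaN inputs, including -0.0, unchanged.
  assert(std::signbit(neg_zero - 0.0));
  return 0;
}
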
diff --git a/chromium/v8/src/codegen/s390/assembler-s390.cc b/chromium/v8/src/codegen/s390/assembler-s390.cc
index dd5f59bc0bc..da51395dfd5 100644
--- a/chromium/v8/src/codegen/s390/assembler-s390.cc
+++ b/chromium/v8/src/codegen/s390/assembler-s390.cc
@@ -160,7 +160,11 @@ static bool supportsSTFLE() {
}
bool CpuFeatures::SupportsWasmSimd128() {
+#if V8_ENABLE_WEBASSEMBLY
return CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_1);
+#else
+ return false;
+#endif // V8_ENABLE_WEBASSEMBLY
}
void CpuFeatures::ProbeImpl(bool cross_compile) {
diff --git a/chromium/v8/src/codegen/s390/interface-descriptors-s390-inl.h b/chromium/v8/src/codegen/s390/interface-descriptors-s390-inl.h
new file mode 100644
index 00000000000..d672c4354dd
--- /dev/null
+++ b/chromium/v8/src/codegen/s390/interface-descriptors-s390-inl.h
@@ -0,0 +1,256 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CODEGEN_S390_INTERFACE_DESCRIPTORS_S390_INL_H_
+#define V8_CODEGEN_S390_INTERFACE_DESCRIPTORS_S390_INL_H_
+
+#if V8_TARGET_ARCH_S390
+
+#include "src/codegen/interface-descriptors.h"
+#include "src/execution/frames.h"
+
+namespace v8 {
+namespace internal {
+
+constexpr auto CallInterfaceDescriptor::DefaultRegisterArray() {
+ auto registers = RegisterArray(r2, r3, r4, r5, r6);
+ STATIC_ASSERT(registers.size() == kMaxBuiltinRegisterParams);
+ return registers;
+}
+
+// static
+constexpr auto RecordWriteDescriptor::registers() {
+ return RegisterArray(r2, r3, r4, r5, r6, kReturnRegister0);
+}
+
+// static
+constexpr auto DynamicCheckMapsDescriptor::registers() {
+ return RegisterArray(r2, r3, r4, r5, cp);
+}
+
+// static
+constexpr auto EphemeronKeyBarrierDescriptor::registers() {
+ return RegisterArray(r2, r3, r4, r5, r6, kReturnRegister0);
+}
+
+// static
+constexpr Register LoadDescriptor::ReceiverRegister() { return r3; }
+// static
+constexpr Register LoadDescriptor::NameRegister() { return r4; }
+// static
+constexpr Register LoadDescriptor::SlotRegister() { return r2; }
+
+// static
+constexpr Register LoadWithVectorDescriptor::VectorRegister() { return r5; }
+
+// static
+constexpr Register
+LoadWithReceiverAndVectorDescriptor::LookupStartObjectRegister() {
+ return r6;
+}
+
+// static
+constexpr Register StoreDescriptor::ReceiverRegister() { return r3; }
+// static
+constexpr Register StoreDescriptor::NameRegister() { return r4; }
+// static
+constexpr Register StoreDescriptor::ValueRegister() { return r2; }
+// static
+constexpr Register StoreDescriptor::SlotRegister() { return r6; }
+
+// static
+constexpr Register StoreWithVectorDescriptor::VectorRegister() { return r5; }
+
+// static
+constexpr Register StoreTransitionDescriptor::MapRegister() { return r7; }
+
+// static
+constexpr Register ApiGetterDescriptor::HolderRegister() { return r2; }
+// static
+constexpr Register ApiGetterDescriptor::CallbackRegister() { return r5; }
+
+// static
+constexpr Register GrowArrayElementsDescriptor::ObjectRegister() { return r2; }
+// static
+constexpr Register GrowArrayElementsDescriptor::KeyRegister() { return r5; }
+
+// static
+constexpr Register BaselineLeaveFrameDescriptor::ParamsSizeRegister() {
+ // TODO(v8:11421): Implement on this platform.
+ return r5;
+}
+// static
+constexpr Register BaselineLeaveFrameDescriptor::WeightRegister() {
+ // TODO(v8:11421): Implement on this platform.
+ return r6;
+}
+
+// static
+constexpr Register TypeConversionDescriptor::ArgumentRegister() { return r2; }
+
+// static
+constexpr auto TypeofDescriptor::registers() { return RegisterArray(r5); }
+
+// static
+constexpr auto CallTrampolineDescriptor::registers() {
+ // r2 : number of arguments
+ // r3 : the target to call
+ return RegisterArray(r3, r2);
+}
+
+// static
+constexpr auto CallVarargsDescriptor::registers() {
+ // r2 : number of arguments (on the stack, not including receiver)
+ // r3 : the target to call
+ // r6 : arguments list length (untagged)
+ // r4 : arguments list (FixedArray)
+ return RegisterArray(r3, r2, r6, r4);
+}
+
+// static
+constexpr auto CallForwardVarargsDescriptor::registers() {
+ // r2 : number of arguments
+ // r4 : start index (to support rest parameters)
+ // r3 : the target to call
+ return RegisterArray(r3, r2, r4);
+}
+
+// static
+constexpr auto CallFunctionTemplateDescriptor::registers() {
+ // r3 : function template info
+ // r4 : number of arguments (on the stack, not including receiver)
+ return RegisterArray(r3, r4);
+}
+
+// static
+constexpr auto CallWithSpreadDescriptor::registers() {
+ // r2 : number of arguments (on the stack, not including receiver)
+ // r3 : the target to call
+ // r4 : the object to spread
+ return RegisterArray(r3, r2, r4);
+}
+
+// static
+constexpr auto CallWithArrayLikeDescriptor::registers() {
+ // r3 : the target to call
+ // r4 : the arguments list
+ return RegisterArray(r3, r4);
+}
+
+// static
+constexpr auto ConstructVarargsDescriptor::registers() {
+ // r2 : number of arguments (on the stack, not including receiver)
+ // r3 : the target to call
+ // r5 : the new target
+ // r6 : arguments list length (untagged)
+ // r4 : arguments list (FixedArray)
+ return RegisterArray(r3, r5, r2, r6, r4);
+}
+
+// static
+constexpr auto ConstructForwardVarargsDescriptor::registers() {
+ // r2 : number of arguments
+ // r5 : the new target
+ // r4 : start index (to support rest parameters)
+ // r3 : the target to call
+ return RegisterArray(r3, r5, r2, r4);
+}
+
+// static
+constexpr auto ConstructWithSpreadDescriptor::registers() {
+ // r2 : number of arguments (on the stack, not including receiver)
+ // r3 : the target to call
+ // r5 : the new target
+ // r4 : the object to spread
+ return RegisterArray(r3, r5, r2, r4);
+}
+
+// static
+constexpr auto ConstructWithArrayLikeDescriptor::registers() {
+ // r3 : the target to call
+ // r5 : the new target
+ // r4 : the arguments list
+ return RegisterArray(r3, r5, r4);
+}
+
+// static
+constexpr auto ConstructStubDescriptor::registers() {
+ // r2 : number of arguments
+ // r3 : the target to call
+ // r5 : the new target
+ // r4 : allocation site or undefined
+ return RegisterArray(r3, r5, r2, r4);
+}
+
+// static
+constexpr auto AbortDescriptor::registers() { return RegisterArray(r3); }
+
+// static
+constexpr auto CompareDescriptor::registers() { return RegisterArray(r3, r2); }
+
+// static
+constexpr auto Compare_BaselineDescriptor::registers() {
+ // TODO(v8:11421): Implement on this platform.
+ return DefaultRegisterArray();
+}
+
+// static
+constexpr auto BinaryOpDescriptor::registers() { return RegisterArray(r3, r2); }
+
+// static
+constexpr auto BinaryOp_BaselineDescriptor::registers() {
+ // TODO(v8:11421): Implement on this platform.
+ return DefaultRegisterArray();
+}
+
+// static
+constexpr auto ApiCallbackDescriptor::registers() {
+ return RegisterArray(r3, // kApiFunctionAddress
+ r4, // kArgc
+ r5, // kCallData
+ r2); // kHolder
+}
+
+// static
+constexpr auto InterpreterDispatchDescriptor::registers() {
+ return RegisterArray(
+ kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister,
+ kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister);
+}
+
+// static
+constexpr auto InterpreterPushArgsThenCallDescriptor::registers() {
+ return RegisterArray(r2, // argument count (not including receiver)
+ r4, // address of first argument
+ r3); // the target callable to be called
+}
+
+// static
+constexpr auto InterpreterPushArgsThenConstructDescriptor::registers() {
+ return RegisterArray(
+ r2, // argument count (not including receiver)
+ r6, // address of the first argument
+ r3, // constructor to call
+ r5, // new target
+ r4); // allocation site feedback if available, undefined otherwise
+}
+
+// static
+constexpr auto ResumeGeneratorDescriptor::registers() {
+ return RegisterArray(r2, // the value to pass to the generator
+ r3); // the JSGeneratorObject to resume
+}
+
+// static
+constexpr auto RunMicrotasksEntryDescriptor::registers() {
+ return RegisterArray(r2, r3);
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TARGET_ARCH_S390
+
+#endif // V8_CODEGEN_S390_INTERFACE_DESCRIPTORS_S390_INL_H_
diff --git a/chromium/v8/src/codegen/s390/interface-descriptors-s390.cc b/chromium/v8/src/codegen/s390/interface-descriptors-s390.cc
deleted file mode 100644
index 9a9ecdcb8ba..00000000000
--- a/chromium/v8/src/codegen/s390/interface-descriptors-s390.cc
+++ /dev/null
@@ -1,306 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_S390
-
-#include "src/codegen/interface-descriptors.h"
-
-#include "src/execution/frames.h"
-
-namespace v8 {
-namespace internal {
-
-const Register CallInterfaceDescriptor::ContextRegister() { return cp; }
-
-void CallInterfaceDescriptor::DefaultInitializePlatformSpecific(
- CallInterfaceDescriptorData* data, int register_parameter_count) {
- const Register default_stub_registers[] = {r2, r3, r4, r5, r6};
- CHECK_LE(static_cast<size_t>(register_parameter_count),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(register_parameter_count,
- default_stub_registers);
-}
-
-void RecordWriteDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- const Register default_stub_registers[] = {r2, r3, r4, r5, r6};
-
- data->RestrictAllocatableRegisters(default_stub_registers,
- arraysize(default_stub_registers));
-
- CHECK_LE(static_cast<size_t>(kParameterCount),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
-}
-
-void DynamicCheckMapsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register default_stub_registers[] = {r2, r3, r4, r5, cp};
-
- data->RestrictAllocatableRegisters(default_stub_registers,
- arraysize(default_stub_registers));
-
- CHECK_LE(static_cast<size_t>(kParameterCount),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
-}
-
-void EphemeronKeyBarrierDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- const Register default_stub_registers[] = {r2, r3, r4, r5, r6};
-
- data->RestrictAllocatableRegisters(default_stub_registers,
- arraysize(default_stub_registers));
-
- CHECK_LE(static_cast<size_t>(kParameterCount),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
-}
-
-const Register LoadDescriptor::ReceiverRegister() { return r3; }
-const Register LoadDescriptor::NameRegister() { return r4; }
-const Register LoadDescriptor::SlotRegister() { return r2; }
-
-const Register LoadWithVectorDescriptor::VectorRegister() { return r5; }
-
-const Register
-LoadWithReceiverAndVectorDescriptor::LookupStartObjectRegister() {
- return r6;
-}
-
-const Register StoreDescriptor::ReceiverRegister() { return r3; }
-const Register StoreDescriptor::NameRegister() { return r4; }
-const Register StoreDescriptor::ValueRegister() { return r2; }
-const Register StoreDescriptor::SlotRegister() { return r6; }
-
-const Register StoreWithVectorDescriptor::VectorRegister() { return r5; }
-
-const Register StoreTransitionDescriptor::SlotRegister() { return r6; }
-const Register StoreTransitionDescriptor::VectorRegister() { return r5; }
-const Register StoreTransitionDescriptor::MapRegister() { return r7; }
-
-const Register ApiGetterDescriptor::HolderRegister() { return r2; }
-const Register ApiGetterDescriptor::CallbackRegister() { return r5; }
-
-const Register GrowArrayElementsDescriptor::ObjectRegister() { return r2; }
-const Register GrowArrayElementsDescriptor::KeyRegister() { return r5; }
-
-const Register BaselineLeaveFrameDescriptor::ParamsSizeRegister() {
- // TODO(v8:11421): Implement on this platform.
- UNREACHABLE();
-}
-const Register BaselineLeaveFrameDescriptor::WeightRegister() {
- // TODO(v8:11421): Implement on this platform.
- UNREACHABLE();
-}
-
-// static
-const Register TypeConversionDescriptor::ArgumentRegister() { return r2; }
-
-void TypeofDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r5};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallTrampolineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // r2 : number of arguments
- // r3 : the target to call
- Register registers[] = {r3, r2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallVarargsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // r2 : number of arguments (on the stack, not including receiver)
- // r3 : the target to call
- // r6 : arguments list length (untagged)
- // r4 : arguments list (FixedArray)
- Register registers[] = {r3, r2, r6, r4};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallForwardVarargsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // r2 : number of arguments
- // r4 : start index (to support rest parameters)
- // r3 : the target to call
- Register registers[] = {r3, r2, r4};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallFunctionTemplateDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // r3 : function template info
- // r4 : number of arguments (on the stack, not including receiver)
- Register registers[] = {r3, r4};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallWithSpreadDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // r2 : number of arguments (on the stack, not including receiver)
- // r3 : the target to call
- // r4 : the object to spread
- Register registers[] = {r3, r2, r4};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallWithArrayLikeDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // r3 : the target to call
- // r4 : the arguments list
- Register registers[] = {r3, r4};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructVarargsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // r2 : number of arguments (on the stack, not including receiver)
- // r3 : the target to call
- // r5 : the new target
- // r6 : arguments list length (untagged)
- // r4 : arguments list (FixedArray)
- Register registers[] = {r3, r5, r2, r6, r4};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructForwardVarargsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // r2 : number of arguments
- // r5 : the new target
- // r4 : start index (to support rest parameters)
- // r3 : the target to call
- Register registers[] = {r3, r5, r2, r4};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructWithSpreadDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // r2 : number of arguments (on the stack, not including receiver)
- // r3 : the target to call
- // r5 : the new target
- // r4 : the object to spread
- Register registers[] = {r3, r5, r2, r4};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructWithArrayLikeDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // r3 : the target to call
- // r5 : the new target
- // r4 : the arguments list
- Register registers[] = {r3, r5, r4};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructStubDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // r2 : number of arguments
- // r3 : the target to call
- // r5 : the new target
- // r4 : allocation site or undefined
- Register registers[] = {r3, r5, r2, r4};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void AbortDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r3};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CompareDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r3, r2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void Compare_BaselineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:11421): Implement on this platform.
- InitializePlatformUnimplemented(data, kParameterCount);
-}
-
-void BinaryOpDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r3, r2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void BinaryOp_BaselineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // TODO(v8:11421): Implement on this platform.
- InitializePlatformUnimplemented(data, kParameterCount);
-}
-
-void ApiCallbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- r3, // kApiFunctionAddress
- r4, // kArgc
- r5, // kCallData
- r2, // kHolder
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void InterpreterDispatchDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister,
- kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void InterpreterPushArgsThenCallDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- r2, // argument count (not including receiver)
- r4, // address of first argument
- r3 // the target callable to be call
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void InterpreterPushArgsThenConstructDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- r2, // argument count (not including receiver)
- r6, // address of the first argument
- r3, // constructor to call
- r5, // new target
- r4, // allocation site feedback if available, undefined otherwise
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ResumeGeneratorDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- r2, // the value to pass to the generator
- r3 // the JSGeneratorObject to resume
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void FrameDropperTrampolineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- r3, // loaded new FP
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void RunMicrotasksEntryDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r2, r3};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_S390
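A hedged sketch of the compile-time pattern that replaces the deleted runtime InitializePlatformSpecific code above: the per-descriptor register assignment becomes a constexpr function returning a fixed array, validated with a static_assert. The names RegId and DefaultRegisters below are placeholders, not V8's RegisterArray API.

#include <array>

// Hypothetical register id standing in for v8::internal::Register.
enum class RegId { r2, r3, r4, r5, r6 };

// Analogous to CallInterfaceDescriptor::DefaultRegisterArray(): the register
// list is a compile-time constant instead of data filled in at runtime.
constexpr std::array<RegId, 5> DefaultRegisters() {
  return {RegId::r2, RegId::r3, RegId::r4, RegId::r5, RegId::r6};
}

static_assert(DefaultRegisters().size() == 5,
              "mirrors the kMaxBuiltinRegisterParams check in the new header");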
diff --git a/chromium/v8/src/codegen/s390/macro-assembler-s390.cc b/chromium/v8/src/codegen/s390/macro-assembler-s390.cc
index be5798d8d4f..de25a93d8b3 100644
--- a/chromium/v8/src/codegen/s390/macro-assembler-s390.cc
+++ b/chromium/v8/src/codegen/s390/macro-assembler-s390.cc
@@ -12,6 +12,7 @@
#include "src/codegen/callable.h"
#include "src/codegen/code-factory.h"
#include "src/codegen/external-reference-table.h"
+#include "src/codegen/interface-descriptors-inl.h"
#include "src/codegen/macro-assembler.h"
#include "src/codegen/register-configuration.h"
#include "src/debug/debug.h"
@@ -40,6 +41,12 @@ namespace internal {
void TurboAssembler::DoubleMax(DoubleRegister result_reg,
DoubleRegister left_reg,
DoubleRegister right_reg) {
+ if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_1)) {
+ vfmax(result_reg, left_reg, right_reg, Condition(1), Condition(8),
+ Condition(3));
+ return;
+ }
+
Label check_zero, return_left, return_right, return_nan, done;
cdbr(left_reg, right_reg);
bunordered(&return_nan, Label::kNear);
@@ -80,6 +87,11 @@ void TurboAssembler::DoubleMax(DoubleRegister result_reg,
void TurboAssembler::DoubleMin(DoubleRegister result_reg,
DoubleRegister left_reg,
DoubleRegister right_reg) {
+ if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_1)) {
+ vfmin(result_reg, left_reg, right_reg, Condition(1), Condition(8),
+ Condition(3));
+ return;
+ }
Label check_zero, return_left, return_right, return_nan, done;
cdbr(left_reg, right_reg);
bunordered(&return_nan, Label::kNear);
@@ -126,6 +138,11 @@ void TurboAssembler::DoubleMin(DoubleRegister result_reg,
void TurboAssembler::FloatMax(DoubleRegister result_reg,
DoubleRegister left_reg,
DoubleRegister right_reg) {
+ if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_1)) {
+ vfmax(result_reg, left_reg, right_reg, Condition(1), Condition(8),
+ Condition(2));
+ return;
+ }
Label check_zero, return_left, return_right, return_nan, done;
cebr(left_reg, right_reg);
bunordered(&return_nan, Label::kNear);
@@ -166,6 +183,12 @@ void TurboAssembler::FloatMax(DoubleRegister result_reg,
void TurboAssembler::FloatMin(DoubleRegister result_reg,
DoubleRegister left_reg,
DoubleRegister right_reg) {
+ if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_1)) {
+ vfmin(result_reg, left_reg, right_reg, Condition(1), Condition(8),
+ Condition(2));
+ return;
+ }
+
Label check_zero, return_left, return_right, return_nan, done;
cebr(left_reg, right_reg);
bunordered(&return_nan, Label::kNear);
@@ -208,6 +231,39 @@ void TurboAssembler::FloatMin(DoubleRegister result_reg,
}
bind(&done);
}
+
+void TurboAssembler::CeilF32(DoubleRegister dst, DoubleRegister src) {
+ fiebra(ROUND_TOWARD_POS_INF, dst, src);
+}
+
+void TurboAssembler::CeilF64(DoubleRegister dst, DoubleRegister src) {
+ fidbra(ROUND_TOWARD_POS_INF, dst, src);
+}
+
+void TurboAssembler::FloorF32(DoubleRegister dst, DoubleRegister src) {
+ fiebra(ROUND_TOWARD_NEG_INF, dst, src);
+}
+
+void TurboAssembler::FloorF64(DoubleRegister dst, DoubleRegister src) {
+ fidbra(ROUND_TOWARD_NEG_INF, dst, src);
+}
+
+void TurboAssembler::TruncF32(DoubleRegister dst, DoubleRegister src) {
+ fiebra(ROUND_TOWARD_0, dst, src);
+}
+
+void TurboAssembler::TruncF64(DoubleRegister dst, DoubleRegister src) {
+ fidbra(ROUND_TOWARD_0, dst, src);
+}
+
+void TurboAssembler::NearestIntF32(DoubleRegister dst, DoubleRegister src) {
+ fiebra(ROUND_TO_NEAREST_TO_EVEN, dst, src);
+}
+
+void TurboAssembler::NearestIntF64(DoubleRegister dst, DoubleRegister src) {
+ fidbra(ROUND_TO_NEAREST_TO_EVEN, dst, src);
+}
+
int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
Register exclusion1,
Register exclusion2,
@@ -227,7 +283,7 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
RegList list = kJSCallerSaved & ~exclusions;
bytes += NumRegs(list) * kSystemPointerSize;
- if (fp_mode == kSaveFPRegs) {
+ if (fp_mode == SaveFPRegsMode::kSave) {
bytes += NumRegs(kCallerSavedDoubles) * kDoubleSize;
}
@@ -252,7 +308,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
MultiPush(list);
bytes += NumRegs(list) * kSystemPointerSize;
- if (fp_mode == kSaveFPRegs) {
+ if (fp_mode == SaveFPRegsMode::kSave) {
MultiPushDoubles(kCallerSavedDoubles);
bytes += NumRegs(kCallerSavedDoubles) * kDoubleSize;
}
@@ -263,7 +319,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
Register exclusion2, Register exclusion3) {
int bytes = 0;
- if (fp_mode == kSaveFPRegs) {
+ if (fp_mode == SaveFPRegsMode::kSave) {
MultiPopDoubles(kCallerSavedDoubles);
bytes += NumRegs(kCallerSavedDoubles) * kDoubleSize;
}
@@ -759,7 +815,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
Label done;
// Skip barrier if writing a smi.
- if (smi_check == INLINE_SMI_CHECK) {
+ if (smi_check == SmiCheck::kInline) {
JumpIfSmi(value, &done);
}
@@ -768,7 +824,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
DCHECK(IsAligned(offset, kTaggedSize));
lay(dst, MemOperand(object, offset - kHeapObjectTag));
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
Label ok;
AndP(r0, dst, Operand(kTaggedSize - 1));
beq(&ok, Label::kNear);
@@ -777,13 +833,13 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
}
RecordWrite(object, dst, value, lr_status, save_fp, remembered_set_action,
- OMIT_SMI_CHECK);
+ SmiCheck::kOmit);
bind(&done);
// Clobber clobbered input registers when running with the debug-code flag
// turned on to provoke errors.
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
mov(value, Operand(bit_cast<intptr_t>(kZapValue + 4)));
mov(dst, Operand(bit_cast<intptr_t>(kZapValue + 8)));
}
@@ -911,13 +967,13 @@ void MacroAssembler::RecordWrite(Register object, Register address,
RememberedSetAction remembered_set_action,
SmiCheck smi_check) {
DCHECK(object != value);
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
LoadTaggedPointerField(r0, MemOperand(address));
CmpS64(value, r0);
Check(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite);
}
- if ((remembered_set_action == OMIT_REMEMBERED_SET &&
+ if ((remembered_set_action == RememberedSetAction::kOmit &&
!FLAG_incremental_marking) ||
FLAG_disable_write_barriers) {
return;
@@ -926,7 +982,7 @@ void MacroAssembler::RecordWrite(Register object, Register address,
// catch stores of smis and stores into the young generation.
Label done;
- if (smi_check == INLINE_SMI_CHECK) {
+ if (smi_check == SmiCheck::kInline) {
JumpIfSmi(value, &done);
}
@@ -950,7 +1006,7 @@ void MacroAssembler::RecordWrite(Register object, Register address,
// Clobber clobbered registers when running with the debug-code flag
// turned on to provoke errors.
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
mov(address, Operand(bit_cast<intptr_t>(kZapValue + 12)));
mov(value, Operand(bit_cast<intptr_t>(kZapValue + 16)));
}
@@ -1352,7 +1408,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
// Reserve room for saved entry sp.
lay(sp, MemOperand(fp, -ExitFrameConstants::kFixedFrameSizeFromFp));
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
StoreU64(MemOperand(fp, ExitFrameConstants::kSPOffset), Operand::Zero(),
r1);
}
@@ -1537,7 +1593,7 @@ void MacroAssembler::StackOverflowCheck(Register num_args, Register scratch,
void MacroAssembler::InvokePrologue(Register expected_parameter_count,
Register actual_parameter_count,
- Label* done, InvokeFlag flag) {
+ Label* done, InvokeType type) {
Label regular_invoke;
// r2: actual arguments count
@@ -1653,9 +1709,9 @@ void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
Register expected_parameter_count,
Register actual_parameter_count,
- InvokeFlag flag) {
+ InvokeType type) {
// You can't call a function without a valid frame.
- DCHECK_IMPLIES(flag == CALL_FUNCTION, has_frame());
+ DCHECK_IMPLIES(type == InvokeType::kCall, has_frame());
DCHECK_EQ(function, r3);
DCHECK_IMPLIES(new_target.is_valid(), new_target == r5);
@@ -1669,18 +1725,20 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
}
Label done;
- InvokePrologue(expected_parameter_count, actual_parameter_count, &done, flag);
+ InvokePrologue(expected_parameter_count, actual_parameter_count, &done, type);
// We call indirectly through the code field in the function to
// allow recompilation to take effect without changing any of the
// call sites.
Register code = kJavaScriptCallCodeStartRegister;
LoadTaggedPointerField(code,
FieldMemOperand(function, JSFunction::kCodeOffset));
- if (flag == CALL_FUNCTION) {
- CallCodeObject(code);
- } else {
- DCHECK(flag == JUMP_FUNCTION);
- JumpCodeObject(code);
+ switch (type) {
+ case InvokeType::kCall:
+ CallCodeObject(code);
+ break;
+ case InvokeType::kJump:
+ JumpCodeObject(code);
+ break;
}
// Continue here if InvokePrologue does handle the invocation due to
// mismatched parameter counts.
@@ -1689,9 +1747,9 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
void MacroAssembler::InvokeFunctionWithNewTarget(
Register fun, Register new_target, Register actual_parameter_count,
- InvokeFlag flag) {
+ InvokeType type) {
// You can't call a function without a valid frame.
- DCHECK_IMPLIES(flag == CALL_FUNCTION, has_frame());
+ DCHECK_IMPLIES(type == InvokeType::kCall, has_frame());
// Contract with called JS functions requires that function is passed in r3.
DCHECK_EQ(fun, r3);
@@ -1707,15 +1765,15 @@ void MacroAssembler::InvokeFunctionWithNewTarget(
SharedFunctionInfo::kFormalParameterCountOffset));
InvokeFunctionCode(fun, new_target, expected_reg, actual_parameter_count,
- flag);
+ type);
}
void MacroAssembler::InvokeFunction(Register function,
Register expected_parameter_count,
Register actual_parameter_count,
- InvokeFlag flag) {
+ InvokeType type) {
// You can't call a function without a valid frame.
- DCHECK_IMPLIES(flag == CALL_FUNCTION, has_frame());
+ DCHECK_IMPLIES(type == InvokeType::kCall, has_frame());
// Contract with called JS functions requires that function is passed in r3.
DCHECK_EQ(function, r3);
@@ -1725,18 +1783,7 @@ void MacroAssembler::InvokeFunction(Register function,
FieldMemOperand(function, JSFunction::kContextOffset));
InvokeFunctionCode(r3, no_reg, expected_parameter_count,
- actual_parameter_count, flag);
-}
-
-void MacroAssembler::MaybeDropFrames() {
- // Check whether we need to drop frames to restart a function on the stack.
- ExternalReference restart_fp =
- ExternalReference::debug_restart_fp_address(isolate());
- Move(r3, restart_fp);
- LoadU64(r3, MemOperand(r3));
- CmpS64(r3, Operand::Zero());
- Jump(BUILTIN_CODE(isolate(), FrameDropperTrampoline), RelocInfo::CODE_TARGET,
- ne);
+ actual_parameter_count, type);
}
void MacroAssembler::PushStackHandler() {
@@ -1904,8 +1951,8 @@ void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
bool builtin_exit_frame) {
Move(r3, builtin);
- Handle<Code> code = CodeFactory::CEntry(isolate(), 1, kDontSaveFPRegs,
- kArgvOnStack, builtin_exit_frame);
+ Handle<Code> code = CodeFactory::CEntry(isolate(), 1, SaveFPRegsMode::kIgnore,
+ ArgvMode::kStack, builtin_exit_frame);
Jump(code, RelocInfo::CODE_TARGET);
}
@@ -1947,11 +1994,11 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
}
void TurboAssembler::Assert(Condition cond, AbortReason reason, CRegister cr) {
- if (emit_debug_code()) Check(cond, reason, cr);
+ if (FLAG_debug_code) Check(cond, reason, cr);
}
void TurboAssembler::AssertUnreachable(AbortReason reason) {
- if (emit_debug_code()) Abort(reason);
+ if (FLAG_debug_code) Abort(reason);
}
void TurboAssembler::Check(Condition cond, AbortReason reason, CRegister cr) {
@@ -1965,11 +2012,11 @@ void TurboAssembler::Check(Condition cond, AbortReason reason, CRegister cr) {
void TurboAssembler::Abort(AbortReason reason) {
Label abort_start;
bind(&abort_start);
-#ifdef DEBUG
- const char* msg = GetAbortReason(reason);
- RecordComment("Abort message: ");
- RecordComment(msg);
-#endif
+ if (FLAG_code_comments) {
+ const char* msg = GetAbortReason(reason);
+ RecordComment("Abort message: ");
+ RecordComment(msg);
+ }
// Avoid emitting call to builtin if requested.
if (trap_on_abort()) {
@@ -2017,7 +2064,7 @@ void MacroAssembler::LoadNativeContextSlot(Register dst, int index) {
}
void MacroAssembler::AssertNotSmi(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
STATIC_ASSERT(kSmiTag == 0);
TestIfSmi(object);
Check(ne, AbortReason::kOperandIsASmi, cr0);
@@ -2025,7 +2072,7 @@ void MacroAssembler::AssertNotSmi(Register object) {
}
void MacroAssembler::AssertSmi(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
STATIC_ASSERT(kSmiTag == 0);
TestIfSmi(object);
Check(eq, AbortReason::kOperandIsNotASmi, cr0);
@@ -2033,7 +2080,7 @@ void MacroAssembler::AssertSmi(Register object) {
}
void MacroAssembler::AssertConstructor(Register object, Register scratch) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
STATIC_ASSERT(kSmiTag == 0);
TestIfSmi(object);
Check(ne, AbortReason::kOperandIsASmiAndNotAConstructor);
@@ -2045,7 +2092,7 @@ void MacroAssembler::AssertConstructor(Register object, Register scratch) {
}
void MacroAssembler::AssertFunction(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
STATIC_ASSERT(kSmiTag == 0);
TestIfSmi(object);
Check(ne, AbortReason::kOperandIsASmiAndNotAFunction, cr0);
@@ -2059,7 +2106,7 @@ void MacroAssembler::AssertFunction(Register object) {
}
void MacroAssembler::AssertBoundFunction(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
STATIC_ASSERT(kSmiTag == 0);
TestIfSmi(object);
Check(ne, AbortReason::kOperandIsASmiAndNotABoundFunction, cr0);
@@ -2071,7 +2118,7 @@ void MacroAssembler::AssertBoundFunction(Register object) {
}
void MacroAssembler::AssertGeneratorObject(Register object) {
- if (!emit_debug_code()) return;
+ if (!FLAG_debug_code) return;
TestIfSmi(object);
Check(ne, AbortReason::kOperandIsASmiAndNotAGeneratorObject, cr0);
@@ -2101,7 +2148,7 @@ void MacroAssembler::AssertGeneratorObject(Register object) {
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
Register scratch) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
Label done_checking;
AssertNotSmi(object);
CompareRoot(object, RootIndex::kUndefinedValue);
diff --git a/chromium/v8/src/codegen/s390/macro-assembler-s390.h b/chromium/v8/src/codegen/s390/macro-assembler-s390.h
index f2719c3086c..13d7ac696b0 100644
--- a/chromium/v8/src/codegen/s390/macro-assembler-s390.h
+++ b/chromium/v8/src/codegen/s390/macro-assembler-s390.h
@@ -32,8 +32,6 @@ inline MemOperand FieldMemOperand(Register object, Register index, int offset) {
return MemOperand(object, index, offset - kHeapObjectTag);
}
-enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
-enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
@@ -70,6 +68,15 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
DoubleRegister right_reg);
void FloatMin(DoubleRegister result_reg, DoubleRegister left_reg,
DoubleRegister right_reg);
+ void CeilF32(DoubleRegister dst, DoubleRegister src);
+ void CeilF64(DoubleRegister dst, DoubleRegister src);
+ void FloorF32(DoubleRegister dst, DoubleRegister src);
+ void FloorF64(DoubleRegister dst, DoubleRegister src);
+ void TruncF32(DoubleRegister dst, DoubleRegister src);
+ void TruncF64(DoubleRegister dst, DoubleRegister src);
+ void NearestIntF32(DoubleRegister dst, DoubleRegister src);
+ void NearestIntF64(DoubleRegister dst, DoubleRegister src);
+
void LoadFromConstantsTable(Register destination,
int constant_index) override;
void LoadRootRegisterOffset(Register destination, intptr_t offset) override;
@@ -769,9 +776,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void ResetRoundingMode();
// These exist to provide portability between 32 and 64bit
- void LoadP(Register dst, const MemOperand& mem, Register scratch = no_reg) {
- LoadU64(dst, mem, scratch);
- }
void LoadMultipleP(Register dst1, Register dst2, const MemOperand& mem);
void StoreMultipleP(Register dst1, Register dst2, const MemOperand& mem);
void LoadMultipleW(Register dst1, Register dst2, const MemOperand& mem);
@@ -1094,22 +1098,22 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
}
void CallRuntime(const Runtime::Function* f, int num_arguments,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs);
+ SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore);
void CallRuntimeSaveDoubles(Runtime::FunctionId fid) {
const Runtime::Function* function = Runtime::FunctionForId(fid);
- CallRuntime(function, function->nargs, kSaveFPRegs);
+ CallRuntime(function, function->nargs, SaveFPRegsMode::kSave);
}
// Convenience function: Same as above, but takes the fid instead.
void CallRuntime(Runtime::FunctionId fid,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+ SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) {
const Runtime::Function* function = Runtime::FunctionForId(fid);
CallRuntime(function, function->nargs, save_doubles);
}
// Convenience function: Same as above, but takes the fid instead.
void CallRuntime(Runtime::FunctionId fid, int num_arguments,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+ SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) {
CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
}
@@ -1220,7 +1224,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Invoke the JavaScript function code by either calling or jumping.
void InvokeFunctionCode(Register function, Register new_target,
Register expected_parameter_count,
- Register actual_parameter_count, InvokeFlag flag);
+ Register actual_parameter_count, InvokeType type);
// On function call, call into the debugger if necessary.
void CheckDebugHook(Register fun, Register new_target,
@@ -1231,12 +1235,9 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// current context to the context in the function before invoking.
void InvokeFunctionWithNewTarget(Register function, Register new_target,
Register actual_parameter_count,
- InvokeFlag flag);
+ InvokeType type);
void InvokeFunction(Register function, Register expected_parameter_count,
- Register actual_parameter_count, InvokeFlag flag);
-
- // Frame restart support
- void MaybeDropFrames();
+ Register actual_parameter_count, InvokeType type);
// Exception handling
@@ -1353,8 +1354,8 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void RecordWriteField(
Register object, int offset, Register value, Register scratch,
LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
+ RememberedSetAction remembered_set_action = RememberedSetAction::kEmit,
+ SmiCheck smi_check = SmiCheck::kInline);
// For a given |object| notify the garbage collector that the slot |address|
// has been written. |value| is the object being stored. The value and
@@ -1362,15 +1363,15 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void RecordWrite(
Register object, Register address, Register value,
LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
+ RememberedSetAction remembered_set_action = RememberedSetAction::kEmit,
+ SmiCheck smi_check = SmiCheck::kInline);
private:
static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
// Helper functions for generating invokes.
void InvokePrologue(Register expected_parameter_count,
Register actual_parameter_count, Label* done,
- InvokeFlag flag);
+ InvokeType type);
DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler);
};
diff --git a/chromium/v8/src/codegen/safepoint-table.cc b/chromium/v8/src/codegen/safepoint-table.cc
index 58fb6ed9e19..67a17d5f0ee 100644
--- a/chromium/v8/src/codegen/safepoint-table.cc
+++ b/chromium/v8/src/codegen/safepoint-table.cc
@@ -20,20 +20,18 @@ namespace internal {
SafepointTable::SafepointTable(Isolate* isolate, Address pc, Code code)
: SafepointTable(code.InstructionStart(isolate, pc),
- code.SafepointTableAddress(), code.stack_slots(), true) {}
+ code.SafepointTableAddress(), true) {}
#if V8_ENABLE_WEBASSEMBLY
SafepointTable::SafepointTable(const wasm::WasmCode* code)
: SafepointTable(code->instruction_start(),
code->instruction_start() + code->safepoint_table_offset(),
- code->stack_slots(), false) {}
+ false) {}
#endif // V8_ENABLE_WEBASSEMBLY
SafepointTable::SafepointTable(Address instruction_start,
- Address safepoint_table_address,
- uint32_t stack_slots, bool has_deopt)
+ Address safepoint_table_address, bool has_deopt)
: instruction_start_(instruction_start),
- stack_slots_(stack_slots),
has_deopt_(has_deopt),
safepoint_table_address_(safepoint_table_address),
length_(ReadLength(safepoint_table_address)),
@@ -69,27 +67,18 @@ SafepointEntry SafepointTable::FindEntry(Address pc) const {
UNREACHABLE();
}
-void SafepointTable::PrintEntry(unsigned index,
- std::ostream& os) const { // NOLINT
+void SafepointTable::PrintEntry(unsigned index, std::ostream& os) const {
disasm::NameConverter converter;
SafepointEntry entry = GetEntry(index);
uint8_t* bits = entry.bits();
// Print the stack slot bits.
if (entry_size_ > 0) {
- const int first = 0;
- int last = entry_size_ - 1;
- for (int i = first; i < last; i++) PrintBits(os, bits[i], kBitsPerByte);
- int last_bits = stack_slots_ - ((last - first) * kBitsPerByte);
- PrintBits(os, bits[last], last_bits);
- }
-}
-
-void SafepointTable::PrintBits(std::ostream& os, // NOLINT
- uint8_t byte, int digits) {
- DCHECK(digits >= 0 && digits <= kBitsPerByte);
- for (int i = 0; i < digits; i++) {
- os << (((byte & (1 << i)) == 0) ? "0" : "1");
+ for (uint32_t i = 0; i < entry_size_; ++i) {
+ for (int bit = 0; bit < kBitsPerByte; ++bit) {
+ os << ((bits[i] & (1 << bit)) ? "1" : "0");
+ }
+ }
}
}
@@ -122,6 +111,12 @@ int SafepointTableBuilder::UpdateDeoptimizationInfo(int pc, int trampoline,
void SafepointTableBuilder::Emit(Assembler* assembler, int bits_per_entry) {
RemoveDuplicates();
+ TrimEntries(&bits_per_entry);
+
+#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64
+ // We cannot emit a const pool within the safepoint table.
+ Assembler::BlockConstPoolScope block_const_pool(assembler);
+#endif
// Make sure the safepoint table is properly aligned. Pad with nops.
assembler->Align(Code::kMetadataAlignment);
@@ -168,6 +163,7 @@ void SafepointTableBuilder::Emit(Assembler* assembler, int bits_per_entry) {
// Run through the indexes and build a bitmap.
for (int idx : *indexes) {
+ DCHECK_GT(bits_per_entry, idx);
int index = bits_per_entry - 1 - idx;
int byte_index = index >> kBitsPerByteLog2;
int bit_index = index & (kBitsPerByte - 1);
@@ -203,6 +199,28 @@ void SafepointTableBuilder::RemoveDuplicates() {
deoptimization_info_.front().pc = kMaxUInt32;
}
+void SafepointTableBuilder::TrimEntries(int* bits_per_entry) {
+ int min_index = *bits_per_entry;
+ if (min_index == 0) return; // Early exit: nothing to trim.
+
+ for (auto& info : deoptimization_info_) {
+ for (int idx : *info.stack_indexes) {
+ DCHECK_GT(*bits_per_entry, idx); // Validity check.
+ if (idx >= min_index) continue;
+ if (idx == 0) return; // Early exit: nothing to trim.
+ min_index = idx;
+ }
+ }
+
+ DCHECK_LT(0, min_index);
+ *bits_per_entry -= min_index;
+ for (auto& info : deoptimization_info_) {
+ for (int& idx : *info.stack_indexes) {
+ idx -= min_index;
+ }
+ }
+}
+
bool SafepointTableBuilder::IsIdenticalExceptForPc(
const DeoptimizationInfo& info1, const DeoptimizationInfo& info2) const {
if (info1.deopt_index != info2.deopt_index) return false;
diff --git a/chromium/v8/src/codegen/safepoint-table.h b/chromium/v8/src/codegen/safepoint-table.h
index 623b5246980..07bbcaf9a08 100644
--- a/chromium/v8/src/codegen/safepoint-table.h
+++ b/chromium/v8/src/codegen/safepoint-table.h
@@ -5,6 +5,7 @@
#ifndef V8_CODEGEN_SAFEPOINT_TABLE_H_
#define V8_CODEGEN_SAFEPOINT_TABLE_H_
+#include "src/base/iterator.h"
#include "src/base/memory.h"
#include "src/common/assert-scope.h"
#include "src/utils/allocation.h"
@@ -21,11 +22,14 @@ class WasmCode;
class SafepointEntry {
public:
- SafepointEntry()
- : deopt_index_(0), bits_(nullptr), trampoline_pc_(kNoTrampolinePC) {}
-
- SafepointEntry(unsigned deopt_index, uint8_t* bits, int trampoline_pc)
- : deopt_index_(deopt_index), bits_(bits), trampoline_pc_(trampoline_pc) {
+ SafepointEntry() = default;
+
+ SafepointEntry(unsigned deopt_index, uint8_t* bits, uint8_t* bits_end,
+ int trampoline_pc)
+ : deopt_index_(deopt_index),
+ bits_(bits),
+ bits_end_(bits_end),
+ trampoline_pc_(trampoline_pc) {
DCHECK(is_valid());
}
@@ -38,6 +42,7 @@ class SafepointEntry {
void Reset() {
deopt_index_ = 0;
bits_ = nullptr;
+ bits_end_ = nullptr;
}
int trampoline_pc() { return trampoline_pc_; }
@@ -67,16 +72,23 @@ class SafepointEntry {
return deopt_index_ != kNoDeoptIndex;
}
- uint8_t* bits() {
+ uint8_t* bits() const {
DCHECK(is_valid());
return bits_;
}
+ base::iterator_range<uint8_t*> iterate_bits() const {
+ return base::make_iterator_range(bits_, bits_end_);
+ }
+
+ size_t entry_size() const { return bits_end_ - bits_; }
+
private:
- uint32_t deopt_index_;
- uint8_t* bits_;
+ uint32_t deopt_index_ = 0;
+ uint8_t* bits_ = nullptr;
+ uint8_t* bits_end_ = nullptr;
// It needs to be an integer as it is -1 for eager deoptimizations.
- int trampoline_pc_;
+ int trampoline_pc_ = kNoTrampolinePC;
};
class SafepointTable {
@@ -117,17 +129,17 @@ class SafepointTable {
int trampoline_pc = has_deopt_
? base::Memory<int>(GetTrampolineLocation(index))
: SafepointEntry::kNoTrampolinePC;
- return SafepointEntry(deopt_index, bits, trampoline_pc);
+ return SafepointEntry(deopt_index, bits, bits + entry_size_, trampoline_pc);
}
// Returns the entry for the given pc.
SafepointEntry FindEntry(Address pc) const;
- void PrintEntry(unsigned index, std::ostream& os) const; // NOLINT
+ void PrintEntry(unsigned index, std::ostream& os) const;
private:
SafepointTable(Address instruction_start, Address safepoint_table_address,
- uint32_t stack_slots, bool has_deopt);
+ bool has_deopt);
static const uint8_t kNoRegisters = 0xFF;
@@ -165,12 +177,9 @@ class SafepointTable {
return GetPcOffsetLocation(index) + kTrampolinePcOffset;
}
- static void PrintBits(std::ostream& os, uint8_t byte, int digits);
-
DISALLOW_GARBAGE_COLLECTION(no_gc_)
const Address instruction_start_;
- const uint32_t stack_slots_;
const bool has_deopt_;
// Safepoint table layout.
@@ -254,6 +263,10 @@ class SafepointTableBuilder {
// If all entries are identical, replace them by 1 entry with pc = kMaxUInt32.
void RemoveDuplicates();
+ // Try to trim entries by removing trailing zeros (and shrinking
+ // {bits_per_entry}).
+ void TrimEntries(int* bits_per_entry);
+
ZoneChunkList<DeoptimizationInfo> deoptimization_info_;
unsigned offset_;
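A small self-contained sketch of the trimming idea added in SafepointTableBuilder::TrimEntries above: find the smallest stack index referenced by any safepoint, subtract it from every index, and shrink bits_per_entry by the same amount. Plain STL containers stand in for V8's ZoneChunkList; this is an illustration under that assumption, not the actual implementation.

#include <algorithm>
#include <cassert>
#include <vector>

// Each safepoint records which stack slots hold tagged values.
void TrimEntriesSketch(std::vector<std::vector<int>>& safepoints,
                       int& bits_per_entry) {
  int min_index = bits_per_entry;
  for (const auto& indexes : safepoints) {
    for (int idx : indexes) {
      assert(idx < bits_per_entry);          // validity check, as in the diff
      min_index = std::min(min_index, idx);
    }
  }
  if (min_index == 0) return;                // nothing to trim
  bits_per_entry -= min_index;               // every entry gets smaller
  for (auto& indexes : safepoints) {
    for (int& idx : indexes) idx -= min_index;  // shift indexes down
  }
}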
diff --git a/chromium/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc b/chromium/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc
index 366d1afac9e..3a73ae09f8a 100644
--- a/chromium/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc
+++ b/chromium/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc
@@ -29,6 +29,174 @@ void SharedTurboAssembler::Movapd(XMMRegister dst, XMMRegister src) {
}
}
+void SharedTurboAssembler::Shufps(XMMRegister dst, XMMRegister src1,
+ XMMRegister src2, uint8_t imm8) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ vshufps(dst, src1, src2, imm8);
+ } else {
+ if (dst != src1) {
+ movaps(dst, src1);
+ }
+ shufps(dst, src2, imm8);
+ }
+}
+
+void SharedTurboAssembler::F64x2ExtractLane(DoubleRegister dst, XMMRegister src,
+ uint8_t lane) {
+ if (lane == 0) {
+ if (dst != src) {
+ Movaps(dst, src);
+ }
+ } else {
+ DCHECK_EQ(1, lane);
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ // Pass src as operand to avoid false-dependency on dst.
+ vmovhlps(dst, src, src);
+ } else {
+ movhlps(dst, src);
+ }
+ }
+}
+
+void SharedTurboAssembler::F64x2ReplaceLane(XMMRegister dst, XMMRegister src,
+ DoubleRegister rep, uint8_t lane) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ if (lane == 0) {
+ vpblendw(dst, src, rep, 0b00001111);
+ } else {
+ vmovlhps(dst, src, rep);
+ }
+ } else {
+ CpuFeatureScope scope(this, SSE4_1);
+ if (dst != src) {
+ DCHECK_NE(dst, rep); // Ensure rep is not overwritten.
+ movaps(dst, src);
+ }
+ if (lane == 0) {
+ pblendw(dst, rep, 0b00001111);
+ } else {
+ movlhps(dst, rep);
+ }
+ }
+}
+
+void SharedTurboAssembler::F64x2Min(XMMRegister dst, XMMRegister lhs,
+ XMMRegister rhs, XMMRegister scratch) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ // The minpd instruction doesn't propagate NaNs and +0's in its first
+ // operand. Perform minpd in both orders, merge the results, and adjust.
+ vminpd(scratch, lhs, rhs);
+ vminpd(dst, rhs, lhs);
+ // Propagate -0's and NaNs, which may be non-canonical.
+ vorpd(scratch, scratch, dst);
+ // Canonicalize NaNs by quieting and clearing the payload.
+ vcmpunordpd(dst, dst, scratch);
+ vorpd(scratch, scratch, dst);
+ vpsrlq(dst, dst, byte{13});
+ vandnpd(dst, dst, scratch);
+ } else {
+ // Compare lhs with rhs, and rhs with lhs, and have the results in scratch
+ // and dst. If dst overlaps with lhs or rhs, we can save a move.
+ if (dst == lhs || dst == rhs) {
+ XMMRegister src = dst == lhs ? rhs : lhs;
+ movaps(scratch, src);
+ minpd(scratch, dst);
+ minpd(dst, src);
+ } else {
+ movaps(scratch, lhs);
+ movaps(dst, rhs);
+ minpd(scratch, rhs);
+ minpd(dst, lhs);
+ }
+ orpd(scratch, dst);
+ cmpunordpd(dst, scratch);
+ orpd(scratch, dst);
+ psrlq(dst, byte{13});
+ andnpd(dst, scratch);
+ }
+}
+
+void SharedTurboAssembler::F64x2Max(XMMRegister dst, XMMRegister lhs,
+ XMMRegister rhs, XMMRegister scratch) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ // The maxpd instruction doesn't propagate NaNs and +0's in its first
+ // operand. Perform maxpd in both orders, merge the results, and adjust.
+ vmaxpd(scratch, lhs, rhs);
+ vmaxpd(dst, rhs, lhs);
+ // Find discrepancies.
+ vxorpd(dst, dst, scratch);
+ // Propagate NaNs, which may be non-canonical.
+ vorpd(scratch, scratch, dst);
+ // Propagate sign discrepancy and (subtle) quiet NaNs.
+ vsubpd(scratch, scratch, dst);
+ // Canonicalize NaNs by clearing the payload. Sign is non-deterministic.
+ vcmpunordpd(dst, dst, scratch);
+ vpsrlq(dst, dst, byte{13});
+ vandnpd(dst, dst, scratch);
+ } else {
+ if (dst == lhs || dst == rhs) {
+ XMMRegister src = dst == lhs ? rhs : lhs;
+ movaps(scratch, src);
+ maxpd(scratch, dst);
+ maxpd(dst, src);
+ } else {
+ movaps(scratch, lhs);
+ movaps(dst, rhs);
+ maxpd(scratch, rhs);
+ maxpd(dst, lhs);
+ }
+ xorpd(dst, scratch);
+ orpd(scratch, dst);
+ subpd(scratch, dst);
+ cmpunordpd(dst, scratch);
+ psrlq(dst, byte{13});
+ andnpd(dst, scratch);
+ }
+}
+
+void SharedTurboAssembler::F32x4Splat(XMMRegister dst, DoubleRegister src) {
+ if (CpuFeatures::IsSupported(AVX2)) {
+ CpuFeatureScope avx2_scope(this, AVX2);
+ vbroadcastss(dst, src);
+ } else if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ vshufps(dst, src, src, 0);
+ } else {
+ if (dst == src) {
+ // 1 byte shorter than pshufd.
+ shufps(dst, src, 0);
+ } else {
+ pshufd(dst, src, 0);
+ }
+ }
+}
+
+void SharedTurboAssembler::F32x4ExtractLane(FloatRegister dst, XMMRegister src,
+ uint8_t lane) {
+ DCHECK_LT(lane, 4);
+ // These instructions are shorter than insertps, but will leave junk in
+ // the top lanes of dst.
+ if (lane == 0) {
+ if (dst != src) {
+ Movaps(dst, src);
+ }
+ } else if (lane == 1) {
+ Movshdup(dst, src);
+ } else if (lane == 2 && dst == src) {
+ // Check dst == src to avoid false dependency on dst.
+ Movhlps(dst, src);
+ } else if (dst == src) {
+ Shufps(dst, src, src, lane);
+ } else {
+ Pshufd(dst, src, lane);
+ }
+}
+
void SharedTurboAssembler::S128Store32Lane(Operand dst, XMMRegister src,
uint8_t laneidx) {
if (laneidx == 0) {
@@ -233,6 +401,22 @@ void SharedTurboAssembler::I32x4UConvertI16x8High(XMMRegister dst,
}
}
+void SharedTurboAssembler::I64x2Neg(XMMRegister dst, XMMRegister src,
+ XMMRegister scratch) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vpxor(scratch, scratch, scratch);
+ vpsubq(dst, scratch, src);
+ } else {
+ if (dst == src) {
+ movaps(scratch, src);
+ std::swap(src, scratch);
+ }
+ pxor(dst, dst);
+ psubq(dst, src);
+ }
+}
+
void SharedTurboAssembler::I64x2Abs(XMMRegister dst, XMMRegister src,
XMMRegister scratch) {
if (CpuFeatures::IsSupported(AVX)) {
@@ -379,6 +563,17 @@ void SharedTurboAssembler::I64x2UConvertI32x4High(XMMRegister dst,
}
}
+void SharedTurboAssembler::S128Not(XMMRegister dst, XMMRegister src,
+ XMMRegister scratch) {
+ if (dst == src) {
+ Pcmpeqd(scratch, scratch);
+ Pxor(dst, scratch);
+ } else {
+ Pcmpeqd(dst, dst);
+ Pxor(dst, src);
+ }
+}
+
void SharedTurboAssembler::S128Select(XMMRegister dst, XMMRegister mask,
XMMRegister src1, XMMRegister src2,
XMMRegister scratch) {
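A hedged, self-contained sketch of the dispatch pattern the SharedTurboAssembler helpers above follow, and which the AvxHelper template in the header below generalizes: prefer the non-destructive three-operand AVX encoding when the CPU supports it, otherwise fall back to the destructive two-operand SSE form, copying the first source into dst when they differ. The emitters here are printf placeholders, not real Assembler members.

#include <cstdio>

struct Reg { int code; };
inline bool operator==(Reg a, Reg b) { return a.code == b.code; }

// Placeholders standing in for Assembler::vandps / andps / movaps.
void vandps(Reg dst, Reg src1, Reg src2) {
  std::printf("vandps %d, %d, %d\n", dst.code, src1.code, src2.code);
}
void andps(Reg dst, Reg src) { std::printf("andps  %d, %d\n", dst.code, src.code); }
void movaps(Reg dst, Reg src) { std::printf("movaps %d, %d\n", dst.code, src.code); }

bool cpu_has_avx = true;  // stand-in for CpuFeatures::IsSupported(AVX)

// Non-destructive Andps(dst, src1, src2), in the spirit of Shufps() above.
void AndpsSketch(Reg dst, Reg src1, Reg src2) {
  if (cpu_has_avx) {
    vandps(dst, src1, src2);                 // AVX: dst may differ from both sources
  } else {
    if (!(dst == src1)) movaps(dst, src1);   // SSE: dst doubles as the first source
    andps(dst, src2);
  }
}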
diff --git a/chromium/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h b/chromium/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h
index e2778e472d4..6be9444c658 100644
--- a/chromium/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h
+++ b/chromium/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h
@@ -39,121 +39,252 @@ class V8_EXPORT_PRIVATE SharedTurboAssembler : public TurboAssemblerBase {
}
}
- template <typename Dst, typename... Args>
+ // Shufps that will mov src1 into dst if AVX is not supported.
+ void Shufps(XMMRegister dst, XMMRegister src1, XMMRegister src2,
+ uint8_t imm8);
+
+ // Helper struct to implement functions that check for AVX support and
+ // dispatch to the appropriate AVX/SSE instruction.
+ template <typename Dst, typename Arg, typename... Args>
struct AvxHelper {
Assembler* assm;
base::Optional<CpuFeature> feature = base::nullopt;
// Call a method where the AVX version expects the dst argument to be
// duplicated.
- template <void (Assembler::*avx)(Dst, Dst, Args...),
+ // E.g. Andps(x, y) -> vandps(x, x, y)
+ // -> andps(x, y)
+ template <void (Assembler::*avx)(Dst, Dst, Arg, Args...),
+ void (Assembler::*no_avx)(Dst, Arg, Args...)>
+ void emit(Dst dst, Arg arg, Args... args) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(assm, AVX);
+ (assm->*avx)(dst, dst, arg, args...);
+ } else if (feature.has_value()) {
+ DCHECK(CpuFeatures::IsSupported(*feature));
+ CpuFeatureScope scope(assm, *feature);
+ (assm->*no_avx)(dst, arg, args...);
+ } else {
+ (assm->*no_avx)(dst, arg, args...);
+ }
+ }
+
+ // Call a method in the AVX form (one more operand), but if unsupported will
+ // check that dst == first src.
+ // E.g. Andps(x, y, z) -> vandps(x, y, z)
+ // -> andps(x, z) and check that x == y
+ template <void (Assembler::*avx)(Dst, Arg, Args...),
void (Assembler::*no_avx)(Dst, Args...)>
- void emit(Dst dst, Args... args) {
+ void emit(Dst dst, Arg arg, Args... args) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(assm, AVX);
- (assm->*avx)(dst, dst, args...);
+ (assm->*avx)(dst, arg, args...);
} else if (feature.has_value()) {
+ DCHECK_EQ(dst, arg);
DCHECK(CpuFeatures::IsSupported(*feature));
CpuFeatureScope scope(assm, *feature);
(assm->*no_avx)(dst, args...);
} else {
+ DCHECK_EQ(dst, arg);
(assm->*no_avx)(dst, args...);
}
}
// Call a method where the AVX version expects no duplicated dst argument.
- template <void (Assembler::*avx)(Dst, Args...),
- void (Assembler::*no_avx)(Dst, Args...)>
- void emit(Dst dst, Args... args) {
+ // E.g. Movddup(x, y) -> vmovddup(x, y)
+ // -> movddup(x, y)
+ template <void (Assembler::*avx)(Dst, Arg, Args...),
+ void (Assembler::*no_avx)(Dst, Arg, Args...)>
+ void emit(Dst dst, Arg arg, Args... args) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(assm, AVX);
- (assm->*avx)(dst, args...);
+ (assm->*avx)(dst, arg, args...);
} else if (feature.has_value()) {
DCHECK(CpuFeatures::IsSupported(*feature));
CpuFeatureScope scope(assm, *feature);
- (assm->*no_avx)(dst, args...);
+ (assm->*no_avx)(dst, arg, args...);
} else {
- (assm->*no_avx)(dst, args...);
+ (assm->*no_avx)(dst, arg, args...);
}
}
};
-#define AVX_OP(macro_name, name) \
- template <typename Dst, typename... Args> \
- void macro_name(Dst dst, Args... args) { \
- AvxHelper<Dst, Args...>{this} \
- .template emit<&Assembler::v##name, &Assembler::name>(dst, args...); \
+#define AVX_OP(macro_name, name) \
+ template <typename Dst, typename Arg, typename... Args> \
+ void macro_name(Dst dst, Arg arg, Args... args) { \
+ AvxHelper<Dst, Arg, Args...>{this} \
+ .template emit<&Assembler::v##name, &Assembler::name>(dst, arg, \
+ args...); \
}
-#define AVX_OP_SSE3(macro_name, name) \
- template <typename Dst, typename... Args> \
- void macro_name(Dst dst, Args... args) { \
- AvxHelper<Dst, Args...>{this, base::Optional<CpuFeature>(SSE3)} \
- .template emit<&Assembler::v##name, &Assembler::name>(dst, args...); \
+#define AVX_OP_SSE3(macro_name, name) \
+ template <typename Dst, typename Arg, typename... Args> \
+ void macro_name(Dst dst, Arg arg, Args... args) { \
+ AvxHelper<Dst, Arg, Args...>{this, base::Optional<CpuFeature>(SSE3)} \
+ .template emit<&Assembler::v##name, &Assembler::name>(dst, arg, \
+ args...); \
}
-#define AVX_OP_SSSE3(macro_name, name) \
- template <typename Dst, typename... Args> \
- void macro_name(Dst dst, Args... args) { \
- AvxHelper<Dst, Args...>{this, base::Optional<CpuFeature>(SSSE3)} \
- .template emit<&Assembler::v##name, &Assembler::name>(dst, args...); \
+#define AVX_OP_SSSE3(macro_name, name) \
+ template <typename Dst, typename Arg, typename... Args> \
+ void macro_name(Dst dst, Arg arg, Args... args) { \
+ AvxHelper<Dst, Arg, Args...>{this, base::Optional<CpuFeature>(SSSE3)} \
+ .template emit<&Assembler::v##name, &Assembler::name>(dst, arg, \
+ args...); \
}
-#define AVX_OP_SSE4_1(macro_name, name) \
- template <typename Dst, typename... Args> \
- void macro_name(Dst dst, Args... args) { \
- AvxHelper<Dst, Args...>{this, base::Optional<CpuFeature>(SSE4_1)} \
- .template emit<&Assembler::v##name, &Assembler::name>(dst, args...); \
+#define AVX_OP_SSE4_1(macro_name, name) \
+ template <typename Dst, typename Arg, typename... Args> \
+ void macro_name(Dst dst, Arg arg, Args... args) { \
+ AvxHelper<Dst, Arg, Args...>{this, base::Optional<CpuFeature>(SSE4_1)} \
+ .template emit<&Assembler::v##name, &Assembler::name>(dst, arg, \
+ args...); \
}
-#define AVX_OP_SSE4_2(macro_name, name) \
- template <typename Dst, typename... Args> \
- void macro_name(Dst dst, Args... args) { \
- AvxHelper<Dst, Args...>{this, base::Optional<CpuFeature>(SSE4_2)} \
- .template emit<&Assembler::v##name, &Assembler::name>(dst, args...); \
+#define AVX_OP_SSE4_2(macro_name, name) \
+ template <typename Dst, typename Arg, typename... Args> \
+ void macro_name(Dst dst, Arg arg, Args... args) { \
+ AvxHelper<Dst, Arg, Args...>{this, base::Optional<CpuFeature>(SSE4_2)} \
+ .template emit<&Assembler::v##name, &Assembler::name>(dst, arg, \
+ args...); \
}
+ // Keep this list sorted by required extension, then instruction name.
+ AVX_OP(Addpd, addpd)
+ AVX_OP(Addps, addps)
+ AVX_OP(Andnpd, andnpd)
+ AVX_OP(Andnps, andnps)
+ AVX_OP(Andpd, andpd)
+ AVX_OP(Andps, andps)
+ AVX_OP(Cmpeqpd, cmpeqpd)
+ AVX_OP(Cmplepd, cmplepd)
+ AVX_OP(Cmpleps, cmpleps)
+ AVX_OP(Cmpltpd, cmpltpd)
+ AVX_OP(Cmpneqpd, cmpneqpd)
+ AVX_OP(Cmpunordpd, cmpunordpd)
+ AVX_OP(Cmpunordps, cmpunordps)
AVX_OP(Cvtdq2pd, cvtdq2pd)
AVX_OP(Cvtdq2ps, cvtdq2ps)
- AVX_OP(Cvtps2pd, cvtps2pd)
AVX_OP(Cvtpd2ps, cvtpd2ps)
+ AVX_OP(Cvtps2pd, cvtps2pd)
AVX_OP(Cvttps2dq, cvttps2dq)
+ AVX_OP(Divpd, divpd)
+ AVX_OP(Divps, divps)
+ AVX_OP(Maxpd, maxpd)
+ AVX_OP(Maxps, maxps)
+ AVX_OP(Minpd, minpd)
+ AVX_OP(Minps, minps)
AVX_OP(Movaps, movaps)
AVX_OP(Movd, movd)
+ AVX_OP(Movhlps, movhlps)
AVX_OP(Movhps, movhps)
AVX_OP(Movlps, movlps)
AVX_OP(Movmskpd, movmskpd)
AVX_OP(Movmskps, movmskps)
- AVX_OP(Movss, movss)
AVX_OP(Movsd, movsd)
+ AVX_OP(Movss, movss)
AVX_OP(Movupd, movupd)
AVX_OP(Movups, movups)
+ AVX_OP(Mulpd, mulpd)
+ AVX_OP(Mulps, mulps)
+ AVX_OP(Orpd, orpd)
+ AVX_OP(Orps, orps)
+ AVX_OP(Packssdw, packssdw)
+ AVX_OP(Packsswb, packsswb)
+ AVX_OP(Packuswb, packuswb)
+ AVX_OP(Paddb, paddb)
+ AVX_OP(Paddd, paddd)
+ AVX_OP(Paddq, paddq)
+ AVX_OP(Paddsb, paddsb)
+ AVX_OP(Paddusb, paddusb)
+ AVX_OP(Paddusw, paddusw)
+ AVX_OP(Paddw, paddw)
+ AVX_OP(Pand, pand)
+ AVX_OP(Pavgb, pavgb)
+ AVX_OP(Pavgw, pavgw)
+ AVX_OP(Pcmpgtb, pcmpgtb)
+ AVX_OP(Pcmpeqd, pcmpeqd)
+ AVX_OP(Pmaxub, pmaxub)
+ AVX_OP(Pminub, pminub)
AVX_OP(Pmovmskb, pmovmskb)
AVX_OP(Pmullw, pmullw)
- AVX_OP(Pshuflw, pshuflw)
- AVX_OP(Pshufhw, pshufhw)
+ AVX_OP(Pmuludq, pmuludq)
+ AVX_OP(Por, por)
AVX_OP(Pshufd, pshufd)
+ AVX_OP(Pshufhw, pshufhw)
+ AVX_OP(Pshuflw, pshuflw)
+ AVX_OP(Pslld, pslld)
+ AVX_OP(Psllq, psllq)
+ AVX_OP(Psllw, psllw)
+ AVX_OP(Psrad, psrad)
+ AVX_OP(Psraw, psraw)
+ AVX_OP(Psrld, psrld)
+ AVX_OP(Psrlq, psrlq)
+ AVX_OP(Psrlw, psrlw)
+ AVX_OP(Psubb, psubb)
+ AVX_OP(Psubd, psubd)
+ AVX_OP(Psubq, psubq)
+ AVX_OP(Psubsb, psubsb)
+ AVX_OP(Psubusb, psubusb)
+ AVX_OP(Psubw, psubw)
+ AVX_OP(Punpckhbw, punpckhbw)
+ AVX_OP(Punpckhdq, punpckhdq)
+ AVX_OP(Punpckhqdq, punpckhqdq)
+ AVX_OP(Punpckhwd, punpckhwd)
+ AVX_OP(Punpcklbw, punpcklbw)
+ AVX_OP(Punpckldq, punpckldq)
+ AVX_OP(Punpcklqdq, punpcklqdq)
+ AVX_OP(Punpcklwd, punpcklwd)
+ AVX_OP(Pxor, pxor)
AVX_OP(Rcpps, rcpps)
AVX_OP(Rsqrtps, rsqrtps)
- AVX_OP(Sqrtps, sqrtps)
AVX_OP(Sqrtpd, sqrtpd)
+ AVX_OP(Sqrtps, sqrtps)
+ AVX_OP(Sqrtsd, sqrtsd)
+ AVX_OP(Sqrtss, sqrtss)
+ AVX_OP(Subpd, subpd)
+ AVX_OP(Subps, subps)
+ AVX_OP(Unpcklps, unpcklps)
+ AVX_OP(Xorpd, xorpd)
+ AVX_OP(Xorps, xorps)
+
+ AVX_OP_SSE3(Haddps, haddps)
AVX_OP_SSE3(Movddup, movddup)
AVX_OP_SSE3(Movshdup, movshdup)
+
AVX_OP_SSSE3(Pabsb, pabsb)
- AVX_OP_SSSE3(Pabsw, pabsw)
AVX_OP_SSSE3(Pabsd, pabsd)
+ AVX_OP_SSSE3(Pabsw, pabsw)
+ AVX_OP_SSSE3(Palignr, palignr)
+ AVX_OP_SSSE3(Psignb, psignb)
+ AVX_OP_SSSE3(Psignd, psignd)
+ AVX_OP_SSSE3(Psignw, psignw)
+
AVX_OP_SSE4_1(Extractps, extractps)
+ AVX_OP_SSE4_1(Pblendw, pblendw)
AVX_OP_SSE4_1(Pextrb, pextrb)
AVX_OP_SSE4_1(Pextrw, pextrw)
+ AVX_OP_SSE4_1(Pmaxsb, pmaxsb)
+ AVX_OP_SSE4_1(Pmaxsd, pmaxsd)
+ AVX_OP_SSE4_1(Pminsb, pminsb)
AVX_OP_SSE4_1(Pmovsxbw, pmovsxbw)
- AVX_OP_SSE4_1(Pmovsxwd, pmovsxwd)
AVX_OP_SSE4_1(Pmovsxdq, pmovsxdq)
+ AVX_OP_SSE4_1(Pmovsxwd, pmovsxwd)
AVX_OP_SSE4_1(Pmovzxbw, pmovzxbw)
- AVX_OP_SSE4_1(Pmovzxwd, pmovzxwd)
AVX_OP_SSE4_1(Pmovzxdq, pmovzxdq)
+ AVX_OP_SSE4_1(Pmovzxwd, pmovzxwd)
AVX_OP_SSE4_1(Ptest, ptest)
- AVX_OP_SSE4_1(Roundps, roundps)
AVX_OP_SSE4_1(Roundpd, roundpd)
+ AVX_OP_SSE4_1(Roundps, roundps)
+ void F64x2ExtractLane(DoubleRegister dst, XMMRegister src, uint8_t lane);
+ void F64x2ReplaceLane(XMMRegister dst, XMMRegister src, DoubleRegister rep,
+ uint8_t lane);
+ void F64x2Min(XMMRegister dst, XMMRegister lhs, XMMRegister rhs,
+ XMMRegister scratch);
+ void F64x2Max(XMMRegister dst, XMMRegister lhs, XMMRegister rhs,
+ XMMRegister scratch);
+ void F32x4Splat(XMMRegister dst, DoubleRegister src);
+ void F32x4ExtractLane(FloatRegister dst, XMMRegister src, uint8_t lane);
void S128Store32Lane(Operand dst, XMMRegister src, uint8_t laneidx);
void I16x8ExtMulLow(XMMRegister dst, XMMRegister src1, XMMRegister src2,
                        XMMRegister scratch, bool is_signed);
@@ -170,6 +301,7 @@ class V8_EXPORT_PRIVATE SharedTurboAssembler : public TurboAssemblerBase {
void I32x4SConvertI16x8High(XMMRegister dst, XMMRegister src);
void I32x4UConvertI16x8High(XMMRegister dst, XMMRegister src,
XMMRegister scratch);
+ void I64x2Neg(XMMRegister dst, XMMRegister src, XMMRegister scratch);
void I64x2Abs(XMMRegister dst, XMMRegister src, XMMRegister scratch);
void I64x2GtS(XMMRegister dst, XMMRegister src0, XMMRegister src1,
XMMRegister scratch);
@@ -180,6 +312,7 @@ class V8_EXPORT_PRIVATE SharedTurboAssembler : public TurboAssemblerBase {
void I64x2SConvertI32x4High(XMMRegister dst, XMMRegister src);
void I64x2UConvertI32x4High(XMMRegister dst, XMMRegister src,
XMMRegister scratch);
+ void S128Not(XMMRegister dst, XMMRegister src, XMMRegister scratch);
// Requires dst == mask when AVX is not supported.
void S128Select(XMMRegister dst, XMMRegister mask, XMMRegister src1,
XMMRegister src2, XMMRegister scratch);
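
The AvxHelper rework above collapses the per-instruction wrappers into one dispatch template: each macro-generated function uses the v-prefixed AVX encoding when AVX is available and otherwise falls back to the SSE form (optionally inside an SSE3/SSSE3/SSE4_x feature scope). The following standalone C++ sketch mirrors only that selection logic with simplified stand-in types; FakeAssembler and its methods are invented for illustration and are not the V8 classes.

#include <cstdio>
#include <optional>

// Simplified stand-ins for the V8 assembler and feature enum (illustration only).
enum CpuFeature { AVX, SSE3, SSE4_1 };
struct FakeAssembler {
  bool avx_supported = true;
  void vandps(int dst, int src1, int src2) { std::printf("vandps %d, %d, %d\n", dst, src1, src2); }
  void andps(int dst, int src) { std::printf("andps %d, %d\n", dst, src); }
};

// Mirrors AvxHelper::emit for the "dst is duplicated in the AVX form" case:
// Andps(x, y) -> vandps(x, x, y) with AVX, andps(x, y) without.
template <void (FakeAssembler::*avx)(int, int, int),
          void (FakeAssembler::*no_avx)(int, int)>
void Emit(FakeAssembler* assm, int dst, int src,
          std::optional<CpuFeature> fallback = std::nullopt) {
  if (assm->avx_supported) {
    (assm->*avx)(dst, dst, src);  // AVX form duplicates dst as the first source.
  } else {
    (void)fallback;               // In V8 this would open a CpuFeatureScope.
    (assm->*no_avx)(dst, src);    // SSE form overwrites dst in place.
  }
}

int main() {
  FakeAssembler assm;
  Emit<&FakeAssembler::vandps, &FakeAssembler::andps>(&assm, 0, 1);  // vandps 0, 0, 1
  assm.avx_supported = false;
  Emit<&FakeAssembler::vandps, &FakeAssembler::andps>(&assm, 0, 1);  // andps 0, 1
  return 0;
}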
diff --git a/chromium/v8/src/codegen/signature.h b/chromium/v8/src/codegen/signature.h
index 2c4ca3e0d92..d6d8b5da0f6 100644
--- a/chromium/v8/src/codegen/signature.h
+++ b/chromium/v8/src/codegen/signature.h
@@ -104,6 +104,14 @@ class Signature : public ZoneObject {
T* buffer_;
};
+ static Signature<T>* Build(Zone* zone, std::initializer_list<T> returns,
+ std::initializer_list<T> params) {
+ Builder builder(zone, returns.size(), params.size());
+ for (T ret : returns) builder.AddReturn(ret);
+ for (T param : params) builder.AddParam(param);
+ return builder.Build();
+ }
+
static constexpr size_t kReturnCountOffset = 0;
static constexpr size_t kParameterCountOffset =
kReturnCountOffset + kSizetSize;
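
The new Signature<T>::Build helper shortens the usual Builder sequence (size the builder, add returns, add params, build). A self-contained sketch of the same pattern follows; SimpleSignature and ValueKind are stand-ins invented for the example, since the real class is zone-allocated.

#include <cstddef>
#include <initializer_list>
#include <vector>

// Minimal stand-in for the zone-allocated Signature<T> (illustration only).
template <typename T>
class SimpleSignature {
 public:
  static SimpleSignature Build(std::initializer_list<T> returns,
                               std::initializer_list<T> params) {
    SimpleSignature sig;
    for (T ret : returns) sig.returns_.push_back(ret);    // AddReturn
    for (T param : params) sig.params_.push_back(param);  // AddParam
    return sig;
  }
  size_t return_count() const { return returns_.size(); }
  size_t parameter_count() const { return params_.size(); }

 private:
  std::vector<T> returns_;
  std::vector<T> params_;
};

enum class ValueKind { kI32, kF64 };

int main() {
  // A signature returning one i32 and taking (i32, f64).
  auto sig = SimpleSignature<ValueKind>::Build({ValueKind::kI32},
                                               {ValueKind::kI32, ValueKind::kF64});
  return sig.return_count() == 1 && sig.parameter_count() == 2 ? 0 : 1;
}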
diff --git a/chromium/v8/src/codegen/source-position-table.cc b/chromium/v8/src/codegen/source-position-table.cc
index 63f1d17c70a..27466a26909 100644
--- a/chromium/v8/src/codegen/source-position-table.cc
+++ b/chromium/v8/src/codegen/source-position-table.cc
@@ -170,9 +170,9 @@ void SourcePositionTableBuilder::AddEntry(const PositionTableEntry& entry) {
#endif
}
-template <typename LocalIsolate>
+template <typename IsolateT>
Handle<ByteArray> SourcePositionTableBuilder::ToSourcePositionTable(
- LocalIsolate* isolate) {
+ IsolateT* isolate) {
if (bytes_.empty()) return isolate->factory()->empty_byte_array();
DCHECK(!Omit());
diff --git a/chromium/v8/src/codegen/source-position-table.h b/chromium/v8/src/codegen/source-position-table.h
index 72a4c9f45a8..afd7cc434c9 100644
--- a/chromium/v8/src/codegen/source-position-table.h
+++ b/chromium/v8/src/codegen/source-position-table.h
@@ -54,9 +54,9 @@ class V8_EXPORT_PRIVATE SourcePositionTableBuilder {
void AddPosition(size_t code_offset, SourcePosition source_position,
bool is_statement);
- template <typename LocalIsolate>
+ template <typename IsolateT>
EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
- Handle<ByteArray> ToSourcePositionTable(LocalIsolate* isolate);
+ Handle<ByteArray> ToSourcePositionTable(IsolateT* isolate);
OwnedVector<byte> ToSourcePositionTableVector();
inline bool Omit() const { return mode_ != RECORD_SOURCE_POSITIONS; }
diff --git a/chromium/v8/src/codegen/string-constants.cc b/chromium/v8/src/codegen/string-constants.cc
index 92a5e973962..c1ad5a7b4b6 100644
--- a/chromium/v8/src/codegen/string-constants.cc
+++ b/chromium/v8/src/codegen/string-constants.cc
@@ -5,7 +5,6 @@
#include "src/codegen/string-constants.h"
#include "src/base/functional.h"
-#include "src/numbers/dtoa.h"
#include "src/objects/objects.h"
#include "src/objects/string-inl.h"
@@ -176,7 +175,7 @@ size_t StringConstantBase::GetMaxStringConstantLength() const {
size_t StringLiteral::GetMaxStringConstantLength() const { return length_; }
size_t NumberToStringConstant::GetMaxStringConstantLength() const {
- return kBase10MaximalLength + 1;
+ return kMaxDoubleStringLength;
}
size_t StringCons::GetMaxStringConstantLength() const {
diff --git a/chromium/v8/src/codegen/turbo-assembler.cc b/chromium/v8/src/codegen/turbo-assembler.cc
index e4c694097be..a9f9e08ead6 100644
--- a/chromium/v8/src/codegen/turbo-assembler.cc
+++ b/chromium/v8/src/codegen/turbo-assembler.cc
@@ -115,12 +115,5 @@ bool TurboAssemblerBase::IsAddressableThroughRootRegister(
return isolate->root_register_addressable_region().contains(address);
}
-void TurboAssemblerBase::RecordCommentForOffHeapTrampoline(int builtin_index) {
- if (!FLAG_code_comments) return;
- std::ostringstream str;
- str << "[ Inlined Trampoline to " << Builtins::name(builtin_index);
- RecordComment(str.str().c_str());
-}
-
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/codegen/turbo-assembler.h b/chromium/v8/src/codegen/turbo-assembler.h
index cc9ef92919b..e25ee2a6295 100644
--- a/chromium/v8/src/codegen/turbo-assembler.h
+++ b/chromium/v8/src/codegen/turbo-assembler.h
@@ -118,13 +118,18 @@ class V8_EXPORT_PRIVATE TurboAssemblerBase : public Assembler {
static bool IsAddressableThroughRootRegister(
Isolate* isolate, const ExternalReference& reference);
-#ifdef V8_TARGET_OS_WIN
+#if defined(V8_TARGET_OS_WIN) || defined(V8_TARGET_OS_MACOSX)
// Minimum page size. We must touch memory once per page when expanding the
// stack, to avoid access violations.
static constexpr int kStackPageSize = 4 * KB;
#endif
- void RecordCommentForOffHeapTrampoline(int builtin_index);
+ V8_INLINE void RecordCommentForOffHeapTrampoline(int builtin_index) {
+ if (!FLAG_code_comments) return;
+ std::ostringstream str;
+ str << "[ Inlined Trampoline to " << Builtins::name(builtin_index);
+ RecordComment(str.str().c_str());
+ }
protected:
Isolate* const isolate_ = nullptr;
@@ -150,8 +155,7 @@ class V8_EXPORT_PRIVATE TurboAssemblerBase : public Assembler {
};
// Avoids emitting calls to the {Builtins::kAbort} builtin when emitting debug
-// code during the lifetime of this scope object. For disabling debug code
-// entirely use the {DontEmitDebugCodeScope} instead.
+// code during the lifetime of this scope object.
class V8_NODISCARD HardAbortScope {
public:
explicit HardAbortScope(TurboAssemblerBase* assembler)
@@ -165,27 +169,6 @@ class V8_NODISCARD HardAbortScope {
bool old_value_;
};
-#ifdef DEBUG
-struct CountIfValidRegisterFunctor {
- template <typename RegType>
- constexpr int operator()(int count, RegType reg) const {
- return count + (reg.is_valid() ? 1 : 0);
- }
-};
-
-template <typename RegType, typename... RegTypes,
- // All arguments must be either Register or DoubleRegister.
- typename = typename std::enable_if<
- base::is_same<Register, RegType, RegTypes...>::value ||
- base::is_same<DoubleRegister, RegType, RegTypes...>::value>::type>
-inline bool AreAliased(RegType first_reg, RegTypes... regs) {
- int num_different_regs = NumRegs(RegType::ListOf(first_reg, regs...));
- int num_given_regs =
- base::fold(CountIfValidRegisterFunctor{}, 0, first_reg, regs...);
- return num_different_regs < num_given_regs;
-}
-#endif
-
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/codegen/x64/assembler-x64.cc b/chromium/v8/src/codegen/x64/assembler-x64.cc
index 032f7eb13d3..eb07f3ba3b7 100644
--- a/chromium/v8/src/codegen/x64/assembler-x64.cc
+++ b/chromium/v8/src/codegen/x64/assembler-x64.cc
@@ -86,45 +86,40 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
// Only use statically determined features for cross compile (snapshot).
if (cross_compile) return;
-
- // To deal with any combination of flags (e.g. --no-enable-sse4-1
- // --enable-sse-4-2), we start checking from the "highest" supported
- // extension, for each extension, enable if newer extension is supported.
- if (cpu.has_avx2() && FLAG_enable_avx2 && IsSupported(AVX)) {
- supported_ |= 1u << AVX2;
- }
- if (cpu.has_fma3() && FLAG_enable_fma3 && cpu.has_osxsave() &&
- OSHasAVXSupport()) {
- supported_ |= 1u << FMA3;
- }
- if ((cpu.has_avx() && FLAG_enable_avx && cpu.has_osxsave() &&
- OSHasAVXSupport()) ||
- IsSupported(AVX2) || IsSupported(FMA3)) {
- supported_ |= 1u << AVX;
- }
- if ((cpu.has_sse42() && FLAG_enable_sse4_2) || IsSupported(AVX)) {
- supported_ |= 1u << SSE4_2;
- }
- if ((cpu.has_sse41() && FLAG_enable_sse4_1) || IsSupported(SSE4_2)) {
- supported_ |= 1u << SSE4_1;
- }
- if ((cpu.has_ssse3() && FLAG_enable_ssse3) || IsSupported(SSE4_1)) {
- supported_ |= 1u << SSSE3;
+ if (cpu.has_sse42()) SetSupported(SSE4_2);
+ if (cpu.has_sse41()) SetSupported(SSE4_1);
+ if (cpu.has_ssse3()) SetSupported(SSSE3);
+ if (cpu.has_sse3()) SetSupported(SSE3);
+ if (cpu.has_avx() && cpu.has_osxsave() && OSHasAVXSupport()) {
+ SetSupported(AVX);
+ if (cpu.has_avx2()) SetSupported(AVX2);
+ if (cpu.has_fma3()) SetSupported(FMA3);
}
- if ((cpu.has_sse3() && FLAG_enable_sse3) || IsSupported(SSSE3))
- supported_ |= 1u << SSE3;
+
// SAHF is not generally available in long mode.
- if (cpu.has_sahf() && FLAG_enable_sahf) supported_ |= 1u << SAHF;
- if (cpu.has_bmi1() && FLAG_enable_bmi1) supported_ |= 1u << BMI1;
- if (cpu.has_bmi2() && FLAG_enable_bmi2) supported_ |= 1u << BMI2;
- if (cpu.has_lzcnt() && FLAG_enable_lzcnt) supported_ |= 1u << LZCNT;
- if (cpu.has_popcnt() && FLAG_enable_popcnt) supported_ |= 1u << POPCNT;
+ if (cpu.has_sahf() && FLAG_enable_sahf) SetSupported(SAHF);
+ if (cpu.has_bmi1() && FLAG_enable_bmi1) SetSupported(BMI1);
+ if (cpu.has_bmi2() && FLAG_enable_bmi2) SetSupported(BMI2);
+ if (cpu.has_lzcnt() && FLAG_enable_lzcnt) SetSupported(LZCNT);
+ if (cpu.has_popcnt() && FLAG_enable_popcnt) SetSupported(POPCNT);
if (strcmp(FLAG_mcpu, "auto") == 0) {
- if (cpu.is_atom()) supported_ |= 1u << ATOM;
+ if (cpu.is_atom()) SetSupported(ATOM);
} else if (strcmp(FLAG_mcpu, "atom") == 0) {
- supported_ |= 1u << ATOM;
+ SetSupported(ATOM);
}
+  // Ensure that the supported CPU features are internally consistent. E.g. it is
+  // wrong to support AVX but not SSE4_2: with --enable-avx and --no-enable-sse4-2,
+  // the code above would mark AVX as supported and SSE4_2 as unsupported, and the
+  // checks below then clear AVX as well.
+ if (!FLAG_enable_sse3) SetUnsupported(SSE3);
+ if (!FLAG_enable_ssse3 || !IsSupported(SSE3)) SetUnsupported(SSSE3);
+ if (!FLAG_enable_sse4_1 || !IsSupported(SSSE3)) SetUnsupported(SSE4_1);
+ if (!FLAG_enable_sse4_2 || !IsSupported(SSE4_1)) SetUnsupported(SSE4_2);
+ if (!FLAG_enable_avx || !IsSupported(SSE4_2)) SetUnsupported(AVX);
+ if (!FLAG_enable_avx2 || !IsSupported(AVX)) SetUnsupported(AVX2);
+ if (!FLAG_enable_fma3 || !IsSupported(AVX)) SetUnsupported(FMA3);
+
// Set a static value on whether Simd is supported.
// This variable is only used for certain archs to query SupportWasmSimd128()
// at runtime in builtins using an extern ref. Other callers should use
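
The reworked ProbeImpl first records what the hardware offers and only afterwards clears features whose flag is disabled or whose prerequisite ended up unsupported, so a single ordered pass keeps the set consistent. A minimal bitset sketch of that ordering is below; it only models the SSE4_2 -> AVX -> AVX2/FMA3 tail of the dependency chain, and the flag handling is simplified.

#include <cstdint>
#include <cstdio>
#include <initializer_list>

enum Feature : uint32_t { SSE3, SSSE3, SSE4_1, SSE4_2, AVX, AVX2, FMA3 };

struct FeatureSet {
  uint32_t bits = 0;
  void Set(Feature f) { bits |= 1u << f; }
  void Unset(Feature f) { bits &= ~(1u << f); }
  bool Has(Feature f) const { return (bits & (1u << f)) != 0; }
};

int main() {
  // Pretend the CPU supports everything, but the user passed --no-enable-sse4-2.
  bool enable_sse4_2 = false;
  FeatureSet s;
  for (Feature f : {SSE3, SSSE3, SSE4_1, SSE4_2, AVX, AVX2, FMA3}) s.Set(f);

  // Same ordering idea as the checks above: a feature is dropped if its flag is
  // off or its prerequisite was already dropped, so AVX/AVX2/FMA3 follow SSE4_2
  // out of the set.
  if (!enable_sse4_2 || !s.Has(SSE4_1)) s.Unset(SSE4_2);
  if (!s.Has(SSE4_2)) s.Unset(AVX);
  if (!s.Has(AVX)) s.Unset(AVX2);
  if (!s.Has(AVX)) s.Unset(FMA3);

  std::printf("AVX supported after flags: %d\n", s.Has(AVX));  // prints 0
  return 0;
}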
@@ -1419,12 +1414,13 @@ void Assembler::j(Condition cc, Label* L, Label::Distance distance) {
}
void Assembler::j(Condition cc, Address entry, RelocInfo::Mode rmode) {
- DCHECK(RelocInfo::IsRuntimeEntry(rmode));
+ DCHECK(RelocInfo::IsWasmStubCall(rmode));
EnsureSpace ensure_space(this);
DCHECK(is_uint4(cc));
emit(0x0F);
emit(0x80 | cc);
- emit_runtime_entry(entry, rmode);
+ RecordRelocInfo(rmode);
+ emitl(static_cast<int32_t>(entry));
}
void Assembler::j(Condition cc, Handle<Code> target, RelocInfo::Mode rmode) {
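
For reference, the conditional-jump change above keeps the standard near Jcc rel32 encoding (0x0F, 0x80+cc, then a 32-bit displacement) but records a WASM stub-call reloc and emits the entry as a raw 32-bit value instead of a runtime-entry reloc. A rough sketch of just the byte layout, with no relocation machinery and a buffer standing in for the assembler:

#include <cstdint>
#include <cstring>
#include <vector>

// Illustrative only: emit a near conditional jump (Jcc rel32) into a byte buffer.
void EmitJcc(std::vector<uint8_t>* buf, uint8_t cc, int32_t rel32) {
  buf->push_back(0x0F);
  buf->push_back(static_cast<uint8_t>(0x80 | cc));  // 0x80..0x8F selects the condition
  uint8_t imm[4];
  std::memcpy(imm, &rel32, sizeof(imm));            // little-endian 32-bit displacement
  buf->insert(buf->end(), imm, imm + 4);
}

int main() {
  std::vector<uint8_t> code;
  EmitJcc(&code, /*cc=*/0x4 /* equal/zero */, /*rel32=*/0x10);
  return code.size() == 6 ? 0 : 1;  // two opcode bytes plus four displacement bytes
}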
diff --git a/chromium/v8/src/codegen/x64/assembler-x64.h b/chromium/v8/src/codegen/x64/assembler-x64.h
index 97e18ed8fef..e6205311c2d 100644
--- a/chromium/v8/src/codegen/x64/assembler-x64.h
+++ b/chromium/v8/src/codegen/x64/assembler-x64.h
@@ -124,6 +124,9 @@ class Immediate {
DCHECK(SmiValuesAre31Bits()); // Only available for 31-bit SMI.
}
+ int32_t value() const { return value_; }
+ RelocInfo::Mode rmode() const { return rmode_; }
+
private:
const int32_t value_;
const RelocInfo::Mode rmode_ = RelocInfo::NONE;
@@ -1274,6 +1277,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
SSE_CMP_P(cmpeq, 0x0)
SSE_CMP_P(cmplt, 0x1)
SSE_CMP_P(cmple, 0x2)
+ SSE_CMP_P(cmpunord, 0x3)
SSE_CMP_P(cmpneq, 0x4)
SSE_CMP_P(cmpnlt, 0x5)
SSE_CMP_P(cmpnle, 0x6)
@@ -1571,6 +1575,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
AVX_CMP_P(vcmpeq, 0x0)
AVX_CMP_P(vcmplt, 0x1)
AVX_CMP_P(vcmple, 0x2)
+ AVX_CMP_P(vcmpunord, 0x3)
AVX_CMP_P(vcmpneq, 0x4)
AVX_CMP_P(vcmpnlt, 0x5)
AVX_CMP_P(vcmpnle, 0x6)
@@ -2374,8 +2379,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// checks that we did not generate too much.
class EnsureSpace {
public:
- explicit EnsureSpace(Assembler* assembler) : assembler_(assembler) {
- if (assembler_->buffer_overflow()) assembler_->GrowBuffer();
+ explicit V8_INLINE EnsureSpace(Assembler* assembler) : assembler_(assembler) {
+ if (V8_UNLIKELY(assembler_->buffer_overflow())) assembler_->GrowBuffer();
#ifdef DEBUG
space_before_ = assembler_->available_space();
#endif
@@ -2389,7 +2394,7 @@ class EnsureSpace {
#endif
private:
- Assembler* assembler_;
+ Assembler* const assembler_;
#ifdef DEBUG
int space_before_;
#endif
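
EnsureSpace is the usual RAII guard: constructing one grows the buffer when headroom runs low, and in debug builds its destructor checks that the instruction emitted under it did not consume more than the guaranteed gap. A rough standalone sketch of that pattern; the buffer sizes, TinyAssembler, and kGap below are made up for the example.

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

class TinyAssembler {
 public:
  static constexpr size_t kGap = 32;  // guaranteed headroom per instruction (made up)
  bool buffer_overflow() const { return capacity_ - buffer_.size() < kGap; }
  void GrowBuffer() { capacity_ *= 2; }
  size_t available_space() const { return capacity_ - buffer_.size(); }
  void emit(uint8_t b) { buffer_.push_back(b); }

 private:
  std::vector<uint8_t> buffer_;
  size_t capacity_ = 64;
};

class EnsureSpaceGuard {
 public:
  explicit EnsureSpaceGuard(TinyAssembler* assm) : assm_(assm) {
    if (assm_->buffer_overflow()) assm_->GrowBuffer();
    space_before_ = assm_->available_space();
  }
  ~EnsureSpaceGuard() {
    // The emitted instruction must fit in the reserved gap.
    assert(space_before_ - assm_->available_space() <= TinyAssembler::kGap);
  }

 private:
  TinyAssembler* const assm_;
  size_t space_before_;
};

int main() {
  TinyAssembler assm;
  {
    EnsureSpaceGuard guard(&assm);  // reserve headroom before emitting
    assm.emit(0x90);                // nop
  }
  return 0;
}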
diff --git a/chromium/v8/src/codegen/x64/interface-descriptors-x64-inl.h b/chromium/v8/src/codegen/x64/interface-descriptors-x64-inl.h
new file mode 100644
index 00000000000..a24330a4c77
--- /dev/null
+++ b/chromium/v8/src/codegen/x64/interface-descriptors-x64-inl.h
@@ -0,0 +1,258 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CODEGEN_X64_INTERFACE_DESCRIPTORS_X64_INL_H_
+#define V8_CODEGEN_X64_INTERFACE_DESCRIPTORS_X64_INL_H_
+
+#if V8_TARGET_ARCH_X64
+
+#include "src/codegen/interface-descriptors.h"
+
+namespace v8 {
+namespace internal {
+
+constexpr auto CallInterfaceDescriptor::DefaultRegisterArray() {
+ auto registers = RegisterArray(rax, rbx, rcx, rdx, rdi);
+ STATIC_ASSERT(registers.size() == kMaxBuiltinRegisterParams);
+ return registers;
+}
+
+// static
+constexpr auto RecordWriteDescriptor::registers() {
+ return RegisterArray(arg_reg_1, arg_reg_2, arg_reg_3, arg_reg_4,
+ kReturnRegister0);
+}
+
+// static
+constexpr auto DynamicCheckMapsDescriptor::registers() {
+ return RegisterArray(kReturnRegister0, arg_reg_1, arg_reg_2, arg_reg_3,
+ kRuntimeCallFunctionRegister, kContextRegister);
+}
+
+// static
+constexpr auto EphemeronKeyBarrierDescriptor::registers() {
+ return RegisterArray(arg_reg_1, arg_reg_2, arg_reg_3, arg_reg_4,
+ kReturnRegister0);
+}
+
+// static
+constexpr Register LoadDescriptor::ReceiverRegister() { return rdx; }
+// static
+constexpr Register LoadDescriptor::NameRegister() { return rcx; }
+// static
+constexpr Register LoadDescriptor::SlotRegister() { return rax; }
+
+// static
+constexpr Register LoadWithVectorDescriptor::VectorRegister() { return rbx; }
+
+// static
+constexpr Register
+LoadWithReceiverAndVectorDescriptor::LookupStartObjectRegister() {
+ return rdi;
+}
+
+// static
+constexpr Register StoreDescriptor::ReceiverRegister() { return rdx; }
+// static
+constexpr Register StoreDescriptor::NameRegister() { return rcx; }
+// static
+constexpr Register StoreDescriptor::ValueRegister() { return rax; }
+// static
+constexpr Register StoreDescriptor::SlotRegister() { return rdi; }
+
+// static
+constexpr Register StoreWithVectorDescriptor::VectorRegister() { return rbx; }
+
+// static
+constexpr Register StoreTransitionDescriptor::MapRegister() { return r11; }
+
+// static
+constexpr Register ApiGetterDescriptor::HolderRegister() { return rcx; }
+// static
+constexpr Register ApiGetterDescriptor::CallbackRegister() { return rbx; }
+
+// static
+constexpr Register GrowArrayElementsDescriptor::ObjectRegister() { return rax; }
+// static
+constexpr Register GrowArrayElementsDescriptor::KeyRegister() { return rbx; }
+
+// static
+constexpr Register BaselineLeaveFrameDescriptor::ParamsSizeRegister() {
+ return rbx;
+}
+// static
+constexpr Register BaselineLeaveFrameDescriptor::WeightRegister() {
+ return rcx;
+}
+
+// static
+constexpr Register TypeConversionDescriptor::ArgumentRegister() { return rax; }
+
+// static
+constexpr auto TypeofDescriptor::registers() { return RegisterArray(rbx); }
+
+// static
+constexpr auto CallTrampolineDescriptor::registers() {
+ // rax : number of arguments
+ // rdi : the target to call
+ return RegisterArray(rdi, rax);
+}
+
+// static
+constexpr auto CallVarargsDescriptor::registers() {
+ // rax : number of arguments (on the stack, not including receiver)
+ // rdi : the target to call
+ // rcx : arguments list length (untagged)
+ // rbx : arguments list (FixedArray)
+ return RegisterArray(rdi, rax, rcx, rbx);
+}
+
+// static
+constexpr auto CallForwardVarargsDescriptor::registers() {
+ // rax : number of arguments
+ // rcx : start index (to support rest parameters)
+ // rdi : the target to call
+ return RegisterArray(rdi, rax, rcx);
+}
+
+// static
+constexpr auto CallFunctionTemplateDescriptor::registers() {
+ // rdx: the function template info
+ // rcx: number of arguments (on the stack, not including receiver)
+ return RegisterArray(rdx, rcx);
+}
+
+// static
+constexpr auto CallWithSpreadDescriptor::registers() {
+ // rax : number of arguments (on the stack, not including receiver)
+ // rdi : the target to call
+ // rbx : the object to spread
+ return RegisterArray(rdi, rax, rbx);
+}
+
+// static
+constexpr auto CallWithArrayLikeDescriptor::registers() {
+ // rdi : the target to call
+ // rbx : the arguments list
+ return RegisterArray(rdi, rbx);
+}
+
+// static
+constexpr auto ConstructVarargsDescriptor::registers() {
+ // rax : number of arguments (on the stack, not including receiver)
+ // rdi : the target to call
+ // rdx : the new target
+ // rcx : arguments list length (untagged)
+ // rbx : arguments list (FixedArray)
+ return RegisterArray(rdi, rdx, rax, rcx, rbx);
+}
+
+// static
+constexpr auto ConstructForwardVarargsDescriptor::registers() {
+ // rax : number of arguments
+ // rdx : the new target
+ // rcx : start index (to support rest parameters)
+ // rdi : the target to call
+ return RegisterArray(rdi, rdx, rax, rcx);
+}
+
+// static
+constexpr auto ConstructWithSpreadDescriptor::registers() {
+ // rax : number of arguments (on the stack, not including receiver)
+ // rdi : the target to call
+ // rdx : the new target
+ // rbx : the object to spread
+ return RegisterArray(rdi, rdx, rax, rbx);
+}
+
+// static
+constexpr auto ConstructWithArrayLikeDescriptor::registers() {
+ // rdi : the target to call
+ // rdx : the new target
+ // rbx : the arguments list
+ return RegisterArray(rdi, rdx, rbx);
+}
+
+// static
+constexpr auto ConstructStubDescriptor::registers() {
+ // rax : number of arguments
+ // rdx : the new target
+ // rdi : the target to call
+ // rbx : allocation site or undefined
+ return RegisterArray(rdi, rdx, rax, rbx);
+}
+
+// static
+constexpr auto AbortDescriptor::registers() { return RegisterArray(rdx); }
+
+// static
+constexpr auto CompareDescriptor::registers() {
+ return RegisterArray(rdx, rax);
+}
+
+// static
+constexpr auto BinaryOpDescriptor::registers() {
+ return RegisterArray(rdx, rax);
+}
+
+// static
+constexpr auto Compare_BaselineDescriptor::registers() {
+ return RegisterArray(rdx, rax, rbx);
+}
+
+// static
+constexpr auto BinaryOp_BaselineDescriptor::registers() {
+ return RegisterArray(rdx, rax, rbx);
+}
+
+// static
+constexpr auto ApiCallbackDescriptor::registers() {
+ return RegisterArray(rdx, // api function address
+ rcx, // argument count (not including receiver)
+ rbx, // call data
+ rdi); // holder
+}
+
+// static
+constexpr auto InterpreterDispatchDescriptor::registers() {
+ return RegisterArray(
+ kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister,
+ kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister);
+}
+
+// static
+constexpr auto InterpreterPushArgsThenCallDescriptor::registers() {
+ return RegisterArray(rax, // argument count (not including receiver)
+ rbx, // address of first argument
+ rdi); // the target callable to be call
+                       rdi);  // the target callable to be called
+
+// static
+constexpr auto InterpreterPushArgsThenConstructDescriptor::registers() {
+ return RegisterArray(
+ rax, // argument count (not including receiver)
+ rcx, // address of first argument
+ rdi, // constructor to call
+ rdx, // new target
+ rbx); // allocation site feedback if available, undefined otherwise
+}
+
+// static
+constexpr auto ResumeGeneratorDescriptor::registers() {
+ return RegisterArray(
+ rax, // the value to pass to the generator
+ rdx); // the JSGeneratorObject / JSAsyncGeneratorObject to resume
+}
+
+// static
+constexpr auto RunMicrotasksEntryDescriptor::registers() {
+ return RegisterArray(arg_reg_1, arg_reg_2);
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TARGET_ARCH_X64
+
+#endif // V8_CODEGEN_X64_INTERFACE_DESCRIPTORS_X64_INL_H_
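
This new -inl.h replaces the runtime InitializePlatformSpecific hooks (deleted below) with constexpr registers() accessors, so the register assignments become compile-time arrays. A simplified sketch of the pattern with std::array standing in for RegisterArray; the Reg enum and descriptor name here are placeholders, not the V8 types.

#include <array>
#include <cstddef>

// Placeholder register type for illustration.
enum class Reg { rax, rbx, rcx, rdx, rdi };

// Stand-in for RegisterArray(): builds a fixed-size constexpr array.
template <typename... Regs>
constexpr std::array<Reg, sizeof...(Regs)> RegisterArray(Regs... regs) {
  return {{regs...}};
}

struct CallTrampolineDescriptorSketch {
  // rdi: the target to call, rax: number of arguments (mirrors the x64 assignment).
  static constexpr auto registers() { return RegisterArray(Reg::rdi, Reg::rax); }
};

// Everything resolves at compile time, so properties can be static_asserted.
static_assert(CallTrampolineDescriptorSketch::registers().size() == 2,
              "descriptor uses two registers");
static_assert(CallTrampolineDescriptorSketch::registers()[0] == Reg::rdi,
              "target register comes first");

int main() { return 0; }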
diff --git a/chromium/v8/src/codegen/x64/interface-descriptors-x64.cc b/chromium/v8/src/codegen/x64/interface-descriptors-x64.cc
deleted file mode 100644
index 4029b56d2b3..00000000000
--- a/chromium/v8/src/codegen/x64/interface-descriptors-x64.cc
+++ /dev/null
@@ -1,309 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_X64
-
-#include "src/codegen/interface-descriptors.h"
-
-#include "src/execution/frames.h"
-
-namespace v8 {
-namespace internal {
-
-const Register CallInterfaceDescriptor::ContextRegister() { return rsi; }
-
-void CallInterfaceDescriptor::DefaultInitializePlatformSpecific(
- CallInterfaceDescriptorData* data, int register_parameter_count) {
- const Register default_stub_registers[] = {rax, rbx, rcx, rdx, rdi};
- CHECK_LE(static_cast<size_t>(register_parameter_count),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(register_parameter_count,
- default_stub_registers);
-}
-
-void RecordWriteDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- const Register default_stub_registers[] = {arg_reg_1, arg_reg_2, arg_reg_3,
- arg_reg_4, kReturnRegister0};
-
- data->RestrictAllocatableRegisters(default_stub_registers,
- arraysize(default_stub_registers));
-
- CHECK_LE(static_cast<size_t>(kParameterCount),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
-}
-
-void DynamicCheckMapsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register default_stub_registers[] = {kReturnRegister0,
- arg_reg_1,
- arg_reg_2,
- arg_reg_3,
- kRuntimeCallFunctionRegister,
- kContextRegister};
-
- data->RestrictAllocatableRegisters(default_stub_registers,
- arraysize(default_stub_registers));
-
- CHECK_LE(static_cast<size_t>(kParameterCount),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
-}
-
-void EphemeronKeyBarrierDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- const Register default_stub_registers[] = {arg_reg_1, arg_reg_2, arg_reg_3,
- arg_reg_4, kReturnRegister0};
-
- data->RestrictAllocatableRegisters(default_stub_registers,
- arraysize(default_stub_registers));
-
- CHECK_LE(static_cast<size_t>(kParameterCount),
- arraysize(default_stub_registers));
- data->InitializePlatformSpecific(kParameterCount, default_stub_registers);
-}
-
-const Register LoadDescriptor::ReceiverRegister() { return rdx; }
-const Register LoadDescriptor::NameRegister() { return rcx; }
-const Register LoadDescriptor::SlotRegister() { return rax; }
-
-const Register LoadWithVectorDescriptor::VectorRegister() { return rbx; }
-
-const Register
-LoadWithReceiverAndVectorDescriptor::LookupStartObjectRegister() {
- return rdi;
-}
-
-const Register StoreDescriptor::ReceiverRegister() { return rdx; }
-const Register StoreDescriptor::NameRegister() { return rcx; }
-const Register StoreDescriptor::ValueRegister() { return rax; }
-const Register StoreDescriptor::SlotRegister() { return rdi; }
-
-const Register StoreWithVectorDescriptor::VectorRegister() { return rbx; }
-
-const Register StoreTransitionDescriptor::SlotRegister() { return rdi; }
-const Register StoreTransitionDescriptor::VectorRegister() { return rbx; }
-const Register StoreTransitionDescriptor::MapRegister() { return r11; }
-
-const Register ApiGetterDescriptor::HolderRegister() { return rcx; }
-const Register ApiGetterDescriptor::CallbackRegister() { return rbx; }
-
-const Register GrowArrayElementsDescriptor::ObjectRegister() { return rax; }
-const Register GrowArrayElementsDescriptor::KeyRegister() { return rbx; }
-
-const Register BaselineLeaveFrameDescriptor::ParamsSizeRegister() {
- return rbx;
-}
-const Register BaselineLeaveFrameDescriptor::WeightRegister() { return rcx; }
-
-void TypeofDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {rbx};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-// static
-const Register TypeConversionDescriptor::ArgumentRegister() { return rax; }
-
-void CallTrampolineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // rax : number of arguments
- // rdi : the target to call
- Register registers[] = {rdi, rax};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallVarargsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // rax : number of arguments (on the stack, not including receiver)
- // rdi : the target to call
- // rcx : arguments list length (untagged)
- // rbx : arguments list (FixedArray)
- Register registers[] = {rdi, rax, rcx, rbx};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallForwardVarargsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // rax : number of arguments
- // rcx : start index (to support rest parameters)
- // rdi : the target to call
- Register registers[] = {rdi, rax, rcx};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallFunctionTemplateDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // rdx: the function template info
- // rcx: number of arguments (on the stack, not including receiver)
- Register registers[] = {rdx, rcx};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallWithSpreadDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // rax : number of arguments (on the stack, not including receiver)
- // rdi : the target to call
- // rbx : the object to spread
- Register registers[] = {rdi, rax, rbx};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CallWithArrayLikeDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // rdi : the target to call
- // rbx : the arguments list
- Register registers[] = {rdi, rbx};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructVarargsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // rax : number of arguments (on the stack, not including receiver)
- // rdi : the target to call
- // rdx : the new target
- // rcx : arguments list length (untagged)
- // rbx : arguments list (FixedArray)
- Register registers[] = {rdi, rdx, rax, rcx, rbx};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructForwardVarargsDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // rax : number of arguments
- // rdx : the new target
- // rcx : start index (to support rest parameters)
- // rdi : the target to call
- Register registers[] = {rdi, rdx, rax, rcx};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructWithSpreadDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // rax : number of arguments (on the stack, not including receiver)
- // rdi : the target to call
- // rdx : the new target
- // rbx : the object to spread
- Register registers[] = {rdi, rdx, rax, rbx};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructWithArrayLikeDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // rdi : the target to call
- // rdx : the new target
- // rbx : the arguments list
- Register registers[] = {rdi, rdx, rbx};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ConstructStubDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- // rax : number of arguments
- // rdx : the new target
- // rdi : the target to call
- // rbx : allocation site or undefined
- Register registers[] = {rdi, rdx, rax, rbx};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void AbortDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {rdx};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void CompareDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {rdx, rax};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void Compare_BaselineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {rdx, rax, rbx};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void BinaryOpDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {rdx, rax};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void BinaryOp_BaselineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {rdx, rax, rbx};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ApiCallbackDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- rdx, // api function address
- rcx, // argument count (not including receiver)
- rbx, // call data
- rdi, // holder
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void InterpreterDispatchDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister,
- kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void InterpreterPushArgsThenCallDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- rax, // argument count (not including receiver)
- rbx, // address of first argument
- rdi // the target callable to be call
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void InterpreterPushArgsThenConstructDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- rax, // argument count (not including receiver)
- rcx, // address of first argument
- rdi, // constructor to call
- rdx, // new target
- rbx, // allocation site feedback if available, undefined otherwise
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void ResumeGeneratorDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- rax, // the value to pass to the generator
- rdx // the JSGeneratorObject / JSAsyncGeneratorObject to resume
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void FrameDropperTrampolineDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {
- rbx, // loaded new FP
- };
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void RunMicrotasksEntryDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {arg_reg_1, arg_reg_2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_X64
diff --git a/chromium/v8/src/codegen/x64/macro-assembler-x64.cc b/chromium/v8/src/codegen/x64/macro-assembler-x64.cc
index cb254370b25..53f3f97f9ae 100644
--- a/chromium/v8/src/codegen/x64/macro-assembler-x64.cc
+++ b/chromium/v8/src/codegen/x64/macro-assembler-x64.cc
@@ -12,6 +12,7 @@
#include "src/codegen/code-factory.h"
#include "src/codegen/cpu-features.h"
#include "src/codegen/external-reference-table.h"
+#include "src/codegen/interface-descriptors-inl.h"
#include "src/codegen/macro-assembler.h"
#include "src/codegen/register-configuration.h"
#include "src/codegen/string-constants.h"
@@ -194,6 +195,9 @@ void TurboAssembler::CompareRoot(Operand with, RootIndex index) {
void TurboAssembler::LoadMap(Register destination, Register object) {
LoadTaggedPointerField(destination,
FieldOperand(object, HeapObject::kMapOffset));
+#ifdef V8_MAP_PACKING
+ UnpackMapWord(destination);
+#endif
}
void TurboAssembler::LoadTaggedPointerField(Register destination,
@@ -205,6 +209,16 @@ void TurboAssembler::LoadTaggedPointerField(Register destination,
}
}
+#ifdef V8_MAP_PACKING
+void TurboAssembler::UnpackMapWord(Register r) {
+ // Clear the top two bytes (which may include metadata). Must be in sync with
+ // MapWord::Unpack, and vice versa.
+ shlq(r, Immediate(16));
+ shrq(r, Immediate(16));
+ xorq(r, Immediate(Internals::kMapWordXorMask));
+}
+#endif
+
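
With V8_MAP_PACKING, UnpackMapWord strips the upper 16 bits via a shift pair and then removes the XOR mask. The arithmetic is easy to check in isolation; the sketch below uses a made-up mask and a matching fake packing step purely to show that pack followed by unpack round-trips (the real mask is Internals::kMapWordXorMask).

#include <cassert>
#include <cstdint>

// Illustrative mask only; the real value is Internals::kMapWordXorMask.
constexpr uint64_t kFakeXorMask = 0b1010;

uint64_t PackMapWord(uint64_t map_ptr, uint64_t metadata16) {
  // Upper 16 bits carry metadata; the 48-bit payload is XOR-obfuscated.
  return (metadata16 << 48) | ((map_ptr ^ kFakeXorMask) & 0x0000FFFFFFFFFFFFull);
}

uint64_t UnpackMapWord(uint64_t packed) {
  // Mirrors the assembler sequence: shlq 16, shrq 16, then xorq with the mask.
  packed = (packed << 16) >> 16;  // clear the top two bytes
  return packed ^ kFakeXorMask;
}

int main() {
  uint64_t map_ptr = 0x0000123456789ab0ull;  // fits in the low 48 bits
  uint64_t packed = PackMapWord(map_ptr, /*metadata16=*/0x7fff);
  assert(UnpackMapWord(packed) == map_ptr);
  return 0;
}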
void TurboAssembler::LoadTaggedSignedField(Register destination,
Operand field_operand) {
if (COMPRESS_POINTERS_BOOL) {
@@ -288,7 +302,7 @@ void TurboAssembler::DecompressTaggedPointer(Register destination,
Operand field_operand) {
RecordComment("[ DecompressTaggedPointer");
movl(destination, field_operand);
- addq(destination, kPointerCageBaseRegister);
+ addq(destination, kPtrComprCageBaseRegister);
RecordComment("]");
}
@@ -296,7 +310,7 @@ void TurboAssembler::DecompressTaggedPointer(Register destination,
Register source) {
RecordComment("[ DecompressTaggedPointer");
movl(destination, source);
- addq(destination, kPointerCageBaseRegister);
+ addq(destination, kPtrComprCageBaseRegister);
RecordComment("]");
}
@@ -304,7 +318,7 @@ void TurboAssembler::DecompressAnyTagged(Register destination,
Operand field_operand) {
RecordComment("[ DecompressAnyTagged");
movl(destination, field_operand);
- addq(destination, kPointerCageBaseRegister);
+ addq(destination, kPtrComprCageBaseRegister);
RecordComment("]");
}
@@ -318,7 +332,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
Label done;
// Skip barrier if writing a smi.
- if (smi_check == INLINE_SMI_CHECK) {
+ if (smi_check == SmiCheck::kInline) {
JumpIfSmi(value, &done);
}
@@ -327,7 +341,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
DCHECK(IsAligned(offset, kTaggedSize));
leaq(dst, FieldOperand(object, offset));
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
Label ok;
testb(dst, Immediate(kTaggedSize - 1));
j(zero, &ok, Label::kNear);
@@ -336,13 +350,13 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
}
RecordWrite(object, dst, value, save_fp, remembered_set_action,
- OMIT_SMI_CHECK);
+ SmiCheck::kOmit);
bind(&done);
// Clobber clobbered input registers when running with the debug-code flag
// turned on to provoke errors.
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
Move(value, kZapValue, RelocInfo::NONE);
Move(dst, kZapValue, RelocInfo::NONE);
}
@@ -357,19 +371,27 @@ void TurboAssembler::SaveRegisters(RegList registers) {
}
}
-void TurboAssembler::LoadExternalPointerField(Register destination,
- Operand field_operand,
- ExternalPointerTag tag) {
+void TurboAssembler::LoadExternalPointerField(
+ Register destination, Operand field_operand, ExternalPointerTag tag,
+ Register scratch, IsolateRootLocation isolateRootLocation) {
#ifdef V8_HEAP_SANDBOX
- LoadAddress(kScratchRegister,
- ExternalReference::external_pointer_table_address(isolate()));
- movq(kScratchRegister,
- Operand(kScratchRegister, Internals::kExternalPointerTableBufferOffset));
+ DCHECK(!field_operand.AddressUsesRegister(scratch));
+ if (isolateRootLocation == IsolateRootLocation::kInRootRegister) {
+ DCHECK(root_array_available_);
+ movq(scratch, Operand(kRootRegister,
+ IsolateData::external_pointer_table_offset() +
+ Internals::kExternalPointerTableBufferOffset));
+ } else {
+ DCHECK(isolateRootLocation == IsolateRootLocation::kInScratchRegister);
+ movq(scratch,
+ Operand(scratch, IsolateData::external_pointer_table_offset() +
+ Internals::kExternalPointerTableBufferOffset));
+ }
movl(destination, field_operand);
- movq(destination, Operand(kScratchRegister, destination, times_8, 0));
+ movq(destination, Operand(scratch, destination, times_8, 0));
if (tag != 0) {
- movq(kScratchRegister, Immediate64(tag));
- xorq(destination, kScratchRegister);
+ movq(scratch, Immediate64(~tag));
+ andq(destination, scratch);
}
#else
movq(destination, field_operand);
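
Under V8_HEAP_SANDBOX the field now holds an index into the external pointer table, and the type tag is removed with an AND of the inverted tag rather than an XOR. A small sketch of that lookup-and-untag step; the table layout and kFakeTag value are invented for the example.

#include <cassert>
#include <cstdint>
#include <vector>

// Illustrative tag: high bits set on entries of a particular kind (not V8's value).
constexpr uint64_t kFakeTag = 0xC000000000000000ull;

uint64_t LoadExternalPointer(const std::vector<uint64_t>& table, uint32_t index,
                             uint64_t tag) {
  uint64_t entry = table[index];  // movq destination, [table + index*8]
  if (tag != 0) {
    entry &= ~tag;                // andq destination, ~tag (replaces the old xorq)
  }
  return entry;
}

int main() {
  uint64_t raw_pointer = 0x00007f1234560000ull;
  std::vector<uint64_t> table = {0, raw_pointer | kFakeTag};
  assert(LoadExternalPointer(table, 1, kFakeTag) == raw_pointer);
  return 0;
}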
@@ -480,13 +502,13 @@ void MacroAssembler::RecordWrite(Register object, Register address,
DCHECK(value != address);
AssertNotSmi(object);
- if ((remembered_set_action == OMIT_REMEMBERED_SET &&
+ if ((remembered_set_action == RememberedSetAction::kOmit &&
!FLAG_incremental_marking) ||
FLAG_disable_write_barriers) {
return;
}
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
Label ok;
cmp_tagged(value, Operand(address, 0));
j(equal, &ok, Label::kNear);
@@ -498,7 +520,7 @@ void MacroAssembler::RecordWrite(Register object, Register address,
// catch stores of smis and stores into the young generation.
Label done;
- if (smi_check == INLINE_SMI_CHECK) {
+ if (smi_check == SmiCheck::kInline) {
// Skip barrier if writing a smi.
JumpIfSmi(value, &done);
}
@@ -519,18 +541,18 @@ void MacroAssembler::RecordWrite(Register object, Register address,
// Clobber clobbered registers when running with the debug-code flag
// turned on to provoke errors.
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
Move(address, kZapValue, RelocInfo::NONE);
Move(value, kZapValue, RelocInfo::NONE);
}
}
void TurboAssembler::Assert(Condition cc, AbortReason reason) {
- if (emit_debug_code()) Check(cc, reason);
+ if (FLAG_debug_code) Check(cc, reason);
}
void TurboAssembler::AssertUnreachable(AbortReason reason) {
- if (emit_debug_code()) Abort(reason);
+ if (FLAG_debug_code) Abort(reason);
}
void TurboAssembler::Check(Condition cc, AbortReason reason) {
@@ -556,11 +578,11 @@ void TurboAssembler::CheckStackAlignment() {
}
void TurboAssembler::Abort(AbortReason reason) {
-#ifdef DEBUG
- const char* msg = GetAbortReason(reason);
- RecordComment("Abort message: ");
- RecordComment(msg);
-#endif
+ if (FLAG_code_comments) {
+ const char* msg = GetAbortReason(reason);
+ RecordComment("Abort message: ");
+ RecordComment(msg);
+ }
// Avoid emitting call to builtin if requested.
if (trap_on_abort()) {
@@ -603,7 +625,7 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
// arguments passed in because it is constant. At some point we
// should remove this need and make the runtime routine entry code
// smarter.
- Set(rax, num_arguments);
+ Move(rax, num_arguments);
LoadAddress(rbx, ExternalReference::Create(f));
Handle<Code> code =
CodeFactory::CEntry(isolate(), f->result_size, save_doubles);
@@ -624,7 +646,7 @@ void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
const Runtime::Function* function = Runtime::FunctionForId(fid);
DCHECK_EQ(1, function->result_size);
if (function->nargs >= 0) {
- Set(rax, function->nargs);
+ Move(rax, function->nargs);
}
JumpToExternalReference(ExternalReference::Create(fid));
}
@@ -633,8 +655,8 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
bool builtin_exit_frame) {
// Set the entry point and jump to the C entry runtime stub.
LoadAddress(rbx, ext);
- Handle<Code> code = CodeFactory::CEntry(isolate(), 1, kDontSaveFPRegs,
- kArgvOnStack, builtin_exit_frame);
+ Handle<Code> code = CodeFactory::CEntry(isolate(), 1, SaveFPRegsMode::kIgnore,
+ ArgvMode::kStack, builtin_exit_frame);
Jump(code, RelocInfo::CODE_TARGET);
}
@@ -656,7 +678,7 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
}
// R12 to r15 are callee save on all platforms.
- if (fp_mode == kSaveFPRegs) {
+ if (fp_mode == SaveFPRegsMode::kSave) {
bytes += kDoubleSize * XMMRegister::kNumRegisters;
}
@@ -678,7 +700,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
}
// R12 to r15 are callee save on all platforms.
- if (fp_mode == kSaveFPRegs) {
+ if (fp_mode == SaveFPRegsMode::kSave) {
int delta = kDoubleSize * XMMRegister::kNumRegisters;
AllocateStackSpace(delta);
for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
@@ -694,7 +716,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
Register exclusion2, Register exclusion3) {
int bytes = 0;
- if (fp_mode == kSaveFPRegs) {
+ if (fp_mode == SaveFPRegsMode::kSave) {
for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
Movsd(reg, Operand(rsp, i * kDoubleSize));
@@ -715,6 +737,24 @@ int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
return bytes;
}
+void TurboAssembler::Movq(XMMRegister dst, Register src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ vmovq(dst, src);
+ } else {
+ movq(dst, src);
+ }
+}
+
+void TurboAssembler::Movq(Register dst, XMMRegister src) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ vmovq(dst, src);
+ } else {
+ movq(dst, src);
+ }
+}
+
void TurboAssembler::Movdqa(XMMRegister dst, Operand src) {
// See comments in Movdqa(XMMRegister, XMMRegister).
if (CpuFeatures::IsSupported(AVX)) {
@@ -1033,7 +1073,7 @@ void ConvertFloatToUint64(TurboAssembler* tasm, Register dst,
// The input value is within uint64 range and the second conversion worked
// successfully, but we still have to undo the subtraction we did
// earlier.
- tasm->Set(kScratchRegister, 0x8000000000000000);
+ tasm->Move(kScratchRegister, 0x8000000000000000);
tasm->orq(dst, kScratchRegister);
tasm->bind(&success);
}
@@ -1055,26 +1095,6 @@ void TurboAssembler::Cvttss2uiq(Register dst, XMMRegister src, Label* fail) {
ConvertFloatToUint64<XMMRegister, false>(this, dst, src, fail);
}
-void TurboAssembler::Set(Register dst, int64_t x) {
- if (x == 0) {
- xorl(dst, dst);
- } else if (is_uint32(x)) {
- movl(dst, Immediate(static_cast<uint32_t>(x)));
- } else if (is_int32(x)) {
- movq(dst, Immediate(static_cast<int32_t>(x)));
- } else {
- movq(dst, x);
- }
-}
-
-void TurboAssembler::Set(Operand dst, intptr_t x) {
- if (is_int32(x)) {
- movq(dst, Immediate(static_cast<int32_t>(x)));
- } else {
- Set(kScratchRegister, x);
- movq(dst, kScratchRegister);
- }
-}
// ----------------------------------------------------------------------------
// Smi tagging, untagging and tag detection.
@@ -1084,36 +1104,6 @@ Register TurboAssembler::GetSmiConstant(Smi source) {
return kScratchRegister;
}
-void TurboAssembler::Move(Register dst, Smi source) {
- STATIC_ASSERT(kSmiTag == 0);
- int value = source.value();
- if (value == 0) {
- xorl(dst, dst);
- } else if (SmiValuesAre32Bits() || value < 0) {
- Move(dst, source.ptr(), RelocInfo::NONE);
- } else {
- uint32_t uvalue = static_cast<uint32_t>(source.ptr());
- if (uvalue <= 0xFF) {
- // Emit shorter instructions for small Smis
- xorl(dst, dst);
- movb(dst, Immediate(uvalue));
- } else {
- movl(dst, Immediate(uvalue));
- }
- }
-}
-
-void TurboAssembler::Move(Register dst, ExternalReference ext) {
- // TODO(jgruber,v8:8887): Also consider a root-relative load when generating
- // non-isolate-independent code. In many cases it might be cheaper than
- // embedding the relocatable value.
- if (root_array_available_ && options().isolate_independent_code) {
- IndirectLoadExternalReference(dst, ext);
- return;
- }
- movq(dst, Immediate64(ext.address(), RelocInfo::EXTERNAL_REFERENCE));
-}
-
void MacroAssembler::Cmp(Register dst, int32_t src) {
if (src == 0) {
testl(dst, dst);
@@ -1335,6 +1325,39 @@ void TurboAssembler::Push(Smi source) {
// ----------------------------------------------------------------------------
+void TurboAssembler::Move(Register dst, Smi source) {
+ STATIC_ASSERT(kSmiTag == 0);
+ int value = source.value();
+ if (value == 0) {
+ xorl(dst, dst);
+ } else if (SmiValuesAre32Bits() || value < 0) {
+ Move(dst, source.ptr(), RelocInfo::NONE);
+ } else {
+ uint32_t uvalue = static_cast<uint32_t>(source.ptr());
+ Move(dst, uvalue);
+ }
+}
+
+void TurboAssembler::Move(Operand dst, intptr_t x) {
+ if (is_int32(x)) {
+ movq(dst, Immediate(static_cast<int32_t>(x)));
+ } else {
+ Move(kScratchRegister, x);
+ movq(dst, kScratchRegister);
+ }
+}
+
+void TurboAssembler::Move(Register dst, ExternalReference ext) {
+ // TODO(jgruber,v8:8887): Also consider a root-relative load when generating
+ // non-isolate-independent code. In many cases it might be cheaper than
+ // embedding the relocatable value.
+ if (root_array_available_ && options().isolate_independent_code) {
+ IndirectLoadExternalReference(dst, ext);
+ return;
+ }
+ movq(dst, Immediate64(ext.address(), RelocInfo::EXTERNAL_REFERENCE));
+}
+
void TurboAssembler::Move(Register dst, Register src) {
if (dst != src) {
movq(dst, src);
@@ -1342,7 +1365,13 @@ void TurboAssembler::Move(Register dst, Register src) {
}
void TurboAssembler::Move(Register dst, Operand src) { movq(dst, src); }
-void TurboAssembler::Move(Register dst, Immediate src) { movl(dst, src); }
+void TurboAssembler::Move(Register dst, Immediate src) {
+ if (src.rmode() == RelocInfo::Mode::NONE) {
+ Move(dst, src.value());
+ } else {
+ movl(dst, src);
+ }
+}
void TurboAssembler::Move(XMMRegister dst, XMMRegister src) {
if (dst != src) {
@@ -1481,7 +1510,7 @@ void TurboAssembler::PushArray(Register array, Register size, Register scratch,
Register counter = scratch;
Label loop, entry;
if (order == PushArrayOrder::kReverse) {
- Set(counter, 0);
+ Move(counter, 0);
jmp(&entry);
bind(&loop);
Push(Operand(array, counter, times_system_pointer_size, 0));
@@ -1681,7 +1710,7 @@ void TurboAssembler::CallBuiltin(int builtin_index) {
Move(kScratchRegister, entry, RelocInfo::OFF_HEAP_TARGET);
call(kScratchRegister);
}
- if (FLAG_code_comments) RecordComment("]");
+ RecordComment("]");
}
void TurboAssembler::TailCallBuiltin(int builtin_index) {
@@ -1698,7 +1727,7 @@ void TurboAssembler::TailCallBuiltin(int builtin_index) {
Address entry = d.InstructionStartOfBuiltin(builtin_index);
Jump(entry, RelocInfo::OFF_HEAP_TARGET);
}
- if (FLAG_code_comments) RecordComment("]");
+ RecordComment("]");
}
void TurboAssembler::LoadCodeObjectEntry(Register destination,
@@ -1854,29 +1883,6 @@ void TurboAssembler::Pmaddubsw(XMMRegister dst, XMMRegister src1,
}
}
-void TurboAssembler::Unpcklps(XMMRegister dst, XMMRegister src1, Operand src2) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vunpcklps(dst, src1, src2);
- } else {
- DCHECK_EQ(dst, src1);
- unpcklps(dst, src2);
- }
-}
-
-void TurboAssembler::Shufps(XMMRegister dst, XMMRegister src1, XMMRegister src2,
- byte imm8) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vshufps(dst, src1, src2, imm8);
- } else {
- if (dst != src1) {
- movaps(dst, src1);
- }
- shufps(dst, src2, imm8);
- }
-}
-
void TurboAssembler::Pextrd(Register dst, XMMRegister src, uint8_t imm8) {
if (imm8 == 0) {
Movd(dst, src);
@@ -2008,36 +2014,6 @@ void TurboAssembler::Pinsrq(XMMRegister dst, XMMRegister src1, Operand src2,
imm8, base::Optional<CpuFeature>(SSE4_1));
}
-void TurboAssembler::Psllq(XMMRegister dst, byte imm8) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpsllq(dst, dst, imm8);
- } else {
- DCHECK(!IsEnabled(AVX));
- psllq(dst, imm8);
- }
-}
-
-void TurboAssembler::Psrlq(XMMRegister dst, byte imm8) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpsrlq(dst, dst, imm8);
- } else {
- DCHECK(!IsEnabled(AVX));
- psrlq(dst, imm8);
- }
-}
-
-void TurboAssembler::Pslld(XMMRegister dst, byte imm8) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpslld(dst, dst, imm8);
- } else {
- DCHECK(!IsEnabled(AVX));
- pslld(dst, imm8);
- }
-}
-
void TurboAssembler::Pblendvb(XMMRegister dst, XMMRegister src1,
XMMRegister src2, XMMRegister mask) {
if (CpuFeatures::IsSupported(AVX)) {
@@ -2375,21 +2351,6 @@ void TurboAssembler::Negpd(XMMRegister dst) {
ExternalReference::address_of_double_neg_constant()));
}
-void TurboAssembler::Psrld(XMMRegister dst, byte imm8) {
- Psrld(dst, dst, imm8);
-}
-
-void TurboAssembler::Psrld(XMMRegister dst, XMMRegister src, byte imm8) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpsrld(dst, src, imm8);
- } else {
- DCHECK(!IsEnabled(AVX));
- DCHECK_EQ(dst, src);
- psrld(dst, imm8);
- }
-}
-
void TurboAssembler::Lzcntl(Register dst, Register src) {
if (CpuFeatures::IsSupported(LZCNT)) {
CpuFeatureScope scope(this, LZCNT);
@@ -2593,28 +2554,28 @@ void MacroAssembler::CmpInstanceTypeRange(Register map,
}
void MacroAssembler::AssertNotSmi(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
Condition is_smi = CheckSmi(object);
Check(NegateCondition(is_smi), AbortReason::kOperandIsASmi);
}
}
void MacroAssembler::AssertSmi(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
Condition is_smi = CheckSmi(object);
Check(is_smi, AbortReason::kOperandIsNotASmi);
}
}
void MacroAssembler::AssertSmi(Operand object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
Condition is_smi = CheckSmi(object);
Check(is_smi, AbortReason::kOperandIsNotASmi);
}
}
void TurboAssembler::AssertZeroExtended(Register int32_register) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
DCHECK_NE(int32_register, kScratchRegister);
movq(kScratchRegister, int64_t{0x0000000100000000});
cmpq(kScratchRegister, int32_register);
@@ -2623,7 +2584,7 @@ void TurboAssembler::AssertZeroExtended(Register int32_register) {
}
void MacroAssembler::AssertConstructor(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
testb(object, Immediate(kSmiTagMask));
Check(not_equal, AbortReason::kOperandIsASmiAndNotAConstructor);
Push(object);
@@ -2636,7 +2597,7 @@ void MacroAssembler::AssertConstructor(Register object) {
}
void MacroAssembler::AssertFunction(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
testb(object, Immediate(kSmiTagMask));
Check(not_equal, AbortReason::kOperandIsASmiAndNotAFunction);
Push(object);
@@ -2648,7 +2609,7 @@ void MacroAssembler::AssertFunction(Register object) {
}
void MacroAssembler::AssertBoundFunction(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
testb(object, Immediate(kSmiTagMask));
Check(not_equal, AbortReason::kOperandIsASmiAndNotABoundFunction);
Push(object);
@@ -2659,7 +2620,7 @@ void MacroAssembler::AssertBoundFunction(Register object) {
}
void MacroAssembler::AssertGeneratorObject(Register object) {
- if (!emit_debug_code()) return;
+ if (!FLAG_debug_code) return;
testb(object, Immediate(kSmiTagMask));
Check(not_equal, AbortReason::kOperandIsASmiAndNotAGeneratorObject);
@@ -2687,12 +2648,16 @@ void MacroAssembler::AssertGeneratorObject(Register object) {
}
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
Label done_checking;
AssertNotSmi(object);
Cmp(object, isolate()->factory()->undefined_value());
j(equal, &done_checking);
- Cmp(FieldOperand(object, 0), isolate()->factory()->allocation_site_map());
+ Register map = object;
+ Push(object);
+ LoadMap(map, object);
+ Cmp(map, isolate()->factory()->allocation_site_map());
+ Pop(object);
Assert(equal, AbortReason::kExpectedUndefinedOrCell);
bind(&done_checking);
}
@@ -2737,20 +2702,6 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
}
}
-void MacroAssembler::MaybeDropFrames() {
- // Check whether we need to drop frames to restart a function on the stack.
- ExternalReference restart_fp =
- ExternalReference::debug_restart_fp_address(isolate());
- Load(rbx, restart_fp);
- testq(rbx, rbx);
-
- Label dont_drop;
- j(zero, &dont_drop, Label::kNear);
- Jump(BUILTIN_CODE(isolate(), FrameDropperTrampoline), RelocInfo::CODE_TARGET);
-
- bind(&dont_drop);
-}
-
void TurboAssembler::PrepareForTailCall(Register callee_args_count,
Register caller_args_count,
Register scratch0, Register scratch1) {
@@ -2801,32 +2752,32 @@ void TurboAssembler::PrepareForTailCall(Register callee_args_count,
void MacroAssembler::InvokeFunction(Register function, Register new_target,
Register actual_parameter_count,
- InvokeFlag flag) {
+ InvokeType type) {
LoadTaggedPointerField(
rbx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
movzxwq(rbx,
FieldOperand(rbx, SharedFunctionInfo::kFormalParameterCountOffset));
- InvokeFunction(function, new_target, rbx, actual_parameter_count, flag);
+ InvokeFunction(function, new_target, rbx, actual_parameter_count, type);
}
void MacroAssembler::InvokeFunction(Register function, Register new_target,
Register expected_parameter_count,
Register actual_parameter_count,
- InvokeFlag flag) {
- DCHECK(function == rdi);
+ InvokeType type) {
+ DCHECK_EQ(function, rdi);
LoadTaggedPointerField(rsi,
FieldOperand(function, JSFunction::kContextOffset));
InvokeFunctionCode(rdi, new_target, expected_parameter_count,
- actual_parameter_count, flag);
+ actual_parameter_count, type);
}
void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
Register expected_parameter_count,
Register actual_parameter_count,
- InvokeFlag flag) {
+ InvokeType type) {
// You can't call a function without a valid frame.
- DCHECK_IMPLIES(flag == CALL_FUNCTION, has_frame());
+ DCHECK_IMPLIES(type == InvokeType::kCall, has_frame());
DCHECK_EQ(function, rdi);
DCHECK_IMPLIES(new_target.is_valid(), new_target == rdx);
@@ -2848,17 +2799,19 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
}
Label done;
- InvokePrologue(expected_parameter_count, actual_parameter_count, &done, flag);
+ InvokePrologue(expected_parameter_count, actual_parameter_count, &done, type);
// We call indirectly through the code field in the function to
// allow recompilation to take effect without changing any of the
// call sites.
static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
LoadTaggedPointerField(rcx, FieldOperand(function, JSFunction::kCodeOffset));
- if (flag == CALL_FUNCTION) {
- CallCodeObject(rcx);
- } else {
- DCHECK(flag == JUMP_FUNCTION);
- JumpCodeObject(rcx);
+ switch (type) {
+ case InvokeType::kCall:
+ CallCodeObject(rcx);
+ break;
+ case InvokeType::kJump:
+ JumpCodeObject(rcx);
+ break;
}
jmp(&done, Label::kNear);
@@ -2911,7 +2864,7 @@ void MacroAssembler::StackOverflowCheck(
void MacroAssembler::InvokePrologue(Register expected_parameter_count,
Register actual_parameter_count,
- Label* done, InvokeFlag flag) {
+ Label* done, InvokeType type) {
if (expected_parameter_count != actual_parameter_count) {
Label regular_invoke;
// If the expected parameter count is equal to the adaptor sentinel, no need
@@ -2937,9 +2890,9 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
Operand(expected_parameter_count, times_system_pointer_size, 0));
AllocateStackSpace(kScratchRegister);
// Extra words are the receiver and the return address (if a jump).
- int extra_words = flag == CALL_FUNCTION ? 1 : 2;
+ int extra_words = type == InvokeType::kCall ? 1 : 2;
leaq(num, Operand(rax, extra_words)); // Number of words to copy.
- Set(current, 0);
+ Move(current, 0);
// Fall-through to the loop body because there are non-zero words to copy.
bind(&copy);
movq(kScratchRegister,
@@ -3034,7 +2987,7 @@ void TurboAssembler::LeaveFrame(StackFrame::Type type) {
// TODO(v8:11429): Consider passing BASELINE instead, and checking for
// IsJSFrame or similar. Could then unify with manual frame leaves in the
// interpreter too.
- if (emit_debug_code() && !StackFrame::IsJavaScript(type)) {
+ if (FLAG_debug_code && !StackFrame::IsJavaScript(type)) {
cmpq(Operand(rbp, CommonFrameConstants::kContextOrFrameTypeOffset),
Immediate(StackFrame::TypeToMarker(type)));
Check(equal, AbortReason::kStackFrameTypesMustMatch);
@@ -3043,11 +2996,11 @@ void TurboAssembler::LeaveFrame(StackFrame::Type type) {
popq(rbp);
}
-#ifdef V8_TARGET_OS_WIN
+#if defined(V8_TARGET_OS_WIN) || defined(V8_TARGET_OS_MACOSX)
void TurboAssembler::AllocateStackSpace(Register bytes_scratch) {
- // In windows, we cannot increment the stack size by more than one page
- // (minimum page size is 4KB) without accessing at least one byte on the
- // page. Check this:
+ // On Windows and on macOS, we cannot increment the stack size by more than
+ // one page (minimum page size is 4KB) without accessing at least one byte on
+ // the page. Check this:
// https://msdn.microsoft.com/en-us/library/aa227153(v=vs.60).aspx.
Label check_offset;
Label touch_next_page;
@@ -3278,7 +3231,7 @@ void TurboAssembler::CallCFunction(Register function, int num_arguments) {
DCHECK_LE(num_arguments, kMaxCParameters);
DCHECK(has_frame());
// Check stack alignment.
- if (emit_debug_code()) {
+ if (FLAG_debug_code) {
CheckStackAlignment();
}
@@ -3376,7 +3329,7 @@ void TurboAssembler::ComputeCodeStartAddress(Register dst) {
void TurboAssembler::ResetSpeculationPoisonRegister() {
// TODO(turbofan): Perhaps, we want to put an lfence here.
- Set(kSpeculationPoisonRegister, -1);
+ Move(kSpeculationPoisonRegister, -1);
}
void TurboAssembler::CallForDeoptimization(Builtins::Name target, int,
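
An illustrative sketch, not part of the diff: the macro-assembler change above extends AllocateStackSpace's page-probing behaviour from Windows to macOS. The C++ below only models the idea that the stack may not grow by more than one page without touching a byte on each new page; the constant and helper names are hypothetical, the real code emits equivalent machine instructions.

#include <cstddef>

constexpr std::size_t kStackPageSizeSketch = 4096;  // minimum guard-page granularity

// Lower a stack pointer by `bytes`, touching one byte on every page crossed so
// each OS guard page is faulted in order before the next page is reached.
inline char* AllocateStackSpaceSketch(char* stack_pointer, std::size_t bytes) {
  while (bytes > kStackPageSizeSketch) {
    stack_pointer -= kStackPageSizeSketch;
    *stack_pointer = 0;  // touch the new page
    bytes -= kStackPageSizeSketch;
  }
  return stack_pointer - bytes;  // final partial page needs no extra touch
}
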
diff --git a/chromium/v8/src/codegen/x64/macro-assembler-x64.h b/chromium/v8/src/codegen/x64/macro-assembler-x64.h
index 40ab1b925c8..da5cf7dae37 100644
--- a/chromium/v8/src/codegen/x64/macro-assembler-x64.h
+++ b/chromium/v8/src/codegen/x64/macro-assembler-x64.h
@@ -14,6 +14,7 @@
#include "src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h"
#include "src/codegen/x64/assembler-x64.h"
#include "src/common/globals.h"
+#include "src/execution/isolate-data.h"
#include "src/objects/contexts.h"
#include "src/objects/tagged-index.h"
@@ -25,9 +26,6 @@ using MemOperand = Operand;
class StringConstantBase;
-enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
-enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
-
struct SmiIndex {
SmiIndex(Register index_register, ScaleFactor scale)
: reg(index_register), scale(scale) {}
@@ -65,124 +63,43 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
AVX_OP(Subsd, subsd)
AVX_OP(Divss, divss)
AVX_OP(Divsd, divsd)
- AVX_OP(Orps, orps)
- AVX_OP(Xorps, xorps)
- AVX_OP(Xorpd, xorpd)
- AVX_OP(Movq, movq)
- AVX_OP(Movhlps, movhlps)
- AVX_OP(Pcmpeqb, pcmpeqb)
- AVX_OP(Pcmpeqw, pcmpeqw)
- AVX_OP(Pcmpeqd, pcmpeqd)
- AVX_OP(Pcmpgtb, pcmpgtb)
AVX_OP(Pcmpgtw, pcmpgtw)
AVX_OP(Pmaxsw, pmaxsw)
- AVX_OP(Pmaxub, pmaxub)
AVX_OP(Pminsw, pminsw)
- AVX_OP(Pminub, pminub)
AVX_OP(Addss, addss)
AVX_OP(Addsd, addsd)
AVX_OP(Mulsd, mulsd)
- AVX_OP(Andps, andps)
- AVX_OP(Andnps, andnps)
- AVX_OP(Andpd, andpd)
- AVX_OP(Andnpd, andnpd)
- AVX_OP(Orpd, orpd)
AVX_OP(Cmpeqps, cmpeqps)
AVX_OP(Cmpltps, cmpltps)
- AVX_OP(Cmpleps, cmpleps)
AVX_OP(Cmpneqps, cmpneqps)
AVX_OP(Cmpnltps, cmpnltps)
AVX_OP(Cmpnleps, cmpnleps)
- AVX_OP(Cmpeqpd, cmpeqpd)
- AVX_OP(Cmpltpd, cmpltpd)
- AVX_OP(Cmplepd, cmplepd)
- AVX_OP(Cmpneqpd, cmpneqpd)
AVX_OP(Cmpnltpd, cmpnltpd)
AVX_OP(Cmpnlepd, cmpnlepd)
- AVX_OP(Sqrtss, sqrtss)
- AVX_OP(Sqrtsd, sqrtsd)
AVX_OP(Cvttpd2dq, cvttpd2dq)
AVX_OP(Ucomiss, ucomiss)
AVX_OP(Ucomisd, ucomisd)
- AVX_OP(Pand, pand)
- AVX_OP(Por, por)
- AVX_OP(Pxor, pxor)
- AVX_OP(Psubb, psubb)
- AVX_OP(Psubw, psubw)
- AVX_OP(Psubd, psubd)
- AVX_OP(Psubq, psubq)
- AVX_OP(Psubsb, psubsb)
AVX_OP(Psubsw, psubsw)
- AVX_OP(Psubusb, psubusb)
AVX_OP(Psubusw, psubusw)
- AVX_OP(Pslld, pslld)
- AVX_OP(Pavgb, pavgb)
- AVX_OP(Pavgw, pavgw)
- AVX_OP(Psraw, psraw)
- AVX_OP(Psrad, psrad)
- AVX_OP(Psllw, psllw)
- AVX_OP(Psllq, psllq)
- AVX_OP(Psrlw, psrlw)
- AVX_OP(Psrld, psrld)
- AVX_OP(Psrlq, psrlq)
- AVX_OP(Paddb, paddb)
- AVX_OP(Paddw, paddw)
- AVX_OP(Paddd, paddd)
- AVX_OP(Paddq, paddq)
- AVX_OP(Paddsb, paddsb)
AVX_OP(Paddsw, paddsw)
- AVX_OP(Paddusb, paddusb)
- AVX_OP(Paddusw, paddusw)
AVX_OP(Pcmpgtd, pcmpgtd)
- AVX_OP(Pmuludq, pmuludq)
- AVX_OP(Addpd, addpd)
- AVX_OP(Subpd, subpd)
- AVX_OP(Mulpd, mulpd)
- AVX_OP(Minps, minps)
- AVX_OP(Minpd, minpd)
- AVX_OP(Divpd, divpd)
- AVX_OP(Maxps, maxps)
- AVX_OP(Maxpd, maxpd)
- AVX_OP(Addps, addps)
- AVX_OP(Subps, subps)
- AVX_OP(Mulps, mulps)
- AVX_OP(Divps, divps)
- AVX_OP(Packsswb, packsswb)
- AVX_OP(Packuswb, packuswb)
- AVX_OP(Packssdw, packssdw)
- AVX_OP(Punpcklbw, punpcklbw)
- AVX_OP(Punpcklwd, punpcklwd)
- AVX_OP(Punpckldq, punpckldq)
- AVX_OP(Punpckhbw, punpckhbw)
- AVX_OP(Punpckhwd, punpckhwd)
- AVX_OP(Punpckhdq, punpckhdq)
- AVX_OP(Punpcklqdq, punpcklqdq)
- AVX_OP(Punpckhqdq, punpckhqdq)
- AVX_OP(Cmpps, cmpps)
- AVX_OP(Cmppd, cmppd)
+ AVX_OP(Pcmpeqb, pcmpeqb)
+ AVX_OP(Pcmpeqw, pcmpeqw)
+ AVX_OP(Pcmpeqd, pcmpeqd)
AVX_OP(Movlhps, movlhps)
- AVX_OP_SSE3(Haddps, haddps)
AVX_OP_SSSE3(Phaddd, phaddd)
AVX_OP_SSSE3(Phaddw, phaddw)
AVX_OP_SSSE3(Pshufb, pshufb)
- AVX_OP_SSSE3(Psignb, psignb)
- AVX_OP_SSSE3(Psignw, psignw)
- AVX_OP_SSSE3(Psignd, psignd)
- AVX_OP_SSSE3(Palignr, palignr)
AVX_OP_SSE4_1(Pcmpeqq, pcmpeqq)
AVX_OP_SSE4_1(Packusdw, packusdw)
- AVX_OP_SSE4_1(Pminsb, pminsb)
AVX_OP_SSE4_1(Pminsd, pminsd)
AVX_OP_SSE4_1(Pminuw, pminuw)
AVX_OP_SSE4_1(Pminud, pminud)
- AVX_OP_SSE4_1(Pmaxsb, pmaxsb)
- AVX_OP_SSE4_1(Pmaxsd, pmaxsd)
AVX_OP_SSE4_1(Pmaxuw, pmaxuw)
AVX_OP_SSE4_1(Pmaxud, pmaxud)
AVX_OP_SSE4_1(Pmulld, pmulld)
AVX_OP_SSE4_1(Insertps, insertps)
AVX_OP_SSE4_1(Pinsrq, pinsrq)
- AVX_OP_SSE4_1(Pblendw, pblendw)
AVX_OP_SSE4_1(Pextrq, pextrq)
AVX_OP_SSE4_1(Roundss, roundss)
AVX_OP_SSE4_1(Roundsd, roundsd)
@@ -190,6 +107,12 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
#undef AVX_OP
+ // Define Movq here instead of using AVX_OP. movq is defined using templates,
+ // and there is a function template `void movq(P1)` which, although it can
+ // never actually be instantiated, would still be selected when deducing the
+ // arguments for AvxHelper.
+ void Movq(XMMRegister dst, Register src);
+ void Movq(Register dst, XMMRegister src);
+
void PushReturnAddressFrom(Register src) { pushq(src); }
void PopReturnAddressTo(Register dst) { popq(dst); }
@@ -199,10 +122,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
// may be bigger than 2^16 - 1. Requires a scratch register.
void Ret(int bytes_dropped, Register scratch);
- // Load a register with a long value as efficiently as possible.
- void Set(Register dst, int64_t x);
- void Set(Operand dst, intptr_t x);
-
// Operations on roots in the root-array.
void LoadRoot(Register destination, RootIndex index) override;
void LoadRoot(Operand destination, RootIndex index) {
@@ -323,8 +242,28 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
j(less, dest);
}
+#ifdef V8_MAP_PACKING
+ void UnpackMapWord(Register r);
+#endif
+
void LoadMap(Register destination, Register object);
+ void Move(Register dst, intptr_t x) {
+ if (x == 0) {
+ xorl(dst, dst);
+ } else if (is_uint8(x)) {
+ xorl(dst, dst);
+ movb(dst, Immediate(static_cast<uint32_t>(x)));
+ } else if (is_uint32(x)) {
+ movl(dst, Immediate(static_cast<uint32_t>(x)));
+ } else if (is_int32(x)) {
+ // "movq reg64, imm32" is sign extending.
+ movq(dst, Immediate(static_cast<int32_t>(x)));
+ } else {
+ movq(dst, Immediate64(x));
+ }
+ }
+ void Move(Operand dst, intptr_t x);
void Move(Register dst, Smi source);
void Move(Operand dst, Smi source) {
@@ -332,13 +271,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
movq(dst, constant);
}
- void Move(Register dst, TaggedIndex source) {
- movl(dst, Immediate(static_cast<uint32_t>(source.ptr())));
- }
+ void Move(Register dst, TaggedIndex source) { Move(dst, source.ptr()); }
- void Move(Operand dst, TaggedIndex source) {
- movl(dst, Immediate(static_cast<uint32_t>(source.ptr())));
- }
+ void Move(Operand dst, TaggedIndex source) { Move(dst, source.ptr()); }
void Move(Register dst, ExternalReference ext);
@@ -449,10 +384,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
void Pmaddubsw(XMMRegister dst, XMMRegister src1, Operand src2);
void Pmaddubsw(XMMRegister dst, XMMRegister src1, XMMRegister src2);
- void Unpcklps(XMMRegister dst, XMMRegister src1, Operand src2);
- // Shufps that will mov src1 into dst if AVX is not supported.
- void Shufps(XMMRegister dst, XMMRegister src1, XMMRegister src2, byte imm8);
-
// Non-SSE2 instructions.
void Pextrd(Register dst, XMMRegister src, uint8_t imm8);
@@ -467,16 +398,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
void Pinsrq(XMMRegister dst, XMMRegister src1, Register src2, uint8_t imm8);
void Pinsrq(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t imm8);
- void Psllq(XMMRegister dst, int imm8) { Psllq(dst, static_cast<byte>(imm8)); }
- void Psllq(XMMRegister dst, byte imm8);
- void Psrlq(XMMRegister dst, int imm8) { Psrlq(dst, static_cast<byte>(imm8)); }
- void Psrlq(XMMRegister dst, byte imm8);
- void Pslld(XMMRegister dst, byte imm8);
- void Psrld(XMMRegister dst, byte imm8);
-
- // Supports both AVX (dst != src1) and SSE (checks that dst == src1).
- void Psrld(XMMRegister dst, XMMRegister src, byte imm8);
-
void Pblendvb(XMMRegister dst, XMMRegister src1, XMMRegister src2,
XMMRegister mask);
void Blendvps(XMMRegister dst, XMMRegister src1, XMMRegister src2,
@@ -551,7 +472,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
// stack check, do it before calling this function because this function may
// write into the newly allocated space. It may also overwrite the given
// register's value, in the version that takes a register.
-#ifdef V8_TARGET_OS_WIN
+#if defined(V8_TARGET_OS_WIN) || defined(V8_TARGET_OS_MACOSX)
void AllocateStackSpace(Register bytes_scratch);
void AllocateStackSpace(int bytes);
#else
@@ -575,7 +496,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
ExternalReference isolate_root = ExternalReference::isolate_root(isolate());
Move(kRootRegister, isolate_root);
#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
- Move(kPointerCageBaseRegister, isolate_root);
+ LoadRootRelative(kPtrComprCageBaseRegister,
+ IsolateData::cage_base_offset());
#endif
}
@@ -675,10 +597,13 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
// ---------------------------------------------------------------------------
// V8 Heap sandbox support
+ enum class IsolateRootLocation { kInScratchRegister, kInRootRegister };
// Loads a field containing off-heap pointer and does necessary decoding
// if V8 heap sandbox is enabled.
void LoadExternalPointerField(Register destination, Operand field_operand,
- ExternalPointerTag tag);
+ ExternalPointerTag tag, Register scratch,
+ IsolateRootLocation isolateRootLocation =
+ IsolateRootLocation::kInRootRegister);
protected:
static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
@@ -751,8 +676,8 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void RecordWriteField(
Register object, int offset, Register value, Register scratch,
SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
+ RememberedSetAction remembered_set_action = RememberedSetAction::kEmit,
+ SmiCheck smi_check = SmiCheck::kInline);
// For page containing |object| mark region covering |address|
// dirty. |object| is the object being stored into, |value| is the
@@ -761,11 +686,8 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// the write barrier if the value is a smi.
void RecordWrite(
Register object, Register address, Register value, SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
- SmiCheck smi_check = INLINE_SMI_CHECK);
-
- // Frame restart support.
- void MaybeDropFrames();
+ RememberedSetAction remembered_set_action = RememberedSetAction::kEmit,
+ SmiCheck smi_check = SmiCheck::kInline);
// Enter specific kind of exit frame; either in normal or
// debug mode. Expects the number of arguments in register rax and
@@ -797,7 +719,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Invoke the JavaScript function code by either calling or jumping.
void InvokeFunctionCode(Register function, Register new_target,
Register expected_parameter_count,
- Register actual_parameter_count, InvokeFlag flag);
+ Register actual_parameter_count, InvokeType type);
// On function call, call into the debugger.
void CallDebugOnFunctionCall(Register fun, Register new_target,
@@ -807,11 +729,11 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Invoke the JavaScript function in the given register. Changes the
// current context to the context in the function before invoking.
void InvokeFunction(Register function, Register new_target,
- Register actual_parameter_count, InvokeFlag flag);
+ Register actual_parameter_count, InvokeType type);
void InvokeFunction(Register function, Register new_target,
Register expected_parameter_count,
- Register actual_parameter_count, InvokeFlag flag);
+ Register actual_parameter_count, InvokeType type);
// ---------------------------------------------------------------------------
// Conversions between tagged smi values and non-tagged integer values.
@@ -975,18 +897,18 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Call a runtime routine.
void CallRuntime(const Runtime::Function* f, int num_arguments,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs);
+ SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore);
// Convenience function: Same as above, but takes the fid instead.
void CallRuntime(Runtime::FunctionId fid,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+ SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) {
const Runtime::Function* function = Runtime::FunctionForId(fid);
CallRuntime(function, function->nargs, save_doubles);
}
// Convenience function: Same as above, but takes the fid instead.
void CallRuntime(Runtime::FunctionId fid, int num_arguments,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+ SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) {
CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
}
@@ -1017,7 +939,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
// Helper functions for generating invokes.
void InvokePrologue(Register expected_parameter_count,
Register actual_parameter_count, Label* done,
- InvokeFlag flag);
+ InvokeType type);
void EnterExitFramePrologue(Register saved_rax_reg,
StackFrame::Type frame_type);
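
An illustrative sketch, not part of the diff, of the encoding choice made by the new TurboAssembler::Move(Register, intptr_t) shown above. The helper only mirrors the decision tree and returns a description of the instruction that would be emitted; it generates no code itself.

#include <cstdint>
#include <limits>
#include <string>

inline std::string PickMoveEncodingSketch(std::intptr_t x) {
  if (x == 0) return "xorl dst, dst";  // shortest form, clears the upper half too
  if (x > 0 && x <= 0xFF) return "xorl dst, dst; movb dst, imm8";
  if (x > 0 && x <= 0xFFFFFFFFLL)
    return "movl dst, imm32";  // writing the low 32 bits zero-extends
  if (x >= std::numeric_limits<std::int32_t>::min() &&
      x <= std::numeric_limits<std::int32_t>::max())
    return "movq dst, imm32";  // the 32-bit immediate is sign-extended
  return "movq dst, imm64";    // full 64-bit immediate as a last resort
}
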
diff --git a/chromium/v8/src/codegen/x64/register-x64.h b/chromium/v8/src/codegen/x64/register-x64.h
index 9a812d06a1b..61e7ccf396a 100644
--- a/chromium/v8/src/codegen/x64/register-x64.h
+++ b/chromium/v8/src/codegen/x64/register-x64.h
@@ -212,7 +212,7 @@ constexpr Register kReturnRegister2 = r8;
constexpr Register kJSFunctionRegister = rdi;
constexpr Register kContextRegister = rsi;
constexpr Register kAllocateSizeRegister = rdx;
-constexpr Register kSpeculationPoisonRegister = r12;
+constexpr Register kSpeculationPoisonRegister = r11;
constexpr Register kInterpreterAccumulatorRegister = rax;
constexpr Register kInterpreterBytecodeOffsetRegister = r9;
constexpr Register kInterpreterBytecodeArrayRegister = r12;
@@ -236,9 +236,9 @@ constexpr Register kScratchRegister = r10;
constexpr XMMRegister kScratchDoubleReg = xmm15;
constexpr Register kRootRegister = r13; // callee save
#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
-constexpr Register kPointerCageBaseRegister = r14; // callee save
+constexpr Register kPtrComprCageBaseRegister = r14; // callee save
#else
-constexpr Register kPointerCageBaseRegister = kRootRegister;
+constexpr Register kPtrComprCageBaseRegister = kRootRegister;
#endif
constexpr Register kOffHeapTrampolineRegister = kScratchRegister;
diff --git a/chromium/v8/src/common/external-pointer-inl.h b/chromium/v8/src/common/external-pointer-inl.h
index bc7aea3691b..737bd55e5ca 100644
--- a/chromium/v8/src/common/external-pointer-inl.h
+++ b/chromium/v8/src/common/external-pointer-inl.h
@@ -12,20 +12,13 @@
namespace v8 {
namespace internal {
-V8_INLINE Address DecodeExternalPointer(PtrComprCageBase isolate_root,
+V8_INLINE Address DecodeExternalPointer(const Isolate* isolate,
ExternalPointer_t encoded_pointer,
ExternalPointerTag tag) {
STATIC_ASSERT(kExternalPointerSize == kSystemPointerSize);
#ifdef V8_HEAP_SANDBOX
-
- // TODO(syg): V8_HEAP_SANDBOX doesn't work with pointer cage
-#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
-#error "V8_HEAP_SANDBOX requires per-Isolate pointer compression cage"
-#endif
-
uint32_t index = static_cast<uint32_t>(encoded_pointer);
- const Isolate* isolate = Isolate::FromRootAddress(isolate_root.address());
- return isolate->external_pointer_table().get(index) ^ tag;
+ return isolate->external_pointer_table().get(index) & ~tag;
#else
return encoded_pointer;
#endif
@@ -47,9 +40,10 @@ V8_INLINE void InitExternalPointerField(Address field_address,
V8_INLINE void InitExternalPointerField(Address field_address, Isolate* isolate,
Address value, ExternalPointerTag tag) {
#ifdef V8_HEAP_SANDBOX
+ DCHECK_EQ(value & kExternalPointerTagMask, 0);
ExternalPointer_t index = isolate->external_pointer_table().allocate();
isolate->external_pointer_table().set(static_cast<uint32_t>(index),
- value ^ tag);
+ value | tag);
static_assert(kExternalPointerSize == kSystemPointerSize,
"Review the code below, once kExternalPointerSize is 4-byte "
"the address of the field will always be aligned");
@@ -68,7 +62,7 @@ V8_INLINE void InitExternalPointerField(Address field_address, Isolate* isolate,
}
V8_INLINE Address ReadExternalPointerField(Address field_address,
- PtrComprCageBase cage_base,
+ const Isolate* isolate,
ExternalPointerTag tag) {
// Pointer compression causes types larger than kTaggedSize to be unaligned.
constexpr bool v8_pointer_compression_unaligned =
@@ -79,7 +73,7 @@ V8_INLINE Address ReadExternalPointerField(Address field_address,
} else {
encoded_value = base::Memory<ExternalPointer_t>(field_address);
}
- return DecodeExternalPointer(cage_base, encoded_value, tag);
+ return DecodeExternalPointer(isolate, encoded_value, tag);
}
V8_INLINE void WriteExternalPointerField(Address field_address,
@@ -89,11 +83,12 @@ V8_INLINE void WriteExternalPointerField(Address field_address,
static_assert(kExternalPointerSize == kSystemPointerSize,
"Review the code below, once kExternalPointerSize is 4-byte "
"the address of the field will always be aligned");
+ DCHECK_EQ(value & kExternalPointerTagMask, 0);
ExternalPointer_t index =
base::ReadUnalignedValue<ExternalPointer_t>(field_address);
isolate->external_pointer_table().set(static_cast<uint32_t>(index),
- value ^ tag);
+ value | tag);
#else
// Pointer compression causes types larger than kTaggedSize to be unaligned.
constexpr bool v8_pointer_compression_unaligned =
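
An illustrative sketch, not part of the diff, of the tagging scheme the external-pointer changes above switch to: tags are OR-ed in on write and stripped with `& ~tag` on read, which is why the new DCHECKs require the raw value to have no bits inside the tag mask. The type aliases below are stand-ins, not the real V8 definitions.

#include <cstdint>

using AddressSketch = std::uintptr_t;
using ExternalPointerTagSketch = std::uintptr_t;

inline AddressSketch EncodeSketch(AddressSketch value, ExternalPointerTagSketch tag) {
  // The caller must pass a tag-free pointer; the table stores value | tag.
  return value | tag;
}

inline AddressSketch DecodeSketch(AddressSketch stored, ExternalPointerTagSketch tag) {
  // Masking the expected tag out recovers the pointer; a lookup with the
  // wrong tag leaves stray bits set and yields an unusable address.
  return stored & ~tag;
}
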
diff --git a/chromium/v8/src/common/external-pointer.h b/chromium/v8/src/common/external-pointer.h
index c0941f29782..5c1995749c1 100644
--- a/chromium/v8/src/common/external-pointer.h
+++ b/chromium/v8/src/common/external-pointer.h
@@ -12,7 +12,7 @@ namespace internal {
// Convert external pointer from on-V8-heap representation to an actual external
// pointer value.
-V8_INLINE Address DecodeExternalPointer(PtrComprCageBase isolate,
+V8_INLINE Address DecodeExternalPointer(const Isolate* isolate,
ExternalPointer_t encoded_pointer,
ExternalPointerTag tag);
@@ -34,7 +34,7 @@ V8_INLINE void InitExternalPointerField(Address field_address, Isolate* isolate,
// Reads external pointer for the field, and decodes it if the sandbox is
// enabled.
V8_INLINE Address ReadExternalPointerField(Address field_address,
- PtrComprCageBase isolate,
+ const Isolate* isolate,
ExternalPointerTag tag);
// Encodes value if the sandbox is enabled and writes it into the field.
diff --git a/chromium/v8/src/common/globals.h b/chromium/v8/src/common/globals.h
index f51c3210f8c..1be0efd0236 100644
--- a/chromium/v8/src/common/globals.h
+++ b/chromium/v8/src/common/globals.h
@@ -209,6 +209,15 @@ constexpr int kElidedFrameSlots = 0;
#endif
constexpr int kDoubleSizeLog2 = 3;
+// The maximal length of the string representation for a double value
+// (e.g. "-2.2250738585072020E-308"). It is composed as follows:
+// - 17 decimal digits, see kBase10MaximalLength (dtoa.h)
+// - 1 sign
+// - 1 decimal point
+// - 1 E or e
+// - 1 exponent sign
+// - 3 exponent
+constexpr int kMaxDoubleStringLength = 24;
// Total wasm code space per engine (i.e. per process) is limited to make
// certain attacks that rely on heap spraying harder.
@@ -457,11 +466,11 @@ inline LanguageMode stricter_language_mode(LanguageMode mode1,
// a keyed store is of the form a[expression] = foo.
enum class StoreOrigin { kMaybeKeyed, kNamed };
-enum TypeofMode : int { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
+enum class TypeofMode { kInside, kNotInside };
// Enums used by CEntry.
-enum SaveFPRegsMode { kDontSaveFPRegs, kSaveFPRegs };
-enum ArgvMode { kArgvOnStack, kArgvInRegister };
+enum class SaveFPRegsMode { kIgnore, kSave };
+enum class ArgvMode { kStack, kRegister };
// This constant is used as an undefined value when passing source positions.
constexpr int kNoSourcePosition = -1;
@@ -789,11 +798,14 @@ constexpr int kSpaceTagSize = 4;
STATIC_ASSERT(FIRST_SPACE == 0);
enum class AllocationType : uint8_t {
- kYoung, // Regular object allocated in NEW_SPACE or NEW_LO_SPACE
- kOld, // Regular object allocated in OLD_SPACE or LO_SPACE
- kCode, // Code object allocated in CODE_SPACE or CODE_LO_SPACE
- kMap, // Map object allocated in MAP_SPACE
- kReadOnly // Object allocated in RO_SPACE
+ kYoung, // Regular object allocated in NEW_SPACE or NEW_LO_SPACE
+ kOld, // Regular object allocated in OLD_SPACE or LO_SPACE
+ kCode, // Code object allocated in CODE_SPACE or CODE_LO_SPACE
+ kMap, // Map object allocated in MAP_SPACE
+ kReadOnly, // Object allocated in RO_SPACE
+ kSharedOld, // Regular object allocated in SHARED_OLD_SPACE or
+ // SHARED_LO_SPACE
+ kSharedMap, // Map object in SHARED_MAP_SPACE
};
inline size_t hash_value(AllocationType kind) {
@@ -812,6 +824,10 @@ inline std::ostream& operator<<(std::ostream& os, AllocationType kind) {
return os << "Map";
case AllocationType::kReadOnly:
return os << "ReadOnly";
+ case AllocationType::kSharedOld:
+ return os << "SharedOld";
+ case AllocationType::kSharedMap:
+ return os << "SharedMap";
}
UNREACHABLE();
}
@@ -880,6 +896,8 @@ enum InlineCacheState {
RECOMPUTE_HANDLER,
// Multiple receiver types have been seen.
POLYMORPHIC,
+ // Many DOM receiver types have been seen for the same accessor.
+ MEGADOM,
// Many receiver types have been seen.
MEGAMORPHIC,
// A generic handler is installed and no extra type feedback is recorded.
@@ -901,6 +919,8 @@ inline const char* InlineCacheState2String(InlineCacheState state) {
return "POLYMORPHIC";
case MEGAMORPHIC:
return "MEGAMORPHIC";
+ case MEGADOM:
+ return "MEGADOM";
case GENERIC:
return "GENERIC";
}
@@ -1602,6 +1622,7 @@ inline std::ostream& operator<<(std::ostream& os,
}
enum class SpeculationMode { kAllowSpeculation, kDisallowSpeculation };
+enum class CallFeedbackContent { kTarget, kReceiver };
inline std::ostream& operator<<(std::ostream& os,
SpeculationMode speculation_mode) {
@@ -1758,6 +1779,10 @@ class PtrComprCageBase {
inline Address address() const;
+ bool operator==(const PtrComprCageBase& other) const {
+ return address_ == other.address_;
+ }
+
private:
Address address_;
};
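
A small illustrative check, not part of the diff, of the kMaxDoubleStringLength budget added to globals.h above; the comment's own example value uses the full 24 characters.

#include <cassert>
#include <cstring>

int main() {
  // 17 digits + sign + decimal point + 'E' + exponent sign + 3 exponent digits.
  assert(17 + 1 + 1 + 1 + 1 + 3 == 24);
  assert(std::strlen("-2.2250738585072020E-308") == 24);
  return 0;
}
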
diff --git a/chromium/v8/src/common/message-template.h b/chromium/v8/src/common/message-template.h
index 5193a0bae3e..6d7116f73e5 100644
--- a/chromium/v8/src/common/message-template.h
+++ b/chromium/v8/src/common/message-template.h
@@ -330,6 +330,8 @@ namespace internal {
"Expected letters optionally connected with underscores or hyphens for " \
"a location, got %") \
T(InvalidArrayBufferLength, "Invalid array buffer length") \
+ T(InvalidArrayBufferMaxLength, "Invalid array buffer max length") \
+ T(InvalidArrayBufferResizeLength, "%: Invalid length parameter") \
T(ArrayBufferAllocationFailed, "Array buffer allocation failed") \
T(Invalid, "Invalid % : %") \
T(InvalidArrayLength, "Invalid array length") \
@@ -354,6 +356,7 @@ namespace internal {
T(LetInLexicalBinding, "let is disallowed as a lexically bound name") \
T(LocaleMatcher, "Illegal value for localeMatcher:%") \
T(NormalizationForm, "The normalization form should be one of %.") \
+ T(OutOfMemory, "%: Out of memory") \
T(ParameterOfFunctionOutOfRange, \
"Paramenter % of function %() is % and out of range") \
T(ZeroDigitNumericSeparator, \
@@ -564,7 +567,7 @@ namespace internal {
T(WasmTrapRemByZero, "remainder by zero") \
T(WasmTrapFloatUnrepresentable, "float unrepresentable in integer range") \
T(WasmTrapTableOutOfBounds, "table index is out of bounds") \
- T(WasmTrapFuncSigMismatch, "function signature mismatch") \
+ T(WasmTrapFuncSigMismatch, "null function or function signature mismatch") \
T(WasmTrapMultiReturnLengthMismatch, "multi-return length mismatch") \
T(WasmTrapJSTypeError, "type incompatibility when transforming from/to JS") \
T(WasmTrapDataSegmentDropped, "data segment has been dropped") \
diff --git a/chromium/v8/src/common/ptr-compr-inl.h b/chromium/v8/src/common/ptr-compr-inl.h
index 66c22311b02..7a4d940891c 100644
--- a/chromium/v8/src/common/ptr-compr-inl.h
+++ b/chromium/v8/src/common/ptr-compr-inl.h
@@ -15,30 +15,10 @@ namespace internal {
#ifdef V8_COMPRESS_POINTERS
-#if defined V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE
-
-PtrComprCageBase::PtrComprCageBase(const Isolate* isolate)
- : address_(isolate->isolate_root()) {}
-PtrComprCageBase::PtrComprCageBase(const LocalIsolate* isolate)
- : address_(isolate->isolate_root()) {}
-
-#elif defined V8_COMPRESS_POINTERS_IN_SHARED_CAGE
-
PtrComprCageBase::PtrComprCageBase(const Isolate* isolate)
- : address_(isolate->isolate_root()) {
- UNIMPLEMENTED();
-}
+ : address_(isolate->cage_base()) {}
PtrComprCageBase::PtrComprCageBase(const LocalIsolate* isolate)
- : address_(isolate->isolate_root()) {
- UNIMPLEMENTED();
-}
-
-#else
-
-#error "Pointer compression build configuration error"
-
-#endif // V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE,
- // V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+ : address_(isolate->cage_base()) {}
Address PtrComprCageBase::address() const {
Address ret = address_;
diff --git a/chromium/v8/src/compiler-dispatcher/compiler-dispatcher.cc b/chromium/v8/src/compiler-dispatcher/compiler-dispatcher.cc
index a22c79e0ad9..2af0fd498c3 100644
--- a/chromium/v8/src/compiler-dispatcher/compiler-dispatcher.cc
+++ b/chromium/v8/src/compiler-dispatcher/compiler-dispatcher.cc
@@ -59,8 +59,7 @@ base::Optional<CompilerDispatcher::JobId> CompilerDispatcher::Enqueue(
const FunctionLiteral* function_literal) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.CompilerDispatcherEnqueue");
- RuntimeCallTimerScope runtimeTimer(
- isolate_, RuntimeCallCounterId::kCompileEnqueueOnDispatcher);
+ RCS_SCOPE(isolate_, RuntimeCallCounterId::kCompileEnqueueOnDispatcher);
if (!IsEnabled()) return base::nullopt;
@@ -129,8 +128,7 @@ void CompilerDispatcher::RegisterSharedFunctionInfo(
void CompilerDispatcher::WaitForJobIfRunningOnBackground(Job* job) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.CompilerDispatcherWaitForBackgroundJob");
- RuntimeCallTimerScope runtimeTimer(
- isolate_, RuntimeCallCounterId::kCompileWaitForDispatcher);
+ RCS_SCOPE(isolate_, RuntimeCallCounterId::kCompileWaitForDispatcher);
base::MutexGuard lock(&mutex_);
if (running_background_jobs_.find(job) == running_background_jobs_.end()) {
@@ -149,8 +147,7 @@ void CompilerDispatcher::WaitForJobIfRunningOnBackground(Job* job) {
bool CompilerDispatcher::FinishNow(Handle<SharedFunctionInfo> function) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.CompilerDispatcherFinishNow");
- RuntimeCallTimerScope runtimeTimer(
- isolate_, RuntimeCallCounterId::kCompileFinishNowOnDispatcher);
+ RCS_SCOPE(isolate_, RuntimeCallCounterId::kCompileFinishNowOnDispatcher);
if (trace_compiler_dispatcher_) {
PrintF("CompilerDispatcher: finishing ");
function->ShortPrint();
diff --git a/chromium/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc b/chromium/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
index 73292dd5ad6..c7727c951b9 100644
--- a/chromium/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
+++ b/chromium/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
@@ -57,16 +57,19 @@ class OptimizingCompileDispatcher::CompileTask : public CancelableTask {
private:
// v8::Task overrides.
void RunInternal() override {
+#ifdef V8_RUNTIME_CALL_STATS
WorkerThreadRuntimeCallStatsScope runtime_call_stats_scope(
worker_thread_runtime_call_stats_);
LocalIsolate local_isolate(isolate_, ThreadKind::kBackground,
runtime_call_stats_scope.Get());
+#else // V8_RUNTIME_CALL_STATS
+ LocalIsolate local_isolate(isolate_, ThreadKind::kBackground);
+#endif // V8_RUNTIME_CALL_STATS
DCHECK(local_isolate.heap()->IsParked());
{
- RuntimeCallTimerScope runtimeTimer(
- runtime_call_stats_scope.Get(),
- RuntimeCallCounterId::kOptimizeBackgroundDispatcherJob);
+ RCS_SCOPE(runtime_call_stats_scope.Get(),
+ RuntimeCallCounterId::kOptimizeBackgroundDispatcherJob);
TimerEventScope<TimerEventRecompileConcurrent> timer(isolate_);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
@@ -78,7 +81,7 @@ class OptimizingCompileDispatcher::CompileTask : public CancelableTask {
}
dispatcher_->CompileNext(dispatcher_->NextInput(&local_isolate),
- runtime_call_stats_scope.Get(), &local_isolate);
+ &local_isolate);
}
{
base::MutexGuard lock_guard(&dispatcher_->ref_count_mutex_);
@@ -111,12 +114,12 @@ OptimizedCompilationJob* OptimizingCompileDispatcher::NextInput(
}
void OptimizingCompileDispatcher::CompileNext(OptimizedCompilationJob* job,
- RuntimeCallStats* stats,
LocalIsolate* local_isolate) {
if (!job) return;
// The function may have already been optimized by OSR. Simply continue.
- CompilationJob::Status status = job->ExecuteJob(stats, local_isolate);
+ CompilationJob::Status status =
+ job->ExecuteJob(local_isolate->runtime_call_stats(), local_isolate);
USE(status); // Prevent an unused-variable error.
{
@@ -167,6 +170,7 @@ void OptimizingCompileDispatcher::FlushQueues(
}
void OptimizingCompileDispatcher::Flush(BlockingBehavior blocking_behavior) {
+ HandleScope handle_scope(isolate_);
FlushQueues(blocking_behavior, true);
if (FLAG_trace_concurrent_recompilation) {
PrintF(" ** Flushed concurrent recompilation queues. (mode: %s)\n",
@@ -176,6 +180,7 @@ void OptimizingCompileDispatcher::Flush(BlockingBehavior blocking_behavior) {
}
void OptimizingCompileDispatcher::Stop() {
+ HandleScope handle_scope(isolate_);
FlushQueues(BlockingBehavior::kBlock, false);
// At this point the optimizing compiler thread's event loop has stopped.
// There is no need for a mutex when reading input_queue_length_.
diff --git a/chromium/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.h b/chromium/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.h
index 37454c67e0d..4ae966192fa 100644
--- a/chromium/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.h
+++ b/chromium/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.h
@@ -64,8 +64,7 @@ class V8_EXPORT_PRIVATE OptimizingCompileDispatcher {
bool restore_function_code);
void FlushInputQueue();
void FlushOutputQueue(bool restore_function_code);
- void CompileNext(OptimizedCompilationJob* job, RuntimeCallStats* stats,
- LocalIsolate* local_isolate);
+ void CompileNext(OptimizedCompilationJob* job, LocalIsolate* local_isolate);
OptimizedCompilationJob* NextInput(LocalIsolate* local_isolate);
inline int InputQueueIndex(int i) {
diff --git a/chromium/v8/src/compiler/OWNERS b/chromium/v8/src/compiler/OWNERS
index 5260502f32a..1626bc54876 100644
--- a/chromium/v8/src/compiler/OWNERS
+++ b/chromium/v8/src/compiler/OWNERS
@@ -11,6 +11,8 @@ per-file wasm-*=bbudge@chromium.org
per-file wasm-*=clemensb@chromium.org
per-file wasm-*=gdeepti@chromium.org
per-file wasm-*=jkummerow@chromium.org
+per-file wasm-*=manoskouk@chromium.org
+per-file wasm-*=thibaudm@chromium.org
per-file int64-lowering.*=ahaas@chromium.org
@@ -23,7 +25,3 @@ per-file opcodes.*=ahaas@chromium.org
per-file opcodes.*=bbudge@chromium.org
per-file opcodes.*=gdeepti@chromium.org
per-file opcodes.*=zhin@chromium.org
-
-per-file simd-scalar-lowering.*=bbudge@chromium.org
-per-file simd-scalar-lowering.*=gdeepti@chromium.org
-per-file simd-scalar-lowering.*=zhin@chromium.org
diff --git a/chromium/v8/src/compiler/access-builder.cc b/chromium/v8/src/compiler/access-builder.cc
index 3cc4000d12e..675371df57a 100644
--- a/chromium/v8/src/compiler/access-builder.cc
+++ b/chromium/v8/src/compiler/access-builder.cc
@@ -31,11 +31,11 @@ FieldAccess AccessBuilder::ForExternalIntPtr() {
}
// static
-FieldAccess AccessBuilder::ForMap() {
+FieldAccess AccessBuilder::ForMap(WriteBarrierKind write_barrier) {
FieldAccess access = {kTaggedBase, HeapObject::kMapOffset,
MaybeHandle<Name>(), MaybeHandle<Map>(),
- Type::OtherInternal(), MachineType::TaggedPointer(),
- kMapWriteBarrier};
+ Type::OtherInternal(), MachineType::MapInHeader(),
+ write_barrier};
return access;
}
@@ -105,12 +105,12 @@ FieldAccess AccessBuilder::ForJSObjectElements() {
}
// static
-FieldAccess AccessBuilder::ForJSObjectInObjectProperty(const MapRef& map,
- int index) {
+FieldAccess AccessBuilder::ForJSObjectInObjectProperty(
+ const MapRef& map, int index, MachineType machine_type) {
int const offset = map.GetInObjectPropertyOffset(index);
FieldAccess access = {kTaggedBase, offset,
MaybeHandle<Name>(), MaybeHandle<Map>(),
- Type::NonInternal(), MachineType::AnyTagged(),
+ Type::NonInternal(), machine_type,
kFullWriteBarrier};
return access;
}
@@ -760,7 +760,7 @@ FieldAccess AccessBuilder::ForExternalStringResourceData() {
ConstFieldInfo::None(),
false,
#ifdef V8_HEAP_SANDBOX
- kExternalStringResourceTag,
+ kExternalStringResourceDataTag,
#endif
};
return access;
diff --git a/chromium/v8/src/compiler/access-builder.h b/chromium/v8/src/compiler/access-builder.h
index 08e11179ba7..fa68628cf80 100644
--- a/chromium/v8/src/compiler/access-builder.h
+++ b/chromium/v8/src/compiler/access-builder.h
@@ -31,7 +31,7 @@ class V8_EXPORT_PRIVATE AccessBuilder final
// Access to heap object fields and elements (based on tagged pointer).
// Provides access to HeapObject::map() field.
- static FieldAccess ForMap();
+ static FieldAccess ForMap(WriteBarrierKind write_barrier = kMapWriteBarrier);
// Provides access to HeapNumber::value() field.
static FieldAccess ForHeapNumberValue();
@@ -58,7 +58,9 @@ class V8_EXPORT_PRIVATE AccessBuilder final
static FieldAccess ForJSObjectElements();
// Provides access to JSObject inobject property fields.
- static FieldAccess ForJSObjectInObjectProperty(const MapRef& map, int index);
+ static FieldAccess ForJSObjectInObjectProperty(
+ const MapRef& map, int index,
+ MachineType machine_type = MachineType::AnyTagged());
static FieldAccess ForJSObjectOffset(
int offset, WriteBarrierKind write_barrier_kind = kFullWriteBarrier);
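
An illustrative sketch, not part of the diff, of the fallible-reference pattern used throughout the access-info.cc changes that follow: direct MapRef construction is replaced by TryMakeRef, which can fail on background threads, so every call site checks the optional and bails out early. The types below are hypothetical stand-ins for the broker machinery.

#include <optional>

struct MapRefSketch {
  int elements_kind = 0;
};

// Stand-in for TryMakeRef(broker(), map): returns nullopt when the data needed
// to build the reference is not available to the current thread.
inline std::optional<MapRefSketch> TryMakeRefSketch(bool data_available) {
  if (!data_available) return std::nullopt;
  return MapRefSketch{};
}

// Mirrors the new control flow in ComputeElementAccessInfo: bail out early
// ("return Invalid()") instead of constructing a MapRef unconditionally.
inline std::optional<int> ComputeElementAccessSketch(bool data_available) {
  std::optional<MapRefSketch> map_ref = TryMakeRefSketch(data_available);
  if (!map_ref.has_value()) return std::nullopt;
  return map_ref->elements_kind;
}
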
diff --git a/chromium/v8/src/compiler/access-info.cc b/chromium/v8/src/compiler/access-info.cc
index bc3f113eb79..0e3ccc19483 100644
--- a/chromium/v8/src/compiler/access-info.cc
+++ b/chromium/v8/src/compiler/access-info.cc
@@ -385,9 +385,10 @@ AccessInfoFactory::AccessInfoFactory(JSHeapBroker* broker,
base::Optional<ElementAccessInfo> AccessInfoFactory::ComputeElementAccessInfo(
Handle<Map> map, AccessMode access_mode) const {
// Check if it is safe to inline element access for the {map}.
- MapRef map_ref(broker(), map);
- if (!CanInlineElementAccess(map_ref)) return base::nullopt;
- ElementsKind const elements_kind = map_ref.elements_kind();
+ base::Optional<MapRef> map_ref = TryMakeRef(broker(), map);
+ if (!map_ref.has_value()) return {};
+ if (!CanInlineElementAccess(*map_ref)) return base::nullopt;
+ ElementsKind const elements_kind = map_ref->elements_kind();
return ElementAccessInfo({{map}, zone()}, elements_kind, zone());
}
@@ -427,8 +428,8 @@ PropertyAccessInfo AccessInfoFactory::ComputeDataFieldAccessInfo(
Handle<Map> receiver_map, Handle<Map> map, MaybeHandle<JSObject> holder,
InternalIndex descriptor, AccessMode access_mode) const {
DCHECK(descriptor.is_found());
- Handle<DescriptorArray> descriptors(map->instance_descriptors(isolate()),
- isolate());
+ Handle<DescriptorArray> descriptors = broker()->CanonicalPersistentHandle(
+ map->instance_descriptors(kAcquireLoad));
PropertyDetails const details = descriptors->GetDetails(descriptor);
int index = descriptors->GetFieldIndex(descriptor);
Representation details_representation = details.representation();
@@ -438,46 +439,55 @@ PropertyAccessInfo AccessInfoFactory::ComputeDataFieldAccessInfo(
// fields for which the representation has not yet been
// determined by the runtime. So we need to catch this case
// here and fall back to use the regular IC logic instead.
- return PropertyAccessInfo::Invalid(zone());
+ return Invalid();
}
FieldIndex field_index =
FieldIndex::ForPropertyIndex(*map, index, details_representation);
Type field_type = Type::NonInternal();
MaybeHandle<Map> field_map;
- MapRef map_ref(broker(), map);
+
+ base::Optional<MapRef> map_ref = TryMakeRef(broker(), map);
+ if (!map_ref.has_value()) return Invalid();
+
ZoneVector<CompilationDependency const*> unrecorded_dependencies(zone());
- map_ref.SerializeOwnDescriptor(descriptor);
+ if (!map_ref->TrySerializeOwnDescriptor(descriptor)) {
+ return Invalid();
+ }
if (details_representation.IsSmi()) {
field_type = Type::SignedSmall();
unrecorded_dependencies.push_back(
- dependencies()->FieldRepresentationDependencyOffTheRecord(map_ref,
+ dependencies()->FieldRepresentationDependencyOffTheRecord(*map_ref,
descriptor));
} else if (details_representation.IsDouble()) {
field_type = type_cache_->kFloat64;
unrecorded_dependencies.push_back(
- dependencies()->FieldRepresentationDependencyOffTheRecord(map_ref,
+ dependencies()->FieldRepresentationDependencyOffTheRecord(*map_ref,
descriptor));
} else if (details_representation.IsHeapObject()) {
// Extract the field type from the property details (make sure its
// representation is TaggedPointer to reflect the heap object case).
- Handle<FieldType> descriptors_field_type(
- descriptors->GetFieldType(descriptor), isolate());
+ Handle<FieldType> descriptors_field_type =
+ broker()->CanonicalPersistentHandle(
+ descriptors->GetFieldType(descriptor));
if (descriptors_field_type->IsNone()) {
// Store is not safe if the field type was cleared.
if (access_mode == AccessMode::kStore) {
- return PropertyAccessInfo::Invalid(zone());
+ return Invalid();
}
// The field type was cleared by the GC, so we don't know anything
// about the contents now.
}
unrecorded_dependencies.push_back(
- dependencies()->FieldRepresentationDependencyOffTheRecord(map_ref,
+ dependencies()->FieldRepresentationDependencyOffTheRecord(*map_ref,
descriptor));
if (descriptors_field_type->IsClass()) {
// Remember the field map, and try to infer a useful type.
- Handle<Map> map(descriptors_field_type->AsClass(), isolate());
- field_type = Type::For(MapRef(broker(), map));
+ Handle<Map> map = broker()->CanonicalPersistentHandle(
+ descriptors_field_type->AsClass());
+ base::Optional<MapRef> maybe_ref = TryMakeRef(broker(), map);
+ if (!maybe_ref.has_value()) return Invalid();
+ field_type = Type::For(*maybe_ref);
field_map = MaybeHandle<Map>(map);
}
} else {
@@ -486,7 +496,7 @@ PropertyAccessInfo AccessInfoFactory::ComputeDataFieldAccessInfo(
// TODO(turbofan): We may want to do this only depending on the use
// of the access info.
unrecorded_dependencies.push_back(
- dependencies()->FieldTypeDependencyOffTheRecord(map_ref, descriptor));
+ dependencies()->FieldTypeDependencyOffTheRecord(*map_ref, descriptor));
PropertyConstness constness;
if (details.IsReadOnly() && !details.IsConfigurable()) {
@@ -497,11 +507,11 @@ PropertyAccessInfo AccessInfoFactory::ComputeDataFieldAccessInfo(
// of turboprop.
constness = PropertyConstness::kMutable;
} else {
- map_ref.SerializeOwnDescriptor(descriptor);
- constness = dependencies()->DependOnFieldConstness(map_ref, descriptor);
+ constness = dependencies()->DependOnFieldConstness(*map_ref, descriptor);
}
- Handle<Map> field_owner_map(map->FindFieldOwner(isolate(), descriptor),
- isolate());
+ // TODO(v8:11670): Make FindFieldOwner and friends robust wrt concurrency.
+ Handle<Map> field_owner_map = broker()->CanonicalPersistentHandle(
+ map->FindFieldOwner(isolate(), descriptor));
switch (constness) {
case PropertyConstness::kMutable:
return PropertyAccessInfo::DataField(
@@ -527,13 +537,14 @@ PropertyAccessInfo AccessorAccessInfoHelper(
AccessMode access_mode, AccessorsObjectGetter get_accessors) {
if (map->instance_type() == JS_MODULE_NAMESPACE_TYPE) {
DCHECK(map->is_prototype_map());
- Handle<PrototypeInfo> proto_info(PrototypeInfo::cast(map->prototype_info()),
- isolate);
- Handle<JSModuleNamespace> module_namespace(
- JSModuleNamespace::cast(proto_info->module_namespace()), isolate);
- Handle<Cell> cell(Cell::cast(module_namespace->module().exports().Lookup(
- isolate, name, Smi::ToInt(name->GetHash()))),
- isolate);
+ Handle<PrototypeInfo> proto_info = broker->CanonicalPersistentHandle(
+ PrototypeInfo::cast(map->prototype_info()));
+ Handle<JSModuleNamespace> module_namespace =
+ broker->CanonicalPersistentHandle(
+ JSModuleNamespace::cast(proto_info->module_namespace()));
+ Handle<Cell> cell = broker->CanonicalPersistentHandle(
+ Cell::cast(module_namespace->module().exports().Lookup(
+ isolate, name, Smi::ToInt(name->GetHash()))));
if (cell->value().IsTheHole(isolate)) {
// This module has not been fully initialized yet.
return PropertyAccessInfo::Invalid(zone);
@@ -548,16 +559,20 @@ PropertyAccessInfo AccessorAccessInfoHelper(
return PropertyAccessInfo::FastAccessorConstant(zone, receiver_map,
Handle<Object>(), holder);
}
- Handle<Object> accessors = get_accessors();
- if (!accessors->IsAccessorPair()) {
+ Handle<Object> maybe_accessors = get_accessors();
+ if (!maybe_accessors->IsAccessorPair()) {
return PropertyAccessInfo::Invalid(zone);
}
- Handle<Object> accessor(access_mode == AccessMode::kLoad
- ? Handle<AccessorPair>::cast(accessors)->getter()
- : Handle<AccessorPair>::cast(accessors)->setter(),
- isolate);
+ Handle<AccessorPair> accessors = Handle<AccessorPair>::cast(maybe_accessors);
+ Handle<Object> accessor = broker->CanonicalPersistentHandle(
+ access_mode == AccessMode::kLoad ? accessors->getter()
+ : accessors->setter());
+
+ ObjectData* data = broker->TryGetOrCreateData(accessor);
+ if (data == nullptr) return PropertyAccessInfo::Invalid(zone);
+
if (!accessor->IsJSFunction()) {
- CallOptimization optimization(isolate, accessor);
+ CallOptimization optimization(broker->local_isolate_or_isolate(), accessor);
if (!optimization.is_simple_api_call() ||
optimization.IsCrossContextLazyAccessorPair(
*broker->target_native_context().object(), *map)) {
@@ -565,7 +580,9 @@ PropertyAccessInfo AccessorAccessInfoHelper(
}
CallOptimization::HolderLookup lookup;
- holder = optimization.LookupHolderOfExpectedType(receiver_map, &lookup);
+ holder = broker->CanonicalPersistentHandle(
+ optimization.LookupHolderOfExpectedType(
+ broker->local_isolate_or_isolate(), receiver_map, &lookup));
if (lookup == CallOptimization::kHolderNotFound) {
return PropertyAccessInfo::Invalid(zone);
}
@@ -574,9 +591,11 @@ PropertyAccessInfo AccessorAccessInfoHelper(
DCHECK_IMPLIES(lookup == CallOptimization::kHolderFound, !holder.is_null());
}
if (access_mode == AccessMode::kLoad) {
- Handle<Name> cached_property_name;
- if (FunctionTemplateInfo::TryGetCachedPropertyName(isolate, accessor)
- .ToHandle(&cached_property_name)) {
+ base::Optional<Name> maybe_cached_property_name =
+ FunctionTemplateInfo::TryGetCachedPropertyName(isolate, *accessor);
+ if (maybe_cached_property_name.has_value()) {
+ Handle<Name> cached_property_name =
+ broker->CanonicalPersistentHandle(maybe_cached_property_name.value());
PropertyAccessInfo access_info = ai_factory->ComputePropertyAccessInfo(
map, cached_property_name, access_mode);
if (!access_info.IsInvalid()) return access_info;
@@ -598,12 +617,13 @@ PropertyAccessInfo AccessInfoFactory::ComputeAccessorDescriptorAccessInfo(
MaybeHandle<JSObject> holder, InternalIndex descriptor,
AccessMode access_mode) const {
DCHECK(descriptor.is_found());
- Handle<DescriptorArray> descriptors(
- holder_map->instance_descriptors(kRelaxedLoad), isolate());
+ Handle<DescriptorArray> descriptors = broker()->CanonicalPersistentHandle(
+ holder_map->instance_descriptors(kRelaxedLoad));
SLOW_DCHECK(descriptor == descriptors->Search(*name, *holder_map));
auto get_accessors = [&]() {
- return handle(descriptors->GetStrongValue(descriptor), isolate());
+ return broker()->CanonicalPersistentHandle(
+ descriptors->GetStrongValue(descriptor));
};
return AccessorAccessInfoHelper(isolate(), zone(), broker(), this,
receiver_map, name, holder_map, holder,
@@ -620,7 +640,7 @@ PropertyAccessInfo AccessInfoFactory::ComputeDictionaryProtoAccessInfo(
// We can only inline accesses to constant properties.
if (details.constness() != PropertyConstness::kConst) {
- return PropertyAccessInfo::Invalid(zone());
+ return Invalid();
}
if (details.kind() == PropertyKind::kData) {
@@ -631,7 +651,7 @@ PropertyAccessInfo AccessInfoFactory::ComputeDictionaryProtoAccessInfo(
auto get_accessors = [&]() {
return JSObject::DictionaryPropertyAt(holder, dictionary_index);
};
- Handle<Map> holder_map = handle(holder->map(), isolate());
+ Handle<Map> holder_map = broker()->CanonicalPersistentHandle(holder->map());
return AccessorAccessInfoHelper(isolate(), zone(), broker(), this,
receiver_map, name, holder_map, holder,
access_mode, get_accessors);
@@ -699,17 +719,16 @@ PropertyAccessInfo AccessInfoFactory::ComputePropertyAccessInfo(
Handle<Map> map, Handle<Name> name, AccessMode access_mode) const {
CHECK(name->IsUniqueName());
- base::SharedMutexGuardIf<base::kShared> mutex_guard(
- isolate()->map_updater_access(), should_lock_mutex());
- MapUpdaterMutexDepthScope mumd_scope(this);
+ JSHeapBroker::MapUpdaterGuardIfNeeded mumd_scope(
+ broker(), isolate()->map_updater_access());
if (access_mode == AccessMode::kHas && !map->IsJSReceiverMap()) {
- return PropertyAccessInfo::Invalid(zone());
+ return Invalid();
}
// Check if it is safe to inline property access for the {map}.
if (!CanInlinePropertyAccess(map, access_mode)) {
- return PropertyAccessInfo::Invalid(zone());
+ return Invalid();
}
// We support fast inline cases for certain JSObject getters.
@@ -729,7 +748,7 @@ PropertyAccessInfo AccessInfoFactory::ComputePropertyAccessInfo(
PropertyDetails details = PropertyDetails::Empty();
InternalIndex index = InternalIndex::NotFound();
if (!TryLoadPropertyDetails(map, holder, name, &index, &details)) {
- return PropertyAccessInfo::Invalid(zone());
+ return Invalid();
}
if (index.is_found()) {
@@ -739,7 +758,7 @@ PropertyAccessInfo AccessInfoFactory::ComputePropertyAccessInfo(
// Don't bother optimizing stores to read-only properties.
if (details.IsReadOnly()) {
- return PropertyAccessInfo::Invalid(zone());
+ return Invalid();
}
if (details.kind() == kData && !holder.is_null()) {
// This is a store to a property not found on the receiver but on a
@@ -758,7 +777,7 @@ PropertyAccessInfo AccessInfoFactory::ComputePropertyAccessInfo(
// reaching a dictionary mode prototype holding the property. Due to
// this only being an intermediate state, we don't support these kinds
// of heterogeneous prototype chains.
- return PropertyAccessInfo::Invalid(zone());
+ return Invalid();
}
// TryLoadPropertyDetails only succeeds if we know the holder.
@@ -781,7 +800,7 @@ PropertyAccessInfo AccessInfoFactory::ComputePropertyAccessInfo(
// due to the complications of checking dictionary mode prototypes for
// modification, we don't attempt to support dictionary mode prototypes
// occurring before a fast mode holder on the chain.
- return PropertyAccessInfo::Invalid(zone());
+ return Invalid();
}
if (details.location() == kField) {
if (details.kind() == kData) {
@@ -790,7 +809,7 @@ PropertyAccessInfo AccessInfoFactory::ComputePropertyAccessInfo(
} else {
DCHECK_EQ(kAccessor, details.kind());
// TODO(turbofan): Add support for general accessors?
- return PropertyAccessInfo::Invalid(zone());
+ return Invalid();
}
} else {
DCHECK_EQ(kDescriptor, details.location());
@@ -806,9 +825,17 @@ PropertyAccessInfo AccessInfoFactory::ComputePropertyAccessInfo(
// Don't search on the prototype chain for special indices in case of
// integer indexed exotic objects (see ES6 section 9.4.5).
- if (map->IsJSTypedArrayMap() && name->IsString() &&
- IsSpecialIndex(String::cast(*name))) {
- return PropertyAccessInfo::Invalid(zone());
+ if (map->IsJSTypedArrayMap() && name->IsString()) {
+ if (broker()->IsMainThread()) {
+ if (IsSpecialIndex(String::cast(*name))) {
+ return Invalid();
+ }
+ } else {
+ // TODO(jgruber): We are being conservative here since we can't access
+ // string contents from background threads. Should that become possible
+ // in the future, remove this bailout.
+ return Invalid();
+ }
}
// Don't search on the prototype when storing in literals.
@@ -818,7 +845,7 @@ PropertyAccessInfo AccessInfoFactory::ComputePropertyAccessInfo(
// Don't lookup private symbols on the prototype chain.
if (name->IsPrivate()) {
- return PropertyAccessInfo::Invalid(zone());
+ return Invalid();
}
if (V8_DICT_PROPERTY_CONST_TRACKING_BOOL && !holder.is_null()) {
@@ -833,21 +860,26 @@ PropertyAccessInfo AccessInfoFactory::ComputePropertyAccessInfo(
}
// Walk up the prototype chain.
- MapRef(broker(), map).SerializePrototype();
+ base::Optional<MapRef> map_ref = TryMakeRef(broker(), map);
+ if (!map_ref.has_value()) return Invalid();
+ if (!map_ref->TrySerializePrototype()) return Invalid();
+
// Acquire synchronously the map's prototype's map to guarantee that every
// time we use it, we use the same Map.
- Handle<Map> map_prototype_map(map->prototype().synchronized_map(),
- isolate());
+ Handle<Map> map_prototype_map =
+ broker()->CanonicalPersistentHandle(map->prototype().map(kAcquireLoad));
if (!map_prototype_map->IsJSObjectMap()) {
// Perform the implicit ToObject for primitives here.
// Implemented according to ES6 section 7.3.2 GetV (V, P).
Handle<JSFunction> constructor;
- if (Map::GetConstructorFunction(
- map, broker()->target_native_context().object())
- .ToHandle(&constructor)) {
- map = handle(constructor->initial_map(), isolate());
- map_prototype_map =
- handle(map->prototype().synchronized_map(), isolate());
+ base::Optional<JSFunction> maybe_constructor =
+ Map::GetConstructorFunction(
+ *map, *broker()->target_native_context().object());
+ if (maybe_constructor.has_value()) {
+ map = broker()->CanonicalPersistentHandle(
+ maybe_constructor->initial_map());
+ map_prototype_map = broker()->CanonicalPersistentHandle(
+ map->prototype().map(kAcquireLoad));
DCHECK(map_prototype_map->IsJSObjectMap());
} else if (map->prototype().IsNull()) {
if (dictionary_prototype_on_chain) {
@@ -856,7 +888,7 @@ PropertyAccessInfo AccessInfoFactory::ComputePropertyAccessInfo(
// with dictionary mode prototypes on the chain, either. This is again
// just due to how we currently deal with dependencies for dictionary
// properties during finalization.
- return PropertyAccessInfo::Invalid(zone());
+ return Invalid();
}
// Store to property not found on the receiver or any prototype, we need
@@ -870,16 +902,17 @@ PropertyAccessInfo AccessInfoFactory::ComputePropertyAccessInfo(
// Implemented according to ES6 section 9.1.8 [[Get]] (P, Receiver)
return PropertyAccessInfo::NotFound(zone(), receiver_map, holder);
} else {
- return PropertyAccessInfo::Invalid(zone());
+ return Invalid();
}
}
- holder = handle(JSObject::cast(map->prototype()), isolate());
+ holder =
+ broker()->CanonicalPersistentHandle(JSObject::cast(map->prototype()));
map = map_prototype_map;
CHECK(!map->is_deprecated());
if (!CanInlinePropertyAccess(map, access_mode)) {
- return PropertyAccessInfo::Invalid(zone());
+ return Invalid();
}
// Successful lookup on prototype chain needs to guarantee that all the
@@ -901,7 +934,7 @@ PropertyAccessInfo AccessInfoFactory::FinalizePropertyAccessInfosAsOne(
return result;
}
}
- return PropertyAccessInfo::Invalid(zone());
+ return Invalid();
}
void AccessInfoFactory::ComputePropertyAccessInfos(
@@ -982,23 +1015,26 @@ base::Optional<ElementAccessInfo> AccessInfoFactory::ConsolidateElementLoad(
if (feedback.transition_groups().empty()) return base::nullopt;
DCHECK(!feedback.transition_groups().front().empty());
- MapRef first_map(broker(), feedback.transition_groups().front().front());
- InstanceType instance_type = first_map.instance_type();
- ElementsKind elements_kind = first_map.elements_kind();
+ Handle<Map> first_map = feedback.transition_groups().front().front();
+ base::Optional<MapRef> first_map_ref = TryMakeRef(broker(), first_map);
+ if (!first_map_ref.has_value()) return {};
+ InstanceType instance_type = first_map_ref->instance_type();
+ ElementsKind elements_kind = first_map_ref->elements_kind();
ZoneVector<Handle<Map>> maps(zone());
for (auto const& group : feedback.transition_groups()) {
for (Handle<Map> map_handle : group) {
- MapRef map(broker(), map_handle);
- if (map.instance_type() != instance_type ||
- !CanInlineElementAccess(map)) {
+ base::Optional<MapRef> map = TryMakeRef(broker(), map_handle);
+ if (!map.has_value()) return {};
+ if (map->instance_type() != instance_type ||
+ !CanInlineElementAccess(*map)) {
return base::nullopt;
}
- if (!GeneralizeElementsKind(elements_kind, map.elements_kind())
+ if (!GeneralizeElementsKind(elements_kind, map->elements_kind())
.To(&elements_kind)) {
return base::nullopt;
}
- maps.push_back(map.object());
+ maps.push_back(map->object());
}
}
@@ -1012,7 +1048,7 @@ PropertyAccessInfo AccessInfoFactory::LookupSpecialFieldAccessor(
if (Name::Equals(isolate(), name, isolate()->factory()->length_string())) {
return PropertyAccessInfo::StringLength(zone(), map);
}
- return PropertyAccessInfo::Invalid(zone());
+ return Invalid();
}
// Check for special JSObject field accessors.
FieldIndex field_index;
@@ -1041,7 +1077,7 @@ PropertyAccessInfo AccessInfoFactory::LookupSpecialFieldAccessor(
return PropertyAccessInfo::DataField(zone(), map, {{}, zone()}, field_index,
field_representation, field_type, map);
}
- return PropertyAccessInfo::Invalid(zone());
+ return Invalid();
}
PropertyAccessInfo AccessInfoFactory::LookupTransition(
@@ -1051,21 +1087,21 @@ PropertyAccessInfo AccessInfoFactory::LookupTransition(
TransitionsAccessor(isolate(), map, broker()->is_concurrent_inlining())
.SearchTransition(*name, kData, NONE);
if (transition.is_null()) {
- return PropertyAccessInfo::Invalid(zone());
+ return Invalid();
}
- Handle<Map> transition_map(transition, isolate());
+ Handle<Map> transition_map = broker()->CanonicalPersistentHandle(transition);
InternalIndex const number = transition_map->LastAdded();
- Handle<DescriptorArray> descriptors(
- transition_map->instance_descriptors(kAcquireLoad), isolate());
+ Handle<DescriptorArray> descriptors = broker()->CanonicalPersistentHandle(
+ transition_map->instance_descriptors(kAcquireLoad));
PropertyDetails const details = descriptors->GetDetails(number);
// Don't bother optimizing stores to read-only properties.
if (details.IsReadOnly()) {
- return PropertyAccessInfo::Invalid(zone());
+ return Invalid();
}
// TODO(bmeurer): Handle transition to data constant?
if (details.location() != kField) {
- return PropertyAccessInfo::Invalid(zone());
+ return Invalid();
}
int const index = details.field_index();
Representation details_representation = details.representation();
@@ -1073,51 +1109,63 @@ PropertyAccessInfo AccessInfoFactory::LookupTransition(
details_representation);
Type field_type = Type::NonInternal();
MaybeHandle<Map> field_map;
- MapRef transition_map_ref(broker(), transition_map);
+
+ base::Optional<MapRef> transition_map_ref =
+ TryMakeRef(broker(), transition_map);
+ if (!transition_map_ref.has_value()) return Invalid();
+
ZoneVector<CompilationDependency const*> unrecorded_dependencies(zone());
if (details_representation.IsSmi()) {
field_type = Type::SignedSmall();
- transition_map_ref.SerializeOwnDescriptor(number);
+ if (!transition_map_ref->TrySerializeOwnDescriptor(number)) {
+ return Invalid();
+ }
unrecorded_dependencies.push_back(
dependencies()->FieldRepresentationDependencyOffTheRecord(
- transition_map_ref, number));
+ *transition_map_ref, number));
} else if (details_representation.IsDouble()) {
field_type = type_cache_->kFloat64;
- transition_map_ref.SerializeOwnDescriptor(number);
+ if (!transition_map_ref->TrySerializeOwnDescriptor(number)) {
+ return Invalid();
+ }
unrecorded_dependencies.push_back(
dependencies()->FieldRepresentationDependencyOffTheRecord(
- transition_map_ref, number));
+ *transition_map_ref, number));
} else if (details_representation.IsHeapObject()) {
// Extract the field type from the property details (make sure its
// representation is TaggedPointer to reflect the heap object case).
- Handle<FieldType> descriptors_field_type(descriptors->GetFieldType(number),
- isolate());
+ Handle<FieldType> descriptors_field_type =
+ broker()->CanonicalPersistentHandle(descriptors->GetFieldType(number));
if (descriptors_field_type->IsNone()) {
// Store is not safe if the field type was cleared.
- return PropertyAccessInfo::Invalid(zone());
+ return Invalid();
+ }
+ if (!transition_map_ref->TrySerializeOwnDescriptor(number)) {
+ return Invalid();
}
- transition_map_ref.SerializeOwnDescriptor(number);
unrecorded_dependencies.push_back(
dependencies()->FieldRepresentationDependencyOffTheRecord(
- transition_map_ref, number));
+ *transition_map_ref, number));
if (descriptors_field_type->IsClass()) {
unrecorded_dependencies.push_back(
- dependencies()->FieldTypeDependencyOffTheRecord(transition_map_ref,
+ dependencies()->FieldTypeDependencyOffTheRecord(*transition_map_ref,
number));
// Remember the field map, and try to infer a useful type.
- Handle<Map> map(descriptors_field_type->AsClass(), isolate());
- field_type = Type::For(MapRef(broker(), map));
- field_map = MaybeHandle<Map>(map);
+ Handle<Map> map = broker()->CanonicalPersistentHandle(
+ descriptors_field_type->AsClass());
+ base::Optional<MapRef> map_ref = TryMakeRef(broker(), map);
+ if (!map_ref.has_value()) return Invalid();
+ field_type = Type::For(*map_ref);
+ field_map = map;
}
}
unrecorded_dependencies.push_back(
- dependencies()->TransitionDependencyOffTheRecord(
- MapRef(broker(), transition_map)));
- transition_map_ref.SerializeBackPointer(); // For BuildPropertyStore.
+ dependencies()->TransitionDependencyOffTheRecord(*transition_map_ref));
+ transition_map_ref->SerializeBackPointer(); // For BuildPropertyStore.
// Transitioning stores *may* store to const fields. The resulting
// DataConstant access infos can be distinguished from later, i.e. redundant,
// stores to the same constant field by the presence of a transition map.
- switch (dependencies()->DependOnFieldConstness(transition_map_ref, number)) {
+ switch (dependencies()->DependOnFieldConstness(*transition_map_ref, number)) {
case PropertyConstness::kMutable:
return PropertyAccessInfo::DataField(
zone(), map, std::move(unrecorded_dependencies), field_index,
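The recurring change in this hunk replaces eager MapRef construction with TryMakeRef, bailing out through the new Invalid() helper whenever the broker cannot safely materialize a ref (e.g. off the main thread). A minimal stand-alone sketch of that shape, using std::optional in place of base::Optional; AccessInfo, TryMakeRef and the thread flag below are illustrative stand-ins, not V8 API:

    #include <optional>
    #include <string>

    struct AccessInfo {
      bool valid = false;
      static AccessInfo Invalid() { return {}; }
    };

    // Stands in for TryMakeRef(broker(), handle): may fail on a background thread.
    std::optional<std::string> TryMakeRef(bool on_main_thread, const std::string& name) {
      if (!on_main_thread) return std::nullopt;
      return name;
    }

    AccessInfo ComputePropertyAccessInfo(bool on_main_thread) {
      std::optional<std::string> map_ref = TryMakeRef(on_main_thread, "receiver_map");
      if (!map_ref.has_value()) return AccessInfo::Invalid();  // conservative bailout
      return AccessInfo{true};
    }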
diff --git a/chromium/v8/src/compiler/access-info.h b/chromium/v8/src/compiler/access-info.h
index b430dacd3ae..93215ea0a02 100644
--- a/chromium/v8/src/compiler/access-info.h
+++ b/chromium/v8/src/compiler/access-info.h
@@ -305,6 +305,10 @@ class AccessInfoFactory final {
MaybeHandle<JSObject> holder, InternalIndex descriptor,
AccessMode access_mode) const;
+ PropertyAccessInfo Invalid() const {
+ return PropertyAccessInfo::Invalid(zone());
+ }
+
void MergePropertyAccessInfos(ZoneVector<PropertyAccessInfo> infos,
AccessMode access_mode,
ZoneVector<PropertyAccessInfo>* result) const;
@@ -313,28 +317,6 @@ class AccessInfoFactory final {
Handle<Name> name, InternalIndex* index_out,
PropertyDetails* details_out) const;
- bool should_lock_mutex() const { return map_updater_mutex_depth_ == 0; }
-
- class MapUpdaterMutexDepthScope final {
- public:
- explicit MapUpdaterMutexDepthScope(const AccessInfoFactory* ptr)
- : ptr_(ptr),
- initial_map_updater_mutex_depth_(ptr->map_updater_mutex_depth_) {
- ptr_->map_updater_mutex_depth_++;
- }
-
- ~MapUpdaterMutexDepthScope() {
- ptr_->map_updater_mutex_depth_--;
- DCHECK_EQ(initial_map_updater_mutex_depth_,
- ptr_->map_updater_mutex_depth_);
- USE(initial_map_updater_mutex_depth_);
- }
-
- private:
- const AccessInfoFactory* const ptr_;
- const int initial_map_updater_mutex_depth_;
- };
-
CompilationDependencies* dependencies() const { return dependencies_; }
JSHeapBroker* broker() const { return broker_; }
Isolate* isolate() const;
@@ -345,12 +327,6 @@ class AccessInfoFactory final {
TypeCache const* const type_cache_;
Zone* const zone_;
- // ComputePropertyAccessInfo can be called recursively, thus we need to
- // emulate a recursive mutex. This field holds the locking depth, i.e. how
- // many times the mutex has been recursively locked. Only the outermost
- // locker actually locks underneath.
- mutable int map_updater_mutex_depth_ = 0;
-
// TODO(nicohartmann@): Move to public
AccessInfoFactory(const AccessInfoFactory&) = delete;
AccessInfoFactory& operator=(const AccessInfoFactory&) = delete;
diff --git a/chromium/v8/src/compiler/add-type-assertions-reducer.cc b/chromium/v8/src/compiler/add-type-assertions-reducer.cc
index 59d2fe68203..d555d518d2c 100644
--- a/chromium/v8/src/compiler/add-type-assertions-reducer.cc
+++ b/chromium/v8/src/compiler/add-type-assertions-reducer.cc
@@ -27,9 +27,7 @@ Reduction AddTypeAssertionsReducer::Reduce(Node* node) {
visited_.Set(node, true);
Type type = NodeProperties::GetType(node);
- if (!type.IsRange()) {
- return NoChange();
- }
+ if (!type.CanBeAsserted()) return NoChange();
Node* assertion = graph()->NewNode(simplified()->AssertType(type), node);
NodeProperties::SetType(assertion, type);
diff --git a/chromium/v8/src/compiler/backend/arm/code-generator-arm.cc b/chromium/v8/src/compiler/backend/arm/code-generator-arm.cc
index dba31fe0bc3..35d8cb73618 100644
--- a/chromium/v8/src/compiler/backend/arm/code-generator-arm.cc
+++ b/chromium/v8/src/compiler/backend/arm/code-generator-arm.cc
@@ -194,16 +194,14 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
}
void Generate() final {
- if (mode_ > RecordWriteMode::kValueIsPointer) {
- __ JumpIfSmi(value_, exit());
- }
__ CheckPageFlag(value_, MemoryChunk::kPointersToHereAreInterestingMask, eq,
exit());
RememberedSetAction const remembered_set_action =
- mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET
- : OMIT_REMEMBERED_SET;
- SaveFPRegsMode const save_fp_mode =
- frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
+ mode_ > RecordWriteMode::kValueIsMap ? RememberedSetAction::kEmit
+ : RememberedSetAction::kOmit;
+ SaveFPRegsMode const save_fp_mode = frame()->DidAllocateDoubleRegisters()
+ ? SaveFPRegsMode::kSave
+ : SaveFPRegsMode::kIgnore;
if (must_save_lr_) {
// We need to save and restore lr if the frame was elided.
__ Push(lr);
@@ -828,7 +826,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchSaveCallerRegisters: {
fp_mode_ =
static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode()));
- DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs);
+ DCHECK(fp_mode_ == SaveFPRegsMode::kIgnore ||
+ fp_mode_ == SaveFPRegsMode::kSave);
// kReturnRegister0 should have been saved before entering the stub.
int bytes = __ PushCallerSaved(fp_mode_, kReturnRegister0);
DCHECK(IsAligned(bytes, kSystemPointerSize));
@@ -841,7 +840,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchRestoreCallerRegisters: {
DCHECK(fp_mode_ ==
static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode())));
- DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs);
+ DCHECK(fp_mode_ == SaveFPRegsMode::kIgnore ||
+ fp_mode_ == SaveFPRegsMode::kSave);
// Don't overwrite the returned value.
int bytes = __ PopCallerSaved(fp_mode_, kReturnRegister0);
frame_access_state()->IncreaseSPDelta(-(bytes / kSystemPointerSize));
@@ -999,6 +999,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
auto ool = zone()->New<OutOfLineRecordWrite>(
this, object, offset, value, mode, DetermineStubCallMode(),
&unwinding_info_writer_);
+ if (mode > RecordWriteMode::kValueIsPointer) {
+ __ JumpIfSmi(value, ool->exit());
+ }
__ CheckPageFlag(object, MemoryChunk::kPointersFromHereAreInterestingMask,
ne, ool->entry());
__ bind(ool->exit());
@@ -3576,7 +3579,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
#undef ASSEMBLE_SIMD_SHIFT_RIGHT
}
return kSuccess;
-} // NOLINT(readability/fn_size)
+}
// Assembles branches after an instruction.
void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
@@ -3793,7 +3796,7 @@ void CodeGenerator::AssembleConstructFrame() {
// frame is still on the stack. Optimized code uses OSR values directly from
// the unoptimized frame. Thus, all that needs to be done is to allocate the
// remaining stack slots.
- if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
+ __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
required_slots -= osr_helper()->UnoptimizedFrameSlots();
ResetSpeculationPoison();
@@ -3906,7 +3909,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
if (parameter_slots != 0) {
if (additional_pop_count->IsImmediate()) {
DCHECK_EQ(g.ToConstant(additional_pop_count).ToInt32(), 0);
- } else if (__ emit_debug_code()) {
+ } else if (FLAG_debug_code) {
__ cmp(g.ToRegister(additional_pop_count), Operand(0));
__ Assert(eq, AbortReason::kUnexpectedAdditionalPopValue);
}
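Across the arm, arm64 and ia32 code generators this patch hoists the Smi check out of OutOfLineRecordWrite::Generate and into the inline store path, so a stored Smi skips both the page-flag checks and the out-of-line stub. Roughly, as a scalar model rather than V8 code (V8 tags Smis with a clear low bit, and in the real code the check is only emitted when mode > RecordWriteMode::kValueIsPointer):

    #include <cstdint>

    inline bool IsSmi(uintptr_t tagged) { return (tagged & 1) == 0; }

    void StoreTaggedWithBarrier(uintptr_t* slot, uintptr_t value,
                                void (*out_of_line_barrier)(uintptr_t*, uintptr_t)) {
      *slot = value;
      if (IsSmi(value)) return;          // hoisted JumpIfSmi: Smis never need a barrier
      out_of_line_barrier(slot, value);  // page-flag checks + remembered-set update
    }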
diff --git a/chromium/v8/src/compiler/backend/arm/instruction-selector-arm.cc b/chromium/v8/src/compiler/backend/arm/instruction-selector-arm.cc
index d28ada322d4..bb17d45253d 100644
--- a/chromium/v8/src/compiler/backend/arm/instruction-selector-arm.cc
+++ b/chromium/v8/src/compiler/backend/arm/instruction-selector-arm.cc
@@ -626,6 +626,7 @@ void InstructionSelector::VisitLoad(Node* node) {
case MachineRepresentation::kCompressedPointer: // Fall through.
case MachineRepresentation::kCompressed: // Fall through.
case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kMapWord: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
}
@@ -709,6 +710,7 @@ void InstructionSelector::VisitStore(Node* node) {
case MachineRepresentation::kCompressedPointer: // Fall through.
case MachineRepresentation::kCompressed: // Fall through.
case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kMapWord: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
}
@@ -3170,6 +3172,12 @@ void InstructionSelector::VisitF64x2PromoteLowF32x4(Node* node) {
g.UseFixed(node->InputAt(0), q0));
}
+void InstructionSelector::AddOutputToSelectContinuation(OperandGenerator* g,
+ int first_input_index,
+ Node* node) {
+ UNREACHABLE();
+}
+
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
diff --git a/chromium/v8/src/compiler/backend/arm64/code-generator-arm64.cc b/chromium/v8/src/compiler/backend/arm64/code-generator-arm64.cc
index 91b8f1f04b1..a10c9f98b22 100644
--- a/chromium/v8/src/compiler/backend/arm64/code-generator-arm64.cc
+++ b/chromium/v8/src/compiler/backend/arm64/code-generator-arm64.cc
@@ -104,7 +104,7 @@ class Arm64OperandConverter final : public InstructionOperandConverter {
Register OutputRegister64() { return OutputRegister(); }
- Register OutputRegister32() { return ToRegister(instr_->Output()).W(); }
+ Register OutputRegister32() { return OutputRegister().W(); }
Register TempRegister32(size_t index) {
return ToRegister(instr_->TempAt(index)).W();
@@ -282,19 +282,17 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
}
void Generate() final {
- if (mode_ > RecordWriteMode::kValueIsPointer) {
- __ JumpIfSmi(value_, exit());
- }
if (COMPRESS_POINTERS_BOOL) {
__ DecompressTaggedPointer(value_, value_);
}
__ CheckPageFlag(value_, MemoryChunk::kPointersToHereAreInterestingMask, ne,
exit());
RememberedSetAction const remembered_set_action =
- mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET
- : OMIT_REMEMBERED_SET;
- SaveFPRegsMode const save_fp_mode =
- frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
+ mode_ > RecordWriteMode::kValueIsMap ? RememberedSetAction::kEmit
+ : RememberedSetAction::kOmit;
+ SaveFPRegsMode const save_fp_mode = frame()->DidAllocateDoubleRegisters()
+ ? SaveFPRegsMode::kSave
+ : SaveFPRegsMode::kIgnore;
if (must_save_lr_) {
// We need to save and restore lr if the frame was elided.
__ Push<TurboAssembler::kSignLR>(lr, padreg);
@@ -859,7 +857,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchSaveCallerRegisters: {
fp_mode_ =
static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode()));
- DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs);
+ DCHECK(fp_mode_ == SaveFPRegsMode::kIgnore ||
+ fp_mode_ == SaveFPRegsMode::kSave);
// kReturnRegister0 should have been saved before entering the stub.
int bytes = __ PushCallerSaved(fp_mode_, kReturnRegister0);
DCHECK(IsAligned(bytes, kSystemPointerSize));
@@ -872,7 +871,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchRestoreCallerRegisters: {
DCHECK(fp_mode_ ==
static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode())));
- DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs);
+ DCHECK(fp_mode_ == SaveFPRegsMode::kIgnore ||
+ fp_mode_ == SaveFPRegsMode::kSave);
// Don't overwrite the returned value.
int bytes = __ PopCallerSaved(fp_mode_, kReturnRegister0);
frame_access_state()->IncreaseSPDelta(-(bytes / kSystemPointerSize));
@@ -1026,6 +1026,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
this, object, offset, value, mode, DetermineStubCallMode(),
&unwinding_info_writer_);
__ StoreTaggedField(value, MemOperand(object, offset));
+ if (mode > RecordWriteMode::kValueIsPointer) {
+ __ JumpIfSmi(value, ool->exit());
+ }
__ CheckPageFlag(object, MemoryChunk::kPointersFromHereAreInterestingMask,
eq, ool->entry());
__ Bind(ool->exit());
@@ -1802,6 +1805,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Ldrsb(i.OutputRegister(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
+ case kArm64LdrsbW:
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
+ __ Ldrsb(i.OutputRegister32(), i.MemoryOperand());
+ break;
case kArm64Strb:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Strb(i.InputOrZeroRegister64(0), i.MemoryOperand(1));
@@ -1816,6 +1823,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Ldrsh(i.OutputRegister(), i.MemoryOperand());
EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
+ case kArm64LdrshW:
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
+ __ Ldrsh(i.OutputRegister32(), i.MemoryOperand());
+ break;
case kArm64Strh:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Strh(i.InputOrZeroRegister64(0), i.MemoryOperand(1));
@@ -2862,7 +2873,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
SIMD_REDUCE_OP_CASE(kArm64I8x16AllTrue, Uminv, kFormatB, 16B);
}
return kSuccess;
-} // NOLINT(readability/fn_size)
+}
#undef SIMD_UNOP_CASE
#undef SIMD_BINOP_CASE
@@ -3106,7 +3117,7 @@ void CodeGenerator::AssembleConstructFrame() {
// unoptimized frame is still on the stack. Optimized code uses OSR values
// directly from the unoptimized frame. Thus, all that needs to be done is
// to allocate the remaining stack slots.
- if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
+ __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
__ CodeEntry();
size_t unoptimized_frame_slots = osr_helper()->UnoptimizedFrameSlots();
@@ -3280,7 +3291,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
if (parameter_slots != 0) {
if (additional_pop_count->IsImmediate()) {
DCHECK_EQ(g.ToConstant(additional_pop_count).ToInt32(), 0);
- } else if (__ emit_debug_code()) {
+ } else if (FLAG_debug_code) {
__ cmp(g.ToRegister(additional_pop_count), Operand(0));
__ Assert(eq, AbortReason::kUnexpectedAdditionalPopValue);
}
diff --git a/chromium/v8/src/compiler/backend/arm64/instruction-codes-arm64.h b/chromium/v8/src/compiler/backend/arm64/instruction-codes-arm64.h
index 7c376168425..6612269548d 100644
--- a/chromium/v8/src/compiler/backend/arm64/instruction-codes-arm64.h
+++ b/chromium/v8/src/compiler/backend/arm64/instruction-codes-arm64.h
@@ -160,9 +160,11 @@ namespace compiler {
V(Arm64StrQ) \
V(Arm64Ldrb) \
V(Arm64Ldrsb) \
+ V(Arm64LdrsbW) \
V(Arm64Strb) \
V(Arm64Ldrh) \
V(Arm64Ldrsh) \
+ V(Arm64LdrshW) \
V(Arm64Strh) \
V(Arm64Ldrsw) \
V(Arm64LdrW) \
diff --git a/chromium/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc b/chromium/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
index 520db21dde0..293712858f5 100644
--- a/chromium/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
+++ b/chromium/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
@@ -361,8 +361,10 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64LdrQ:
case kArm64Ldrb:
case kArm64Ldrsb:
+ case kArm64LdrsbW:
case kArm64Ldrh:
case kArm64Ldrsh:
+ case kArm64LdrshW:
case kArm64Ldrsw:
case kArm64LdrW:
case kArm64Ldr:
diff --git a/chromium/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc b/chromium/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
index d9e388b4b5e..94ba4d78b33 100644
--- a/chromium/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
+++ b/chromium/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
@@ -354,6 +354,12 @@ bool TryMatchAnyExtend(Arm64OperandGenerator* g, InstructionSelector* selector,
return true;
}
}
+ } else if (nm.IsChangeInt32ToInt64()) {
+ // Use extended register form.
+ *opcode |= AddressingModeField::encode(kMode_Operand2_R_SXTW);
+ *left_op = g->UseRegister(left_node);
+ *right_op = g->UseRegister(right_node->InputAt(0));
+ return true;
}
return false;
}
@@ -781,11 +787,19 @@ void InstructionSelector::VisitLoad(Node* node) {
break;
case MachineRepresentation::kBit: // Fall through.
case MachineRepresentation::kWord8:
- opcode = load_rep.IsSigned() ? kArm64Ldrsb : kArm64Ldrb;
+ opcode = load_rep.IsUnsigned()
+ ? kArm64Ldrb
+ : load_rep.semantic() == MachineSemantic::kInt32
+ ? kArm64LdrsbW
+ : kArm64Ldrsb;
immediate_mode = kLoadStoreImm8;
break;
case MachineRepresentation::kWord16:
- opcode = load_rep.IsSigned() ? kArm64Ldrsh : kArm64Ldrh;
+ opcode = load_rep.IsUnsigned()
+ ? kArm64Ldrh
+ : load_rep.semantic() == MachineSemantic::kInt32
+ ? kArm64LdrshW
+ : kArm64Ldrsh;
immediate_mode = kLoadStoreImm16;
break;
case MachineRepresentation::kWord32:
@@ -827,6 +841,7 @@ void InstructionSelector::VisitLoad(Node* node) {
opcode = kArm64LdrQ;
immediate_mode = kNoImmediate;
break;
+ case MachineRepresentation::kMapWord: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
}
@@ -936,6 +951,7 @@ void InstructionSelector::VisitStore(Node* node) {
opcode = kArm64StrQ;
immediate_mode = kNoImmediate;
break;
+ case MachineRepresentation::kMapWord: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
}
@@ -4040,6 +4056,12 @@ void InstructionSelector::VisitI8x16Popcnt(Node* node) {
VisitRR(this, code, node);
}
+void InstructionSelector::AddOutputToSelectContinuation(OperandGenerator* g,
+ int first_input_index,
+ Node* node) {
+ continuation_outputs_.push_back(g->DefineAsRegister(node));
+}
+
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
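The new kArm64LdrsbW/kArm64LdrshW opcodes let VisitLoad sign-extend into a 32-bit W register when the load's semantic is only Int32, instead of always widening into a 64-bit X register. The byte-load selection above, restated as a small free function (enum names are placeholders for the real instruction codes):

    enum Opcode { kLdrb, kLdrsb, kLdrsbW };
    enum class Semantic { kUint32, kInt32, kInt64 };

    Opcode SelectByteLoad(bool is_unsigned, Semantic semantic) {
      if (is_unsigned) return kLdrb;                  // zero-extending load
      return semantic == Semantic::kInt32 ? kLdrsbW   // sign-extend into a W register
                                          : kLdrsb;   // sign-extend into an X register
    }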
diff --git a/chromium/v8/src/compiler/backend/code-generator.cc b/chromium/v8/src/compiler/backend/code-generator.cc
index 3ed9eaabf14..5abe799e777 100644
--- a/chromium/v8/src/compiler/backend/code-generator.cc
+++ b/chromium/v8/src/compiler/backend/code-generator.cc
@@ -588,11 +588,6 @@ MaybeHandle<Code> CodeGenerator::FinalizeCode() {
return MaybeHandle<Code>();
}
- // TODO(jgruber,v8:8888): Turn this into a DCHECK once confidence is
- // high that the implementation is complete.
- CHECK_IMPLIES(info()->IsNativeContextIndependent(),
- code->IsNativeContextIndependent(isolate()));
-
// Counts both compiled code and metadata.
isolate()->counters()->total_compiled_code_size()->Increment(
code->raw_body_size());
@@ -1400,7 +1395,8 @@ void CodeGenerator::AddTranslationForOperand(Instruction* instr,
default:
UNREACHABLE();
}
- if (literal.object().equals(info()->closure())) {
+ if (literal.object().equals(info()->closure()) &&
+ info()->function_context_specializing()) {
translations_.StoreJSFrameFunction();
} else {
int literal_id = DefineDeoptimizationLiteral(literal);
diff --git a/chromium/v8/src/compiler/backend/ia32/code-generator-ia32.cc b/chromium/v8/src/compiler/backend/ia32/code-generator-ia32.cc
index ac5d589790e..af2eec4e8e2 100644
--- a/chromium/v8/src/compiler/backend/ia32/code-generator-ia32.cc
+++ b/chromium/v8/src/compiler/backend/ia32/code-generator-ia32.cc
@@ -320,18 +320,16 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
}
void Generate() final {
- if (mode_ > RecordWriteMode::kValueIsPointer) {
- __ JumpIfSmi(value_, exit());
- }
__ CheckPageFlag(value_, scratch0_,
MemoryChunk::kPointersToHereAreInterestingMask, zero,
exit());
__ lea(scratch1_, operand_);
RememberedSetAction const remembered_set_action =
- mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET
- : OMIT_REMEMBERED_SET;
- SaveFPRegsMode const save_fp_mode =
- frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
+ mode_ > RecordWriteMode::kValueIsMap ? RememberedSetAction::kEmit
+ : RememberedSetAction::kOmit;
+ SaveFPRegsMode const save_fp_mode = frame()->DidAllocateDoubleRegisters()
+ ? SaveFPRegsMode::kSave
+ : SaveFPRegsMode::kIgnore;
if (mode_ == RecordWriteMode::kValueIsEphemeronKey) {
__ CallEphemeronKeyBarrier(object_, scratch1_, save_fp_mode);
#if V8_ENABLE_WEBASSEMBLY
@@ -832,7 +830,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchSaveCallerRegisters: {
fp_mode_ =
static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode()));
- DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs);
+ DCHECK(fp_mode_ == SaveFPRegsMode::kIgnore ||
+ fp_mode_ == SaveFPRegsMode::kSave);
// kReturnRegister0 should have been saved before entering the stub.
int bytes = __ PushCallerSaved(fp_mode_, kReturnRegister0);
DCHECK(IsAligned(bytes, kSystemPointerSize));
@@ -845,7 +844,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchRestoreCallerRegisters: {
DCHECK(fp_mode_ ==
static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode())));
- DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs);
+ DCHECK(fp_mode_ == SaveFPRegsMode::kIgnore ||
+ fp_mode_ == SaveFPRegsMode::kSave);
// Don't overwrite the returned value.
int bytes = __ PopCallerSaved(fp_mode_, kReturnRegister0);
frame_access_state()->IncreaseSPDelta(-(bytes / kSystemPointerSize));
@@ -1004,6 +1004,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
scratch0, scratch1, mode,
DetermineStubCallMode());
__ mov(operand, value);
+ if (mode > RecordWriteMode::kValueIsPointer) {
+ __ JumpIfSmi(value, ool->exit());
+ }
__ CheckPageFlag(object, scratch0,
MemoryChunk::kPointersFromHereAreInterestingMask,
not_zero, ool->entry());
@@ -1866,77 +1869,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kIA32F64x2Splat: {
- XMMRegister dst = i.OutputDoubleRegister();
- XMMRegister src = i.InputDoubleRegister(0);
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vshufpd(i.OutputSimd128Register(), src, src, 0x0);
- } else {
- DCHECK_EQ(dst, src);
- __ shufpd(dst, src, 0x0);
- }
- break;
- }
- case kSSEF64x2ExtractLane: {
- DCHECK_EQ(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
- XMMRegister dst = i.OutputDoubleRegister();
- int8_t lane = i.InputInt8(1);
- if (lane != 0) {
- DCHECK_EQ(lane, 1);
- __ shufpd(dst, dst, lane);
- }
+ __ Movddup(i.OutputSimd128Register(), i.InputDoubleRegister(0));
break;
}
- case kAVXF64x2ExtractLane: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- XMMRegister dst = i.OutputDoubleRegister();
- XMMRegister src = i.InputSimd128Register(0);
- int8_t lane = i.InputInt8(1);
- if (lane == 0) {
- if (dst != src) __ vmovapd(dst, src);
- } else {
- DCHECK_EQ(lane, 1);
- __ vshufpd(dst, src, src, lane);
- }
+ case kF64x2ExtractLane: {
+ __ F64x2ExtractLane(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputUint8(1));
break;
}
- case kSSEF64x2ReplaceLane: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
- XMMRegister dst = i.OutputSimd128Register();
- int8_t lane = i.InputInt8(1);
- DoubleRegister rep = i.InputDoubleRegister(2);
-
- // insertps takes a mask which contains (high to low):
- // - 2 bit specifying source float element to copy
- // - 2 bit specifying destination float element to write to
- // - 4 bits specifying which elements of the destination to zero
- DCHECK_LT(lane, 2);
- if (lane == 0) {
- __ insertps(dst, rep, 0b00000000);
- __ insertps(dst, rep, 0b01010000);
- } else {
- __ insertps(dst, rep, 0b00100000);
- __ insertps(dst, rep, 0b01110000);
- }
- break;
- }
- case kAVXF64x2ReplaceLane: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src = i.InputSimd128Register(0);
- int8_t lane = i.InputInt8(1);
- DoubleRegister rep = i.InputDoubleRegister(2);
- DCHECK_NE(dst, rep);
-
- DCHECK_LT(lane, 2);
- if (lane == 0) {
- __ vinsertps(dst, src, rep, 0b00000000);
- __ vinsertps(dst, dst, rep, 0b01010000);
- } else {
- __ vinsertps(dst, src, rep, 0b00100000);
- __ vinsertps(dst, dst, rep, 0b01110000);
- }
+ case kF64x2ReplaceLane: {
+ __ F64x2ReplaceLane(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputDoubleRegister(2), i.InputInt8(1));
break;
}
case kIA32F64x2Sqrt: {
@@ -1964,44 +1907,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kIA32F64x2Min: {
- Operand src1 = i.InputOperand(1);
- XMMRegister dst = i.OutputSimd128Register(),
- src = i.InputSimd128Register(0),
- tmp = i.TempSimd128Register(0);
- // The minpd instruction doesn't propagate NaNs and +0's in its first
- // operand. Perform minpd in both orders, merge the resuls, and adjust.
- __ Movupd(tmp, src1);
- __ Minpd(tmp, tmp, src);
- __ Minpd(dst, src, src1);
- // propagate -0's and NaNs, which may be non-canonical.
- __ Orpd(tmp, dst);
- // Canonicalize NaNs by quieting and clearing the payload.
- __ Cmpunordpd(dst, dst, tmp);
- __ Orpd(tmp, dst);
- __ Psrlq(dst, 13);
- __ Andnpd(dst, tmp);
+ __ F64x2Min(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), kScratchDoubleReg);
break;
}
case kIA32F64x2Max: {
- Operand src1 = i.InputOperand(1);
- XMMRegister dst = i.OutputSimd128Register(),
- src = i.InputSimd128Register(0),
- tmp = i.TempSimd128Register(0);
- // The maxpd instruction doesn't propagate NaNs and +0's in its first
- // operand. Perform maxpd in both orders, merge the resuls, and adjust.
- __ Movupd(tmp, src1);
- __ Maxpd(tmp, tmp, src);
- __ Maxpd(dst, src, src1);
- // Find discrepancies.
- __ Xorpd(dst, tmp);
- // Propagate NaNs, which may be non-canonical.
- __ Orpd(tmp, dst);
- // Propagate sign discrepancy and (subtle) quiet NaNs.
- __ Subpd(tmp, tmp, dst);
- // Canonicalize NaNs by clearing the payload. Sign is non-deterministic.
- __ Cmpunordpd(dst, dst, tmp);
- __ Psrlq(dst, 13);
- __ Andnpd(dst, tmp);
+ __ F64x2Max(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), kScratchDoubleReg);
break;
}
case kIA32F64x2Eq: {
@@ -2158,10 +2070,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kIA32I64x2Neg: {
- XMMRegister dst = i.OutputSimd128Register();
- Operand src = i.InputOperand(0);
- __ Pxor(dst, dst);
- __ Psubq(dst, src);
+ __ I64x2Neg(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ kScratchDoubleReg);
break;
}
case kIA32I64x2Shl: {
@@ -2181,7 +2091,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// Set up a mask [0x80000000,0,0x80000000,0].
__ Pcmpeqb(tmp2, tmp2);
- __ Psllq(tmp2, tmp2, 63);
+ __ Psllq(tmp2, tmp2, byte{63});
__ Psrlq(tmp2, tmp2, tmp);
__ Psrlq(dst, src, tmp);
@@ -2210,15 +2120,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Movaps(tmp2, right);
// Multiply high dword of each qword of left with right.
- __ Psrlq(tmp1, 32);
+ __ Psrlq(tmp1, byte{32});
__ Pmuludq(tmp1, tmp1, right);
// Multiply high dword of each qword of right with left.
- __ Psrlq(tmp2, 32);
+ __ Psrlq(tmp2, byte{32});
__ Pmuludq(tmp2, tmp2, left);
__ Paddq(tmp2, tmp2, tmp1);
- __ Psllq(tmp2, tmp2, 32);
+ __ Psllq(tmp2, tmp2, byte{32});
__ Pmuludq(dst, left, right);
__ Paddq(dst, dst, tmp2);
@@ -2302,38 +2212,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kIA32F32x4Splat: {
- XMMRegister dst = i.OutputDoubleRegister();
- XMMRegister src = i.InputDoubleRegister(0);
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vshufps(i.OutputSimd128Register(), src, src, 0x0);
- } else {
- DCHECK_EQ(dst, src);
- __ shufps(dst, src, 0x0);
- }
+ __ F32x4Splat(i.OutputSimd128Register(), i.InputDoubleRegister(0));
break;
}
case kIA32F32x4ExtractLane: {
- XMMRegister dst = i.OutputFloatRegister();
- XMMRegister src = i.InputSimd128Register(0);
- uint8_t lane = i.InputUint8(1);
- DCHECK_LT(lane, 4);
- // These instructions are shorter than insertps, but will leave junk in
- // the top lanes of dst.
- if (lane == 0) {
- if (dst != src) {
- __ Movaps(dst, src);
- }
- } else if (lane == 1) {
- __ Movshdup(dst, src);
- } else if (lane == 2 && dst == src) {
- // Check dst == src to avoid false dependency on dst.
- __ Movhlps(dst, src);
- } else if (dst == src) {
- __ Shufps(dst, src, src, lane);
- } else {
- __ Pshufd(dst, src, lane);
- }
+ __ F32x4ExtractLane(i.OutputFloatRegister(), i.InputSimd128Register(0),
+ i.InputUint8(1));
break;
}
case kIA32Insertps: {
@@ -2357,10 +2241,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
XMMRegister dst = i.OutputSimd128Register();
XMMRegister src = i.InputSimd128Register(0);
__ Pxor(kScratchDoubleReg, kScratchDoubleReg); // zeros
- __ Pblendw(kScratchDoubleReg, src, 0x55); // get lo 16 bits
+ __ Pblendw(kScratchDoubleReg, src, uint8_t{0x55}); // get lo 16 bits
__ Psubd(dst, src, kScratchDoubleReg); // get hi 16 bits
__ Cvtdq2ps(kScratchDoubleReg, kScratchDoubleReg); // convert lo exactly
- __ Psrld(dst, dst, 1); // divide by 2 to get in unsigned range
+ __ Psrld(dst, dst, byte{1}); // divide by 2 to get in unsigned range
__ Cvtdq2ps(dst, dst); // convert hi exactly
__ Addps(dst, dst, dst); // double hi, exactly
__ Addps(dst, dst, kScratchDoubleReg); // add hi and lo, may round.
@@ -2371,11 +2255,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
XMMRegister src = i.InputSimd128Register(0);
if (dst == src) {
__ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ Psrld(kScratchDoubleReg, kScratchDoubleReg, 1);
+ __ Psrld(kScratchDoubleReg, kScratchDoubleReg, byte{1});
__ Andps(dst, kScratchDoubleReg);
} else {
__ Pcmpeqd(dst, dst);
- __ Psrld(dst, dst, 1);
+ __ Psrld(dst, dst, byte{1});
__ Andps(dst, src);
}
break;
@@ -2385,11 +2269,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
XMMRegister src = i.InputSimd128Register(0);
if (dst == src) {
__ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ Pslld(kScratchDoubleReg, kScratchDoubleReg, 31);
+ __ Pslld(kScratchDoubleReg, kScratchDoubleReg, byte{31});
__ Xorps(dst, kScratchDoubleReg);
} else {
__ Pcmpeqd(dst, dst);
- __ Pslld(dst, dst, 31);
+ __ Pslld(dst, dst, byte{31});
__ Xorps(dst, src);
}
break;
@@ -2580,7 +2464,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Cvttps2dq(dst, dst);
// Set top bit if >=0 is now < 0
__ Pand(kScratchDoubleReg, dst);
- __ Psrad(kScratchDoubleReg, kScratchDoubleReg, 31);
+ __ Psrad(kScratchDoubleReg, kScratchDoubleReg, byte{31});
// Set positive overflow lanes to 0x7FFFFFFF
__ Pxor(dst, kScratchDoubleReg);
break;
@@ -3564,15 +3448,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kIA32S128Not: {
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src = i.InputSimd128Register(0);
- if (dst == src) {
- __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ Pxor(dst, kScratchDoubleReg);
- } else {
- __ Pcmpeqd(dst, dst);
- __ Pxor(dst, src);
- }
+ __ S128Not(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ kScratchDoubleReg);
break;
}
case kSSES128And: {
@@ -3739,7 +3616,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_NE(0xe4, shuffle); // A simple blend should be handled below.
__ Pshufd(kScratchDoubleReg, i.InputOperand(1), shuffle);
__ Pshufd(i.OutputSimd128Register(), i.InputOperand(0), shuffle);
- __ Pblendw(i.OutputSimd128Register(), kScratchDoubleReg, i.InputInt8(3));
+ __ Pblendw(i.OutputSimd128Register(), kScratchDoubleReg, i.InputUint8(3));
break;
}
case kIA32S16x8Blend:
@@ -3757,7 +3634,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Pshufhw(kScratchDoubleReg, kScratchDoubleReg, i.InputUint8(3));
__ Pshuflw(dst, i.InputOperand(0), i.InputUint8(2));
__ Pshufhw(dst, dst, i.InputUint8(3));
- __ Pblendw(dst, kScratchDoubleReg, i.InputInt8(4));
+ __ Pblendw(dst, kScratchDoubleReg, i.InputUint8(4));
break;
}
case kIA32S8x16Alignr:
@@ -4250,7 +4127,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
return kSuccess;
-} // NOLINT(readability/fn_size)
+}
static Condition FlagsConditionToCondition(FlagsCondition condition) {
switch (condition) {
@@ -4649,7 +4526,7 @@ void CodeGenerator::AssembleConstructFrame() {
// frame is still on the stack. Optimized code uses OSR values directly from
// the unoptimized frame. Thus, all that needs to be done is to allocate the
// remaining stack slots.
- if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
+ __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
required_slots -= osr_helper()->UnoptimizedFrameSlots();
}
@@ -4738,7 +4615,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
if (parameter_slots != 0) {
if (additional_pop_count->IsImmediate()) {
DCHECK_EQ(g.ToConstant(additional_pop_count).ToInt32(), 0);
- } else if (__ emit_debug_code()) {
+ } else if (FLAG_debug_code) {
__ cmp(g.ToRegister(additional_pop_count), Immediate(0));
__ Assert(equal, AbortReason::kUnexpectedAdditionalPopValue);
}
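The open-coded kIA32F64x2Min/Max sequences above move into TurboAssembler helpers; the per-lane rule they must preserve (and the reason plain minpd is insufficient, as the deleted comments note) is the Wasm SIMD one: NaNs propagate and -0.0 orders below +0.0. A scalar sketch of that rule, for reference only:

    #include <cmath>
    #include <limits>

    double F64LaneMin(double a, double b) {
      if (std::isnan(a) || std::isnan(b))
        return std::numeric_limits<double>::quiet_NaN();
      if (a == 0.0 && b == 0.0) return std::signbit(a) ? a : b;  // prefer -0.0
      return a < b ? a : b;
    }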
diff --git a/chromium/v8/src/compiler/backend/ia32/instruction-codes-ia32.h b/chromium/v8/src/compiler/backend/ia32/instruction-codes-ia32.h
index f06ed5156e8..42af3326f3a 100644
--- a/chromium/v8/src/compiler/backend/ia32/instruction-codes-ia32.h
+++ b/chromium/v8/src/compiler/backend/ia32/instruction-codes-ia32.h
@@ -117,10 +117,8 @@ namespace compiler {
V(IA32Poke) \
V(IA32Peek) \
V(IA32F64x2Splat) \
- V(SSEF64x2ExtractLane) \
- V(AVXF64x2ExtractLane) \
- V(SSEF64x2ReplaceLane) \
- V(AVXF64x2ReplaceLane) \
+ V(F64x2ExtractLane) \
+ V(F64x2ReplaceLane) \
V(IA32F64x2Sqrt) \
V(IA32F64x2Add) \
V(IA32F64x2Sub) \
diff --git a/chromium/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc b/chromium/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc
index 3d1c7073591..278e7ea99b9 100644
--- a/chromium/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc
+++ b/chromium/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc
@@ -102,10 +102,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32BitcastFI:
case kIA32BitcastIF:
case kIA32F64x2Splat:
- case kSSEF64x2ExtractLane:
- case kAVXF64x2ExtractLane:
- case kSSEF64x2ReplaceLane:
- case kAVXF64x2ReplaceLane:
+ case kF64x2ExtractLane:
+ case kF64x2ReplaceLane:
case kIA32F64x2Sqrt:
case kIA32F64x2Add:
case kIA32F64x2Sub:
diff --git a/chromium/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc b/chromium/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
index 033a566e113..703d574eed8 100644
--- a/chromium/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
+++ b/chromium/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
@@ -65,7 +65,8 @@ class IA32OperandGenerator final : public OperandGenerator {
bool CanBeMemoryOperand(InstructionCode opcode, Node* node, Node* input,
int effect_level) {
- if (input->opcode() != IrOpcode::kLoad ||
+ if ((input->opcode() != IrOpcode::kLoad &&
+ input->opcode() != IrOpcode::kLoadImmutable) ||
!selector()->CanCover(node, input)) {
return false;
}
@@ -551,6 +552,7 @@ void InstructionSelector::VisitLoad(Node* node) {
case MachineRepresentation::kCompressed: // Fall through.
case MachineRepresentation::kWord64: // Fall through.
case MachineRepresentation::kNone:
+ case MachineRepresentation::kMapWord:
UNREACHABLE();
}
@@ -633,6 +635,7 @@ void InstructionSelector::VisitStore(Node* node) {
case MachineRepresentation::kCompressedPointer: // Fall through.
case MachineRepresentation::kCompressed: // Fall through.
case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kMapWord: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
}
@@ -816,7 +819,8 @@ void InstructionSelector::VisitStackPointerGreaterThan(
Node* const value = node->InputAt(0);
if (g.CanBeMemoryOperand(kIA32Cmp, node, value, effect_level)) {
- DCHECK_EQ(IrOpcode::kLoad, value->opcode());
+ DCHECK(value->opcode() == IrOpcode::kLoad ||
+ value->opcode() == IrOpcode::kLoadImmutable);
// GetEffectiveAddressMemoryOperand can create at most 3 inputs.
static constexpr int kMaxInputCount = 3;
@@ -1432,7 +1436,8 @@ void VisitCompareWithMemoryOperand(InstructionSelector* selector,
InstructionCode opcode, Node* left,
InstructionOperand right,
FlagsContinuation* cont) {
- DCHECK_EQ(IrOpcode::kLoad, left->opcode());
+ DCHECK(left->opcode() == IrOpcode::kLoad ||
+ left->opcode() == IrOpcode::kLoadImmutable);
IA32OperandGenerator g(selector);
size_t input_count = 0;
InstructionOperand inputs[4];
@@ -1463,7 +1468,8 @@ void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
}
MachineType MachineTypeForNarrow(Node* node, Node* hint_node) {
- if (hint_node->opcode() == IrOpcode::kLoad) {
+ if (hint_node->opcode() == IrOpcode::kLoad ||
+ hint_node->opcode() == IrOpcode::kLoadImmutable) {
MachineType hint = LoadRepresentationOf(hint_node->op());
if (node->opcode() == IrOpcode::kInt32Constant ||
node->opcode() == IrOpcode::kInt64Constant) {
@@ -1497,8 +1503,10 @@ MachineType MachineTypeForNarrow(Node* node, Node* hint_node) {
}
}
}
- return node->opcode() == IrOpcode::kLoad ? LoadRepresentationOf(node->op())
- : MachineType::None();
+ return node->opcode() == IrOpcode::kLoad ||
+ node->opcode() == IrOpcode::kLoadImmutable
+ ? LoadRepresentationOf(node->op())
+ : MachineType::None();
}
// Tries to match the size of the given opcode to that of the operands, if
@@ -2346,30 +2354,24 @@ void InstructionSelector::VisitS128Const(Node* node) {
void InstructionSelector::VisitF64x2Min(Node* node) {
IA32OperandGenerator g(this);
- InstructionOperand temps[] = {g.TempSimd128Register()};
- InstructionOperand operand0 = g.UseUniqueRegister(node->InputAt(0));
- InstructionOperand operand1 = g.UseUniqueRegister(node->InputAt(1));
+ InstructionOperand operand0 = g.UseRegister(node->InputAt(0));
+ InstructionOperand operand1 = g.UseRegister(node->InputAt(1));
if (IsSupported(AVX)) {
- Emit(kIA32F64x2Min, g.DefineAsRegister(node), operand0, operand1,
- arraysize(temps), temps);
+ Emit(kIA32F64x2Min, g.DefineAsRegister(node), operand0, operand1);
} else {
- Emit(kIA32F64x2Min, g.DefineSameAsFirst(node), operand0, operand1,
- arraysize(temps), temps);
+ Emit(kIA32F64x2Min, g.DefineSameAsFirst(node), operand0, operand1);
}
}
void InstructionSelector::VisitF64x2Max(Node* node) {
IA32OperandGenerator g(this);
- InstructionOperand temps[] = {g.TempSimd128Register()};
- InstructionOperand operand0 = g.UseUniqueRegister(node->InputAt(0));
- InstructionOperand operand1 = g.UseUniqueRegister(node->InputAt(1));
+ InstructionOperand operand0 = g.UseRegister(node->InputAt(0));
+ InstructionOperand operand1 = g.UseRegister(node->InputAt(1));
if (IsSupported(AVX)) {
- Emit(kIA32F64x2Max, g.DefineAsRegister(node), operand0, operand1,
- arraysize(temps), temps);
+ Emit(kIA32F64x2Max, g.DefineAsRegister(node), operand0, operand1);
} else {
- Emit(kIA32F64x2Max, g.DefineSameAsFirst(node), operand0, operand1,
- arraysize(temps), temps);
+ Emit(kIA32F64x2Max, g.DefineSameAsFirst(node), operand0, operand1);
}
}
@@ -2378,7 +2380,7 @@ void InstructionSelector::VisitF64x2Splat(Node* node) {
}
void InstructionSelector::VisitF64x2ExtractLane(Node* node) {
- VisitRRISimd(this, node, kAVXF64x2ExtractLane, kSSEF64x2ExtractLane);
+ VisitRRISimd(this, node, kF64x2ExtractLane, kF64x2ExtractLane);
}
void InstructionSelector::VisitI64x2SplatI32Pair(Node* node) {
@@ -2406,7 +2408,10 @@ void InstructionSelector::VisitI64x2ReplaceLaneI32Pair(Node* node) {
void InstructionSelector::VisitI64x2Neg(Node* node) {
IA32OperandGenerator g(this);
- InstructionOperand operand0 = g.UseUnique(node->InputAt(0));
+ // If AVX unsupported, make sure dst != src to avoid a move.
+ InstructionOperand operand0 = IsSupported(AVX)
+ ? g.UseRegister(node->InputAt(0))
+ : g.UseUnique(node->InputAt(0));
Emit(kIA32I64x2Neg, g.DefineAsRegister(node), operand0);
}
@@ -2546,27 +2551,15 @@ SIMD_REPLACE_LANE_TYPE_OP(VISIT_SIMD_REPLACE_LANE)
#undef VISIT_SIMD_REPLACE_LANE
#undef SIMD_REPLACE_LANE_TYPE_OP
-// The difference between this and VISIT_SIMD_REPLACE_LANE is that this forces
-// operand2 to be UseRegister, because the codegen relies on insertps using
-// registers.
-// TODO(v8:9764) Remove this UseRegister requirement
-#define VISIT_SIMD_REPLACE_LANE_USE_REG(Type) \
- void InstructionSelector::Visit##Type##ReplaceLane(Node* node) { \
- IA32OperandGenerator g(this); \
- InstructionOperand operand0 = g.UseRegister(node->InputAt(0)); \
- InstructionOperand operand1 = \
- g.UseImmediate(OpParameter<int32_t>(node->op())); \
- InstructionOperand operand2 = g.UseUniqueRegister(node->InputAt(1)); \
- if (IsSupported(AVX)) { \
- Emit(kAVX##Type##ReplaceLane, g.DefineAsRegister(node), operand0, \
- operand1, operand2); \
- } else { \
- Emit(kSSE##Type##ReplaceLane, g.DefineSameAsFirst(node), operand0, \
- operand1, operand2); \
- } \
- }
-VISIT_SIMD_REPLACE_LANE_USE_REG(F64x2)
-#undef VISIT_SIMD_REPLACE_LANE_USE_REG
+void InstructionSelector::VisitF64x2ReplaceLane(Node* node) {
+ IA32OperandGenerator g(this);
+ int32_t lane = OpParameter<int32_t>(node->op());
+ // When no-AVX, define dst == src to save a move.
+ InstructionOperand dst =
+ IsSupported(AVX) ? g.DefineAsRegister(node) : g.DefineSameAsFirst(node);
+ Emit(kF64x2ReplaceLane, dst, g.UseRegister(node->InputAt(0)),
+ g.UseImmediate(lane), g.UseRegister(node->InputAt(1)));
+}
#define VISIT_SIMD_SHIFT_UNIFIED_SSE_AVX(Opcode) \
void InstructionSelector::Visit##Opcode(Node* node) { \
@@ -3159,6 +3152,12 @@ void InstructionSelector::VisitI64x2Abs(Node* node) {
VisitRRSimd(this, node, kIA32I64x2Abs, kIA32I64x2Abs);
}
+void InstructionSelector::AddOutputToSelectContinuation(OperandGenerator* g,
+ int first_input_index,
+ Node* node) {
+ UNREACHABLE();
+}
+
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
diff --git a/chromium/v8/src/compiler/backend/instruction-scheduler.h b/chromium/v8/src/compiler/backend/instruction-scheduler.h
index a7c1cc52460..c22190bd50f 100644
--- a/chromium/v8/src/compiler/backend/instruction-scheduler.h
+++ b/chromium/v8/src/compiler/backend/instruction-scheduler.h
@@ -6,15 +6,11 @@
#define V8_COMPILER_BACKEND_INSTRUCTION_SCHEDULER_H_
#include "src/base/optional.h"
+#include "src/base/utils/random-number-generator.h"
#include "src/compiler/backend/instruction.h"
#include "src/zone/zone-containers.h"
namespace v8 {
-
-namespace base {
-class RandomNumberGenerator;
-} // namespace base
-
namespace internal {
namespace compiler {
diff --git a/chromium/v8/src/compiler/backend/instruction-selector-impl.h b/chromium/v8/src/compiler/backend/instruction-selector-impl.h
index 7e1f183fb71..539ba5dc255 100644
--- a/chromium/v8/src/compiler/backend/instruction-selector-impl.h
+++ b/chromium/v8/src/compiler/backend/instruction-selector-impl.h
@@ -85,10 +85,12 @@ class OperandGenerator {
GetVReg(node)));
}
+ InstructionOperand DefineSameAsInput(Node* node, int input_index) {
+ return Define(node, UnallocatedOperand(GetVReg(node), input_index));
+ }
+
InstructionOperand DefineSameAsFirst(Node* node) {
- return Define(node,
- UnallocatedOperand(UnallocatedOperand::SAME_AS_FIRST_INPUT,
- GetVReg(node)));
+ return DefineSameAsInput(node, 0);
}
InstructionOperand DefineAsFixed(Node* node, Register reg) {
@@ -224,7 +226,7 @@ class OperandGenerator {
int AllocateVirtualRegister() { return sequence()->NextVirtualRegister(); }
InstructionOperand DefineSameAsFirstForVreg(int vreg) {
- return UnallocatedOperand(UnallocatedOperand::SAME_AS_FIRST_INPUT, vreg);
+ return UnallocatedOperand(UnallocatedOperand::SAME_AS_INPUT, vreg);
}
InstructionOperand DefineAsRegistertForVreg(int vreg) {
diff --git a/chromium/v8/src/compiler/backend/instruction-selector.cc b/chromium/v8/src/compiler/backend/instruction-selector.cc
index 5638ff1241a..c806ad8bbe7 100644
--- a/chromium/v8/src/compiler/backend/instruction-selector.cc
+++ b/chromium/v8/src/compiler/backend/instruction-selector.cc
@@ -9,6 +9,7 @@
#include "src/base/iterator.h"
#include "src/base/platform/wrappers.h"
#include "src/codegen/assembler-inl.h"
+#include "src/codegen/interface-descriptors-inl.h"
#include "src/codegen/tick-counter.h"
#include "src/compiler/backend/instruction-selector-impl.h"
#include "src/compiler/compiler-source-position-table.h"
@@ -802,11 +803,6 @@ size_t InstructionSelector::AddInputsToFrameStateDescriptor(
}
Instruction* InstructionSelector::EmitWithContinuation(
- InstructionCode opcode, FlagsContinuation* cont) {
- return EmitWithContinuation(opcode, 0, nullptr, 0, nullptr, cont);
-}
-
-Instruction* InstructionSelector::EmitWithContinuation(
InstructionCode opcode, InstructionOperand a, FlagsContinuation* cont) {
return EmitWithContinuation(opcode, 0, nullptr, 1, &a, cont);
}
@@ -880,8 +876,16 @@ Instruction* InstructionSelector::EmitWithContinuation(
AppendDeoptimizeArguments(&continuation_inputs_, cont->kind(),
cont->reason(), cont->feedback(),
FrameState{cont->frame_state()});
- } else if (cont->IsSet() || cont->IsSelect()) {
+ } else if (cont->IsSet()) {
continuation_outputs_.push_back(g.DefineAsRegister(cont->result()));
+ } else if (cont->IsSelect()) {
+ // The {Select} should put one of two values into the output register,
+ // depending on the result of the condition. The two result values are in
+ // the last two input slots, the {false_value} in {input_count - 2}, and the
+ // true_value in {input_count - 1}. The other inputs are used for the
+ // condition.
+ AddOutputToSelectContinuation(&g, static_cast<int>(input_count) - 2,
+ cont->result());
} else if (cont->IsTrap()) {
int trap_id = static_cast<int>(cont->trap_id());
continuation_inputs_.push_back(g.UseImmediate(trap_id));
@@ -1554,6 +1558,8 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsWord32(node), VisitWord32Popcnt(node);
case IrOpcode::kWord64Popcnt:
return MarkAsWord32(node), VisitWord64Popcnt(node);
+ case IrOpcode::kWord32Select:
+ return MarkAsWord32(node), VisitSelect(node);
case IrOpcode::kWord64And:
return MarkAsWord64(node), VisitWord64And(node);
case IrOpcode::kWord64Or:
@@ -1584,6 +1590,8 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsWord64(node), VisitInt64AbsWithOverflow(node);
case IrOpcode::kWord64Equal:
return VisitWord64Equal(node);
+ case IrOpcode::kWord64Select:
+ return MarkAsWord64(node), VisitSelect(node);
case IrOpcode::kInt32Add:
return MarkAsWord32(node), VisitInt32Add(node);
case IrOpcode::kInt32AddWithOverflow:
@@ -2842,7 +2850,7 @@ constexpr InstructionCode EncodeCallDescriptorFlags(
// Note: Not all bits of `flags` are preserved.
STATIC_ASSERT(CallDescriptor::kFlagsBitsEncodedInInstructionCode ==
MiscField::kSize);
- CONSTEXPR_DCHECK(Instruction::IsCallWithDescriptorFlags(opcode));
+ DCHECK(Instruction::IsCallWithDescriptorFlags(opcode));
return opcode | MiscField::encode(flags & MiscField::kMax);
}
@@ -2927,8 +2935,8 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
if (call_descriptor->NeedsCallerSavedRegisters()) {
SaveFPRegsMode mode = call_descriptor->NeedsCallerSavedFPRegisters()
- ? kSaveFPRegs
- : kDontSaveFPRegs;
+ ? SaveFPRegsMode::kSave
+ : SaveFPRegsMode::kIgnore;
Emit(kArchSaveCallerRegisters | MiscField::encode(static_cast<int>(mode)),
g.NoOutput());
}
@@ -3007,8 +3015,8 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
if (call_descriptor->NeedsCallerSavedRegisters()) {
SaveFPRegsMode mode = call_descriptor->NeedsCallerSavedFPRegisters()
- ? kSaveFPRegs
- : kDontSaveFPRegs;
+ ? SaveFPRegsMode::kSave
+ : SaveFPRegsMode::kIgnore;
Emit(
kArchRestoreCallerRegisters | MiscField::encode(static_cast<int>(mode)),
g.NoOutput());
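The IsSelect branch added in EmitWithContinuation relies on the input layout spelled out in its comment: the condition operands come first and the last two slots hold {false_value, true_value}, with the per-architecture AddOutputToSelectContinuation hook defining the result register (arm64 implements it; arm and ia32 keep it UNREACHABLE). A toy model of that layout, purely illustrative:

    #include <cstddef>
    #include <vector>

    int EvaluateSelect(const std::vector<int>& inputs, bool condition) {
      const std::size_t n = inputs.size();    // condition operands occupy [0, n - 2)
      const int false_value = inputs[n - 2];
      const int true_value = inputs[n - 1];
      return condition ? true_value : false_value;
    }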
diff --git a/chromium/v8/src/compiler/backend/instruction-selector.h b/chromium/v8/src/compiler/backend/instruction-selector.h
index 8984c05c3a6..8bfe7fff6f7 100644
--- a/chromium/v8/src/compiler/backend/instruction-selector.h
+++ b/chromium/v8/src/compiler/backend/instruction-selector.h
@@ -680,6 +680,9 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
bool CanProduceSignalingNaN(Node* node);
+ void AddOutputToSelectContinuation(OperandGenerator* g, int first_input_index,
+ Node* node);
+
// ===========================================================================
// ============= Vector instruction (SIMD) helper fns. =======================
// ===========================================================================
diff --git a/chromium/v8/src/compiler/backend/instruction.cc b/chromium/v8/src/compiler/backend/instruction.cc
index 43824e8fcb9..f90a249d9a3 100644
--- a/chromium/v8/src/compiler/backend/instruction.cc
+++ b/chromium/v8/src/compiler/backend/instruction.cc
@@ -155,8 +155,8 @@ std::ostream& operator<<(std::ostream& os, const InstructionOperand& op) {
return os << "(R)";
case UnallocatedOperand::MUST_HAVE_SLOT:
return os << "(S)";
- case UnallocatedOperand::SAME_AS_FIRST_INPUT:
- return os << "(1)";
+ case UnallocatedOperand::SAME_AS_INPUT:
+ return os << "(" << unalloc->input_index() << ")";
case UnallocatedOperand::REGISTER_OR_SLOT:
return os << "(-)";
case UnallocatedOperand::REGISTER_OR_SLOT_OR_CONSTANT:
@@ -247,6 +247,8 @@ std::ostream& operator<<(std::ostream& os, const InstructionOperand& op) {
case MachineRepresentation::kCompressed:
os << "|c";
break;
+ case MachineRepresentation::kMapWord:
+ UNREACHABLE();
}
return os << "]";
}
@@ -914,6 +916,7 @@ static MachineRepresentation FilterRepresentation(MachineRepresentation rep) {
case MachineRepresentation::kCompressed:
return rep;
case MachineRepresentation::kNone:
+ case MachineRepresentation::kMapWord:
break;
}
diff --git a/chromium/v8/src/compiler/backend/instruction.h b/chromium/v8/src/compiler/backend/instruction.h
index 88a114e08e2..33146f3c4f0 100644
--- a/chromium/v8/src/compiler/backend/instruction.h
+++ b/chromium/v8/src/compiler/backend/instruction.h
@@ -185,7 +185,7 @@ class UnallocatedOperand final : public InstructionOperand {
FIXED_FP_REGISTER,
MUST_HAVE_REGISTER,
MUST_HAVE_SLOT,
- SAME_AS_FIRST_INPUT
+ SAME_AS_INPUT
};
// Lifetime of operand inside the instruction.
@@ -208,6 +208,14 @@ class UnallocatedOperand final : public InstructionOperand {
value_ |= LifetimeField::encode(USED_AT_END);
}
+ UnallocatedOperand(int virtual_register, int input_index)
+ : UnallocatedOperand(virtual_register) {
+ value_ |= BasicPolicyField::encode(EXTENDED_POLICY);
+ value_ |= ExtendedPolicyField::encode(SAME_AS_INPUT);
+ value_ |= LifetimeField::encode(USED_AT_END);
+ value_ |= InputIndexField::encode(input_index);
+ }
+
UnallocatedOperand(BasicPolicy policy, int index, int virtual_register)
: UnallocatedOperand(virtual_register) {
DCHECK(policy == FIXED_SLOT);
@@ -270,7 +278,7 @@ class UnallocatedOperand final : public InstructionOperand {
}
bool HasSameAsInputPolicy() const {
return basic_policy() == EXTENDED_POLICY &&
- extended_policy() == SAME_AS_FIRST_INPUT;
+ extended_policy() == SAME_AS_INPUT;
}
bool HasFixedSlotPolicy() const { return basic_policy() == FIXED_SLOT; }
bool HasFixedRegisterPolicy() const {
@@ -300,6 +308,11 @@ class UnallocatedOperand final : public InstructionOperand {
return ExtendedPolicyField::decode(value_);
}
+ int input_index() const {
+ DCHECK(HasSameAsInputPolicy());
+ return InputIndexField::decode(value_);
+ }
+
// [fixed_slot_index]: Only for FIXED_SLOT.
int fixed_slot_index() const {
DCHECK(HasFixedSlotPolicy());
@@ -362,6 +375,7 @@ class UnallocatedOperand final : public InstructionOperand {
using HasSecondaryStorageField = base::BitField64<bool, 40, 1>;
using FixedRegisterField = base::BitField64<int, 41, 6>;
using SecondaryStorageField = base::BitField64<int, 47, 3>;
+ using InputIndexField = base::BitField64<int, 50, 3>;
private:
explicit UnallocatedOperand(int virtual_register)
@@ -545,6 +559,8 @@ class LocationOperand : public InstructionOperand {
case MachineRepresentation::kWord16:
case MachineRepresentation::kNone:
return false;
+ case MachineRepresentation::kMapWord:
+ break;
}
UNREACHABLE();
}
diff --git a/chromium/v8/src/compiler/backend/mid-tier-register-allocator.cc b/chromium/v8/src/compiler/backend/mid-tier-register-allocator.cc
index 394f319dc03..ee9cd3dddee 100644
--- a/chromium/v8/src/compiler/backend/mid-tier-register-allocator.cc
+++ b/chromium/v8/src/compiler/backend/mid-tier-register-allocator.cc
@@ -587,8 +587,7 @@ void VirtualRegisterData::AddSpillUse(int instr_index,
const InstructionBlock* block = data->GetBlock(instr_index);
if (CouldSpillOnEntryToDeferred(block)) {
- // TODO(1180335): Remove once crbug.com/1180335 is fixed.
- CHECK(HasSpillRange());
+ DCHECK(HasSpillRange());
data->block_state(block->rpo_number())
.deferred_blocks_region()
->DeferSpillOutputUntilEntry(vreg());
@@ -614,8 +613,7 @@ void VirtualRegisterData::AddDeferredSpillOutput(
AllocatedOperand allocated_op, int instr_index,
MidTierRegisterAllocationData* data) {
DCHECK(!NeedsSpillAtOutput());
- // TODO(1180335): Make DCHECK once crbug.com/1180335 is fixed.
- CHECK(HasSpillRange());
+ DCHECK(HasSpillRange());
spill_range_->AddDeferredSpillOutput(allocated_op, instr_index, data);
}
@@ -937,9 +935,8 @@ void RegisterState::Register::Use(int virtual_register, int instr_index) {
// A register can have many pending uses, but should only ever have a single
  // non-pending use, since any subsequent use will commit the preceding use
// first.
- // TODO(1180335): Make DCHECK once crbug.com/1180335 is fixed.
- CHECK(!is_allocated());
- CHECK(!is_shared());
+ DCHECK(!is_allocated());
+ DCHECK(!is_shared());
needs_gap_move_on_spill_ = true;
virtual_register_ = virtual_register;
last_use_instr_index_ = instr_index;
@@ -950,8 +947,7 @@ void RegisterState::Register::PendingUse(InstructionOperand* operand,
int virtual_register,
bool can_be_constant,
int instr_index) {
- // TODO(1180335): Make DCHECK once crbug.com/1180335 is fixed.
- CHECK(!was_spilled_while_shared());
+ DCHECK(!was_spilled_while_shared());
if (!is_allocated()) {
virtual_register_ = virtual_register;
last_use_instr_index_ = instr_index;
@@ -972,8 +968,7 @@ void RegisterState::Register::MarkAsPhiMove() {
void RegisterState::Register::AddDeferredBlockSpill(int instr_index,
bool on_exit, Zone* zone) {
- // TODO(1180335): Make DCHECK once crbug.com/1180335 is fixed.
- CHECK(is_allocated());
+ DCHECK(is_allocated());
if (!deferred_block_spills_) {
deferred_block_spills_.emplace(zone);
}
@@ -981,16 +976,14 @@ void RegisterState::Register::AddDeferredBlockSpill(int instr_index,
}
void RegisterState::Register::AddSharedUses(int shared_use_count) {
- // TODO(1180335): Make DCHECK once crbug.com/1180335 is fixed.
- CHECK(!was_spilled_while_shared());
+ DCHECK(!was_spilled_while_shared());
is_shared_ = true;
num_commits_required_ += shared_use_count;
}
void RegisterState::Register::CommitAtMerge() {
- // TODO(1180335): Make DCHECK once crbug.com/1180335 is fixed.
- CHECK(is_shared());
- CHECK(is_allocated());
+ DCHECK(is_shared());
+ DCHECK(is_allocated());
--num_commits_required_;
// We should still have commits required that will be resolved in the merge
// block.
@@ -999,9 +992,8 @@ void RegisterState::Register::CommitAtMerge() {
void RegisterState::Register::Commit(AllocatedOperand allocated_op,
MidTierRegisterAllocationData* data) {
- // TODO(1180335): Make DCHECK once crbug.com/1180335 is fixed.
- CHECK(is_allocated());
- CHECK_GT(num_commits_required_, 0);
+ DCHECK(is_allocated());
+ DCHECK_GT(num_commits_required_, 0);
if (--num_commits_required_ == 0) {
// Allocate all pending uses to |allocated_op| if this commit is non-shared,
@@ -1038,8 +1030,7 @@ void RegisterState::Register::Commit(AllocatedOperand allocated_op,
vreg_data.EmitDeferredSpillOutputs(data);
}
}
- // TODO(1180335): Make DCHECK once crbug.com/1180335 is fixed.
- CHECK_IMPLIES(num_commits_required_ > 0, is_shared());
+ DCHECK_IMPLIES(num_commits_required_ > 0, is_shared());
}
void RegisterState::Register::Spill(AllocatedOperand allocated_op,
@@ -1106,9 +1097,8 @@ void RegisterState::Register::SpillPendingUses(
void RegisterState::Register::SpillForDeferred(
AllocatedOperand allocated, int instr_index,
MidTierRegisterAllocationData* data) {
- // TODO(1180335): Make DCHECK once crbug.com/1180335 is fixed.
- CHECK(is_allocated());
- CHECK(is_shared());
+ DCHECK(is_allocated());
+ DCHECK(is_shared());
// Add a pending deferred spill, then commit the register (with the commit
  // being fulfilled by the deferred spill if the register is fully committed).
data->VirtualRegisterDataFor(virtual_register())
@@ -1120,8 +1110,7 @@ void RegisterState::Register::SpillForDeferred(
void RegisterState::Register::MoveToSpillSlotOnDeferred(
int virtual_register, int instr_index,
MidTierRegisterAllocationData* data) {
- // TODO(1180335): Make DCHECK once crbug.com/1180335 is fixed.
- CHECK(!was_spilled_while_shared());
+ DCHECK(!was_spilled_while_shared());
if (!is_allocated()) {
virtual_register_ = virtual_register;
last_use_instr_index_ = instr_index;
@@ -2168,8 +2157,7 @@ void SinglePassRegisterAllocator::AllocateDeferredBlockSpillOutput(
int instr_index, RpoNumber deferred_block,
VirtualRegisterData& virtual_register) {
DCHECK(data()->GetBlock(deferred_block)->IsDeferred());
- // TODO(1180335): Make DCHECK once crbug.com/1180335 is fixed.
- CHECK(virtual_register.HasSpillRange());
+ DCHECK(virtual_register.HasSpillRange());
if (!virtual_register.NeedsSpillAtOutput() &&
!DefinedAfter(virtual_register.vreg(), instr_index, UsePosition::kEnd)) {
// If a register has been assigned to the virtual register, and the virtual
diff --git a/chromium/v8/src/compiler/backend/mips/code-generator-mips.cc b/chromium/v8/src/compiler/backend/mips/code-generator-mips.cc
index 4066ba77e80..4e7b69e45b4 100644
--- a/chromium/v8/src/compiler/backend/mips/code-generator-mips.cc
+++ b/chromium/v8/src/compiler/backend/mips/code-generator-mips.cc
@@ -161,18 +161,16 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
}
void Generate() final {
- if (mode_ > RecordWriteMode::kValueIsPointer) {
- __ JumpIfSmi(value_, exit());
- }
__ CheckPageFlag(value_, scratch0_,
MemoryChunk::kPointersToHereAreInterestingMask, eq,
exit());
__ Addu(scratch1_, object_, index_);
RememberedSetAction const remembered_set_action =
- mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET
- : OMIT_REMEMBERED_SET;
- SaveFPRegsMode const save_fp_mode =
- frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
+ mode_ > RecordWriteMode::kValueIsMap ? RememberedSetAction::kEmit
+ : RememberedSetAction::kOmit;
+ SaveFPRegsMode const save_fp_mode = frame()->DidAllocateDoubleRegisters()
+ ? SaveFPRegsMode::kSave
+ : SaveFPRegsMode::kIgnore;
if (must_save_lr_) {
// We need to save and restore ra if the frame was elided.
__ Push(ra);
@@ -373,10 +371,10 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
} else { \
FrameScope scope(tasm(), StackFrame::MANUAL); \
__ Addu(a0, i.InputRegister(0), i.InputRegister(1)); \
- __ PushCallerSaved(kDontSaveFPRegs, v0, v1); \
+ __ PushCallerSaved(SaveFPRegsMode::kIgnore, v0, v1); \
__ PrepareCallCFunction(3, 0, kScratchReg); \
__ CallCFunction(ExternalReference::external(), 3, 0); \
- __ PopCallerSaved(kDontSaveFPRegs, v0, v1); \
+ __ PopCallerSaved(SaveFPRegsMode::kIgnore, v0, v1); \
} \
} while (0)
@@ -403,10 +401,10 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
} else { \
FrameScope scope(tasm(), StackFrame::MANUAL); \
__ Addu(a0, i.InputRegister(0), i.InputRegister(1)); \
- __ PushCallerSaved(kDontSaveFPRegs, v0, v1); \
+ __ PushCallerSaved(SaveFPRegsMode::kIgnore, v0, v1); \
__ PrepareCallCFunction(3, 0, kScratchReg); \
__ CallCFunction(ExternalReference::external(), 3, 0); \
- __ PopCallerSaved(kDontSaveFPRegs, v0, v1); \
+ __ PopCallerSaved(SaveFPRegsMode::kIgnore, v0, v1); \
} \
} while (0)
@@ -494,8 +492,8 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
__ Ll(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \
__ ExtractBits(i.OutputRegister(0), i.TempRegister(2), i.TempRegister(1), \
size, sign_extend); \
- __ ExtractBits(i.InputRegister(2), i.InputRegister(2), i.TempRegister(1), \
- size, sign_extend); \
+ __ ExtractBits(i.InputRegister(2), i.InputRegister(2), zero_reg, size, \
+ sign_extend); \
__ BranchShort(&exit, ne, i.InputRegister(2), \
Operand(i.OutputRegister(0))); \
__ InsertBits(i.TempRegister(2), i.InputRegister(3), i.TempRegister(1), \
@@ -746,7 +744,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchSaveCallerRegisters: {
fp_mode_ =
static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode()));
- DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs);
+ DCHECK(fp_mode_ == SaveFPRegsMode::kIgnore ||
+ fp_mode_ == SaveFPRegsMode::kSave);
// kReturnRegister0 should have been saved before entering the stub.
int bytes = __ PushCallerSaved(fp_mode_, kReturnRegister0);
DCHECK(IsAligned(bytes, kSystemPointerSize));
@@ -759,7 +758,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchRestoreCallerRegisters: {
DCHECK(fp_mode_ ==
static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode())));
- DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs);
+ DCHECK(fp_mode_ == SaveFPRegsMode::kIgnore ||
+ fp_mode_ == SaveFPRegsMode::kSave);
// Don't overwrite the returned value.
int bytes = __ PopCallerSaved(fp_mode_, kReturnRegister0);
frame_access_state()->IncreaseSPDelta(-(bytes / kSystemPointerSize));
@@ -781,7 +781,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
int offset = __ root_array_available() ? 68 : 80;
#endif // V8_ENABLE_WEBASSEMBLY
#if V8_HOST_ARCH_MIPS
- if (__ emit_debug_code()) {
+ if (FLAG_debug_code) {
offset += 16;
}
#endif
@@ -873,9 +873,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchRet:
AssembleReturn(instr->InputAt(0));
break;
- case kArchStackPointerGreaterThan:
- // Pseudo-instruction used for cmp/branch. No opcode emitted here.
+ case kArchStackPointerGreaterThan: {
+ Register lhs_register = sp;
+ uint32_t offset;
+ if (ShouldApplyOffsetToStackCheck(instr, &offset)) {
+ lhs_register = i.TempRegister(1);
+ __ Subu(lhs_register, sp, offset);
+ }
+ __ Sltu(i.TempRegister(0), i.InputRegister(0), lhs_register);
break;
+ }
case kArchStackCheckOffset:
__ Move(i.OutputRegister(), Smi::FromInt(GetStackCheckOffset()));
break;
@@ -906,6 +913,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DetermineStubCallMode());
__ Addu(kScratchReg, object, index);
__ sw(value, MemOperand(kScratchReg));
+ if (mode > RecordWriteMode::kValueIsPointer) {
+ __ JumpIfSmi(value, ool->exit());
+ }
__ CheckPageFlag(object, scratch0,
MemoryChunk::kPointersFromHereAreInterestingMask, ne,
ool->entry());
@@ -1922,10 +1932,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
FrameScope scope(tasm(), StackFrame::MANUAL);
__ Addu(a0, i.InputRegister(0), i.InputRegister(1));
- __ PushCallerSaved(kDontSaveFPRegs, v0, v1);
+ __ PushCallerSaved(SaveFPRegsMode::kIgnore, v0, v1);
__ PrepareCallCFunction(1, 0, kScratchReg);
__ CallCFunction(ExternalReference::atomic_pair_load_function(), 1, 0);
- __ PopCallerSaved(kDontSaveFPRegs, v0, v1);
+ __ PopCallerSaved(SaveFPRegsMode::kIgnore, v0, v1);
}
break;
}
@@ -1945,10 +1955,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
FrameScope scope(tasm(), StackFrame::MANUAL);
__ Addu(a0, i.InputRegister(0), i.InputRegister(1));
- __ PushCallerSaved(kDontSaveFPRegs);
+ __ PushCallerSaved(SaveFPRegsMode::kIgnore);
__ PrepareCallCFunction(3, 0, kScratchReg);
__ CallCFunction(ExternalReference::atomic_pair_store_function(), 3, 0);
- __ PopCallerSaved(kDontSaveFPRegs);
+ __ PopCallerSaved(SaveFPRegsMode::kIgnore);
}
break;
}
@@ -1986,12 +1996,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ sync();
} else {
FrameScope scope(tasm(), StackFrame::MANUAL);
- __ PushCallerSaved(kDontSaveFPRegs, v0, v1);
+ __ PushCallerSaved(SaveFPRegsMode::kIgnore, v0, v1);
__ PrepareCallCFunction(3, 0, kScratchReg);
__ Addu(a0, i.InputRegister(0), i.InputRegister(1));
__ CallCFunction(ExternalReference::atomic_pair_exchange_function(), 3,
0);
- __ PopCallerSaved(kDontSaveFPRegs, v0, v1);
+ __ PopCallerSaved(SaveFPRegsMode::kIgnore, v0, v1);
}
break;
case kMipsWord32AtomicPairCompareExchange: {
@@ -2016,13 +2026,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ sync();
} else {
FrameScope scope(tasm(), StackFrame::MANUAL);
- __ PushCallerSaved(kDontSaveFPRegs, v0, v1);
+ __ PushCallerSaved(SaveFPRegsMode::kIgnore, v0, v1);
__ PrepareCallCFunction(5, 0, kScratchReg);
__ addu(a0, i.InputRegister(0), i.InputRegister(1));
__ sw(i.InputRegister(5), MemOperand(sp, 16));
__ CallCFunction(
ExternalReference::atomic_pair_compare_exchange_function(), 5, 0);
- __ PopCallerSaved(kDontSaveFPRegs, v0, v1);
+ __ PopCallerSaved(SaveFPRegsMode::kIgnore, v0, v1);
}
break;
}
@@ -3652,7 +3662,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
}
return kSuccess;
-} // NOLINT(readability/fn_size)
+}
void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
Instruction* instr, FlagsCondition condition,
@@ -3704,13 +3714,11 @@ void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
__ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));
} else if (instr->arch_opcode() == kArchStackPointerGreaterThan) {
cc = FlagsConditionToConditionCmp(condition);
- Register lhs_register = sp;
- uint32_t offset;
- if (gen->ShouldApplyOffsetToStackCheck(instr, &offset)) {
- lhs_register = i.TempRegister(0);
- __ Subu(lhs_register, sp, offset);
+ DCHECK((cc == ls) || (cc == hi));
+ if (cc == ls) {
+ __ xori(i.TempRegister(0), i.TempRegister(0), 1);
}
- __ Branch(tlabel, cc, lhs_register, Operand(i.InputRegister(0)));
+ __ Branch(tlabel, ne, i.TempRegister(0), Operand(zero_reg));
} else if (instr->arch_opcode() == kMipsCmpS ||
instr->arch_opcode() == kMipsCmpD) {
bool predicate;
@@ -4020,6 +4028,13 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
}
}
return;
+ } else if (instr->arch_opcode() == kArchStackPointerGreaterThan) {
+ cc = FlagsConditionToConditionCmp(condition);
+ DCHECK((cc == ls) || (cc == hi));
+ if (cc == ls) {
+ __ xori(i.OutputRegister(), i.TempRegister(0), 1);
+ }
+ return;
} else {
PrintF("AssembleArchBoolean Unimplemented arch_opcode is : %d\n",
instr->arch_opcode());
@@ -4132,7 +4147,7 @@ void CodeGenerator::AssembleConstructFrame() {
// frame is still on the stack. Optimized code uses OSR values directly from
// the unoptimized frame. Thus, all that needs to be done is to allocate the
// remaining stack slots.
- if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
+ __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
required_slots -= osr_helper()->UnoptimizedFrameSlots();
ResetSpeculationPoison();
@@ -4234,7 +4249,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
if (parameter_slots != 0) {
if (additional_pop_count->IsImmediate()) {
DCHECK_EQ(g.ToConstant(additional_pop_count).ToInt32(), 0);
- } else if (__ emit_debug_code()) {
+ } else if (FLAG_debug_code) {
__ Assert(eq, AbortReason::kUnexpectedAdditionalPopValue,
g.ToRegister(additional_pop_count),
Operand(static_cast<int64_t>(0)));
diff --git a/chromium/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc b/chromium/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc
index 0b8a022014e..48635c9c15b 100644
--- a/chromium/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc
+++ b/chromium/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc
@@ -1302,7 +1302,7 @@ int MultiPushFPULatency() {
int PushCallerSavedLatency(SaveFPRegsMode fp_mode) {
int latency = MultiPushLatency();
- if (fp_mode == kSaveFPRegs) {
+ if (fp_mode == SaveFPRegsMode::kSave) {
latency += MultiPushFPULatency();
}
return latency;
@@ -1326,7 +1326,7 @@ int MultiPopLatency() {
int PopCallerSavedLatency(SaveFPRegsMode fp_mode) {
int latency = 0;
- if (fp_mode == kSaveFPRegs) {
+ if (fp_mode == SaveFPRegsMode::kSave) {
latency += MultiPopFPULatency();
}
return latency + MultiPopLatency();
diff --git a/chromium/v8/src/compiler/backend/mips/instruction-selector-mips.cc b/chromium/v8/src/compiler/backend/mips/instruction-selector-mips.cc
index dcea4c85660..b001578b888 100644
--- a/chromium/v8/src/compiler/backend/mips/instruction-selector-mips.cc
+++ b/chromium/v8/src/compiler/backend/mips/instruction-selector-mips.cc
@@ -372,6 +372,7 @@ void InstructionSelector::VisitLoad(Node* node) {
case MachineRepresentation::kCompressedPointer: // Fall through.
case MachineRepresentation::kCompressed: // Fall through.
case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kMapWord: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
}
@@ -457,6 +458,7 @@ void InstructionSelector::VisitStore(Node* node) {
case MachineRepresentation::kCompressedPointer: // Fall through.
case MachineRepresentation::kCompressed: // Fall through.
case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kMapWord: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
}
@@ -1432,6 +1434,7 @@ void InstructionSelector::VisitUnalignedLoad(Node* node) {
case MachineRepresentation::kCompressedPointer: // Fall through.
case MachineRepresentation::kCompressed: // Fall through.
case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kMapWord: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
}
@@ -1485,6 +1488,7 @@ void InstructionSelector::VisitUnalignedStore(Node* node) {
case MachineRepresentation::kCompressedPointer: // Fall through.
case MachineRepresentation::kCompressed: // Fall through.
case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kMapWord: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
}
@@ -1630,11 +1634,12 @@ void InstructionSelector::VisitStackPointerGreaterThan(
InstructionOperand* const outputs = nullptr;
const int output_count = 0;
+ // TempRegister(0) is used to store the comparison result.
// Applying an offset to this stack check requires a temp register. Offsets
// are only applied to the first stack check. If applying an offset, we must
// ensure the input and temp registers do not alias, thus kUniqueRegister.
- InstructionOperand temps[] = {g.TempRegister()};
- const int temp_count = (kind == StackCheckKind::kJSFunctionEntry ? 1 : 0);
+ InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
+ const int temp_count = (kind == StackCheckKind::kJSFunctionEntry ? 2 : 1);
const auto register_mode = (kind == StackCheckKind::kJSFunctionEntry)
? OperandGenerator::kUniqueRegister
: OperandGenerator::kRegister;
@@ -2500,6 +2505,12 @@ void InstructionSelector::VisitF64x2Pmax(Node* node) {
VisitUniqueRRR(this, kMipsF64x2Pmax, node);
}
+void InstructionSelector::AddOutputToSelectContinuation(OperandGenerator* g,
+ int first_input_index,
+ Node* node) {
+ UNREACHABLE();
+}
+
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
diff --git a/chromium/v8/src/compiler/backend/mips64/code-generator-mips64.cc b/chromium/v8/src/compiler/backend/mips64/code-generator-mips64.cc
index 6edb1539439..257ef1bca15 100644
--- a/chromium/v8/src/compiler/backend/mips64/code-generator-mips64.cc
+++ b/chromium/v8/src/compiler/backend/mips64/code-generator-mips64.cc
@@ -163,18 +163,16 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
}
void Generate() final {
- if (mode_ > RecordWriteMode::kValueIsPointer) {
- __ JumpIfSmi(value_, exit());
- }
__ CheckPageFlag(value_, scratch0_,
MemoryChunk::kPointersToHereAreInterestingMask, eq,
exit());
__ Daddu(scratch1_, object_, index_);
RememberedSetAction const remembered_set_action =
- mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET
- : OMIT_REMEMBERED_SET;
- SaveFPRegsMode const save_fp_mode =
- frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
+ mode_ > RecordWriteMode::kValueIsMap ? RememberedSetAction::kEmit
+ : RememberedSetAction::kOmit;
+ SaveFPRegsMode const save_fp_mode = frame()->DidAllocateDoubleRegisters()
+ ? SaveFPRegsMode::kSave
+ : SaveFPRegsMode::kIgnore;
if (must_save_lr_) {
// We need to save and restore ra if the frame was elided.
__ Push(ra);
@@ -465,8 +463,8 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
__ load_linked(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \
__ ExtractBits(i.OutputRegister(0), i.TempRegister(2), i.TempRegister(1), \
size, sign_extend); \
- __ ExtractBits(i.InputRegister(2), i.InputRegister(2), i.TempRegister(1), \
- size, sign_extend); \
+ __ ExtractBits(i.InputRegister(2), i.InputRegister(2), zero_reg, size, \
+ sign_extend); \
__ BranchShort(&exit, ne, i.InputRegister(2), \
Operand(i.OutputRegister(0))); \
__ InsertBits(i.TempRegister(2), i.InputRegister(3), i.TempRegister(1), \
@@ -711,7 +709,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchSaveCallerRegisters: {
fp_mode_ =
static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode()));
- DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs);
+ DCHECK(fp_mode_ == SaveFPRegsMode::kIgnore ||
+ fp_mode_ == SaveFPRegsMode::kSave);
// kReturnRegister0 should have been saved before entering the stub.
int bytes = __ PushCallerSaved(fp_mode_, kReturnRegister0);
DCHECK(IsAligned(bytes, kSystemPointerSize));
@@ -724,7 +723,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchRestoreCallerRegisters: {
DCHECK(fp_mode_ ==
static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode())));
- DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs);
+ DCHECK(fp_mode_ == SaveFPRegsMode::kIgnore ||
+ fp_mode_ == SaveFPRegsMode::kSave);
// Don't overwrite the returned value.
int bytes = __ PopCallerSaved(fp_mode_, kReturnRegister0);
frame_access_state()->IncreaseSPDelta(-(bytes / kSystemPointerSize));
@@ -746,7 +746,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
int offset = __ root_array_available() ? 76 : 88;
#endif // V8_ENABLE_WEBASSEMBLY
#if V8_HOST_ARCH_MIPS64
- if (__ emit_debug_code()) {
+ if (FLAG_debug_code) {
offset += 16;
}
#endif
@@ -835,9 +835,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchRet:
AssembleReturn(instr->InputAt(0));
break;
- case kArchStackPointerGreaterThan:
- // Pseudo-instruction used for cmp/branch. No opcode emitted here.
+ case kArchStackPointerGreaterThan: {
+ Register lhs_register = sp;
+ uint32_t offset;
+ if (ShouldApplyOffsetToStackCheck(instr, &offset)) {
+ lhs_register = i.TempRegister(1);
+ __ Dsubu(lhs_register, sp, offset);
+ }
+ __ Sltu(i.TempRegister(0), i.InputRegister(0), lhs_register);
break;
+ }
case kArchStackCheckOffset:
__ Move(i.OutputRegister(), Smi::FromInt(GetStackCheckOffset()));
break;
@@ -868,6 +875,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DetermineStubCallMode());
__ Daddu(kScratchReg, object, index);
__ Sd(value, MemOperand(kScratchReg));
+ if (mode > RecordWriteMode::kValueIsPointer) {
+ __ JumpIfSmi(value, ool->exit());
+ }
__ CheckPageFlag(object, scratch0,
MemoryChunk::kPointersFromHereAreInterestingMask, ne,
ool->entry());
@@ -3817,7 +3827,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
}
return kSuccess;
-} // NOLINT(readability/fn_size)
+}
#define UNSUPPORTED_COND(opcode, condition) \
StdoutStream{} << "Unsupported " << #opcode << " condition: \"" << condition \
@@ -3880,13 +3890,11 @@ void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
__ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));
} else if (instr->arch_opcode() == kArchStackPointerGreaterThan) {
cc = FlagsConditionToConditionCmp(condition);
- Register lhs_register = sp;
- uint32_t offset;
- if (gen->ShouldApplyOffsetToStackCheck(instr, &offset)) {
- lhs_register = i.TempRegister(0);
- __ Dsubu(lhs_register, sp, offset);
+ DCHECK((cc == ls) || (cc == hi));
+ if (cc == ls) {
+ __ xori(i.TempRegister(0), i.TempRegister(0), 1);
}
- __ Branch(tlabel, cc, lhs_register, Operand(i.InputRegister(0)));
+ __ Branch(tlabel, ne, i.TempRegister(0), Operand(zero_reg));
} else if (instr->arch_opcode() == kMips64CmpS ||
instr->arch_opcode() == kMips64CmpD) {
bool predicate;
@@ -4232,6 +4240,13 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
}
}
return;
+ } else if (instr->arch_opcode() == kArchStackPointerGreaterThan) {
+ cc = FlagsConditionToConditionCmp(condition);
+ DCHECK((cc == ls) || (cc == hi));
+ if (cc == ls) {
+ __ xori(i.OutputRegister(), i.TempRegister(0), 1);
+ }
+ return;
} else {
PrintF("AssembleArchBranch Unimplemented arch_opcode is : %d\n",
instr->arch_opcode());
@@ -4342,7 +4357,7 @@ void CodeGenerator::AssembleConstructFrame() {
// frame is still on the stack. Optimized code uses OSR values directly from
// the unoptimized frame. Thus, all that needs to be done is to allocate the
// remaining stack slots.
- if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
+ __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
required_slots -= osr_helper()->UnoptimizedFrameSlots();
ResetSpeculationPoison();
@@ -4446,7 +4461,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
if (parameter_slots != 0) {
if (additional_pop_count->IsImmediate()) {
DCHECK_EQ(g.ToConstant(additional_pop_count).ToInt32(), 0);
- } else if (__ emit_debug_code()) {
+ } else if (FLAG_debug_code) {
__ Assert(eq, AbortReason::kUnexpectedAdditionalPopValue,
g.ToRegister(additional_pop_count),
Operand(static_cast<int64_t>(0)));
diff --git a/chromium/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc b/chromium/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc
index 3b0e5b85fb4..c63e0aa3d36 100644
--- a/chromium/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc
+++ b/chromium/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc
@@ -932,7 +932,7 @@ int MultiPushFPULatency() {
int PushCallerSavedLatency(SaveFPRegsMode fp_mode) {
int latency = MultiPushLatency();
- if (fp_mode == kSaveFPRegs) {
+ if (fp_mode == SaveFPRegsMode::kSave) {
latency += MultiPushFPULatency();
}
return latency;
@@ -956,7 +956,7 @@ int MultiPopFPULatency() {
int PopCallerSavedLatency(SaveFPRegsMode fp_mode) {
int latency = MultiPopLatency();
- if (fp_mode == kSaveFPRegs) {
+ if (fp_mode == SaveFPRegsMode::kSave) {
latency += MultiPopFPULatency();
}
return latency;
diff --git a/chromium/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc b/chromium/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
index b0b3fec8f38..6d6b487ccdd 100644
--- a/chromium/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
+++ b/chromium/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
@@ -512,6 +512,7 @@ void InstructionSelector::VisitLoad(Node* node) {
break;
case MachineRepresentation::kCompressedPointer: // Fall through.
case MachineRepresentation::kCompressed: // Fall through.
+ case MachineRepresentation::kMapWord: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
}
@@ -589,6 +590,7 @@ void InstructionSelector::VisitStore(Node* node) {
break;
case MachineRepresentation::kCompressedPointer: // Fall through.
case MachineRepresentation::kCompressed: // Fall through.
+ case MachineRepresentation::kMapWord: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
}
@@ -1452,7 +1454,9 @@ void InstructionSelector::VisitBitcastWord32ToWord64(Node* node) {
void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
Node* value = node->InputAt(0);
- if (value->opcode() == IrOpcode::kLoad && CanCover(node, value)) {
+ if ((value->opcode() == IrOpcode::kLoad ||
+ value->opcode() == IrOpcode::kLoadImmutable) &&
+ CanCover(node, value)) {
// Generate sign-extending load.
LoadRepresentation load_rep = LoadRepresentationOf(value->op());
InstructionCode opcode = kArchNop;
@@ -1853,6 +1857,7 @@ void InstructionSelector::VisitUnalignedLoad(Node* node) {
case MachineRepresentation::kBit: // Fall through.
case MachineRepresentation::kCompressedPointer: // Fall through.
case MachineRepresentation::kCompressed: // Fall through.
+ case MachineRepresentation::kMapWord: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
}
@@ -1906,6 +1911,7 @@ void InstructionSelector::VisitUnalignedStore(Node* node) {
case MachineRepresentation::kBit: // Fall through.
case MachineRepresentation::kCompressedPointer: // Fall through.
case MachineRepresentation::kCompressed: // Fall through.
+ case MachineRepresentation::kMapWord: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
}
@@ -2265,11 +2271,12 @@ void InstructionSelector::VisitStackPointerGreaterThan(
InstructionOperand* const outputs = nullptr;
const int output_count = 0;
+ // TempRegister(0) is used to store the comparison result.
// Applying an offset to this stack check requires a temp register. Offsets
// are only applied to the first stack check. If applying an offset, we must
// ensure the input and temp registers do not alias, thus kUniqueRegister.
- InstructionOperand temps[] = {g.TempRegister()};
- const int temp_count = (kind == StackCheckKind::kJSFunctionEntry ? 1 : 0);
+ InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
+ const int temp_count = (kind == StackCheckKind::kJSFunctionEntry ? 2 : 1);
const auto register_mode = (kind == StackCheckKind::kJSFunctionEntry)
? OperandGenerator::kUniqueRegister
: OperandGenerator::kRegister;
@@ -3291,6 +3298,12 @@ VISIT_EXTADD_PAIRWISE(I32x4ExtAddPairwiseI16x8S, MSAS16)
VISIT_EXTADD_PAIRWISE(I32x4ExtAddPairwiseI16x8U, MSAU16)
#undef VISIT_EXTADD_PAIRWISE
+void InstructionSelector::AddOutputToSelectContinuation(OperandGenerator* g,
+ int first_input_index,
+ Node* node) {
+ UNREACHABLE();
+}
+
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
diff --git a/chromium/v8/src/compiler/backend/ppc/OWNERS b/chromium/v8/src/compiler/backend/ppc/OWNERS
deleted file mode 100644
index 02c2cd757c9..00000000000
--- a/chromium/v8/src/compiler/backend/ppc/OWNERS
+++ /dev/null
@@ -1,5 +0,0 @@
-junyan@redhat.com
-joransiu@ca.ibm.com
-midawson@redhat.com
-mfarazma@redhat.com
-vasili.skurydzin@ibm.com
diff --git a/chromium/v8/src/compiler/backend/ppc/code-generator-ppc.cc b/chromium/v8/src/compiler/backend/ppc/code-generator-ppc.cc
index 334d318f02e..2dfae5d2d6a 100644
--- a/chromium/v8/src/compiler/backend/ppc/code-generator-ppc.cc
+++ b/chromium/v8/src/compiler/backend/ppc/code-generator-ppc.cc
@@ -171,9 +171,6 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
void Generate() final {
ConstantPoolUnavailableScope constant_pool_unavailable(tasm());
- if (mode_ > RecordWriteMode::kValueIsPointer) {
- __ JumpIfSmi(value_, exit());
- }
if (COMPRESS_POINTERS_BOOL) {
__ DecompressTaggedPointer(value_, value_);
}
@@ -187,10 +184,11 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
__ add(scratch1_, object_, offset_);
}
RememberedSetAction const remembered_set_action =
- mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET
- : OMIT_REMEMBERED_SET;
- SaveFPRegsMode const save_fp_mode =
- frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
+ mode_ > RecordWriteMode::kValueIsMap ? RememberedSetAction::kEmit
+ : RememberedSetAction::kOmit;
+ SaveFPRegsMode const save_fp_mode = frame()->DidAllocateDoubleRegisters()
+ ? SaveFPRegsMode::kSave
+ : SaveFPRegsMode::kIgnore;
if (must_save_lr_) {
// We need to save and restore lr if the frame was elided.
__ mflr(scratch0_);
@@ -824,8 +822,8 @@ void CodeGenerator::BailoutIfDeoptimized() {
int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
__ LoadTaggedPointerField(
r11, MemOperand(kJavaScriptCallCodeStartRegister, offset));
- __ LoadWordArith(
- r11, FieldMemOperand(r11, CodeDataContainer::kKindSpecificFlagsOffset));
+ __ LoadS32(r11,
+ FieldMemOperand(r11, CodeDataContainer::kKindSpecificFlagsOffset));
__ TestBit(r11, Code::kMarkedForDeoptimizationBit);
__ Jump(BUILTIN_CODE(isolate(), CompileLazyDeoptimizedCode),
RelocInfo::CODE_TARGET, ne, cr0);
@@ -981,7 +979,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchSaveCallerRegisters: {
fp_mode_ =
static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode()));
- DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs);
+ DCHECK(fp_mode_ == SaveFPRegsMode::kIgnore ||
+ fp_mode_ == SaveFPRegsMode::kSave);
// kReturnRegister0 should have been saved before entering the stub.
int bytes = __ PushCallerSaved(fp_mode_, kReturnRegister0);
DCHECK(IsAligned(bytes, kSystemPointerSize));
@@ -994,7 +993,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchRestoreCallerRegisters: {
DCHECK(fp_mode_ ==
static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode())));
- DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs);
+ DCHECK(fp_mode_ == SaveFPRegsMode::kIgnore ||
+ fp_mode_ == SaveFPRegsMode::kSave);
// Don't overwrite the returned value.
int bytes = __ PopCallerSaved(fp_mode_, kReturnRegister0);
frame_access_state()->IncreaseSPDelta(-(bytes / kSystemPointerSize));
@@ -1130,7 +1130,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kArchParentFramePointer:
if (frame_access_state()->has_frame()) {
- __ LoadP(i.OutputRegister(), MemOperand(fp, 0));
+ __ LoadU64(i.OutputRegister(), MemOperand(fp, 0));
} else {
__ mr(i.OutputRegister(), fp);
}
@@ -1192,6 +1192,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DetermineStubCallMode(), &unwinding_info_writer_);
__ StoreTaggedFieldX(value, MemOperand(object, offset), r0);
}
+ if (mode > RecordWriteMode::kValueIsPointer) {
+ __ JumpIfSmi(value, ool->exit());
+ }
__ CheckPageFlag(object, scratch0,
MemoryChunk::kPointersFromHereAreInterestingMask, ne,
ool->entry());
@@ -1222,11 +1225,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
DCHECK_EQ(MachineRepresentation::kSimd128, op->representation());
__ mov(ip, Operand(offset));
- __ LoadSimd128(i.OutputSimd128Register(), MemOperand(fp, ip), r0,
- kScratchSimd128Reg);
+ __ LoadSimd128(i.OutputSimd128Register(), MemOperand(fp, ip));
}
} else {
- __ LoadP(i.OutputRegister(), MemOperand(fp, offset), r0);
+ __ LoadU64(i.OutputRegister(), MemOperand(fp, offset), r0);
}
break;
}
@@ -1748,8 +1750,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case MachineRepresentation::kSimd128:
__ addi(sp, sp, Operand(-kSimd128Size));
- __ StoreSimd128(i.InputSimd128Register(1), MemOperand(r0, sp), r0,
- kScratchSimd128Reg);
+ __ StoreSimd128(i.InputSimd128Register(1), MemOperand(r0, sp));
break;
default:
__ StorePU(i.InputRegister(1), MemOperand(sp, -kSystemPointerSize),
@@ -1791,8 +1792,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
DCHECK_EQ(MachineRepresentation::kSimd128, op->representation());
__ mov(ip, Operand(slot * kSystemPointerSize));
- __ StoreSimd128(i.InputSimd128Register(0), MemOperand(ip, sp), r0,
- kScratchSimd128Reg);
+ __ StoreSimd128(i.InputSimd128Register(0), MemOperand(ip, sp));
}
} else {
__ StoreP(i.InputRegister(0), MemOperand(sp, slot * kSystemPointerSize),
@@ -2057,9 +2057,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
AddressingMode mode = kMode_None;
MemOperand operand = i.MemoryOperand(&mode);
bool is_atomic = i.InputInt32(2);
- // lvx only supports MRR.
DCHECK_EQ(mode, kMode_MRR);
- __ LoadSimd128(result, operand, r0, kScratchSimd128Reg);
+ __ LoadSimd128(result, operand);
if (is_atomic) __ lwsync();
DCHECK_EQ(LeaveRC, i.OutputRCBit());
break;
@@ -2095,9 +2094,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Simd128Register value = i.InputSimd128Register(index);
bool is_atomic = i.InputInt32(3);
if (is_atomic) __ lwsync();
- // stvx only supports MRR.
DCHECK_EQ(mode, kMode_MRR);
- __ StoreSimd128(value, operand, r0, kScratchSimd128Reg);
+ __ StoreSimd128(value, operand);
if (is_atomic) __ sync();
DCHECK_EQ(LeaveRC, i.OutputRCBit());
break;
@@ -2479,21 +2477,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
- case kPPC_I64x2MinS: {
- __ vminsd(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1));
- break;
- }
case kPPC_I32x4MinS: {
__ vminsw(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
- case kPPC_I64x2MinU: {
- __ vminud(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1));
- break;
- }
case kPPC_I32x4MinU: {
__ vminuw(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
@@ -2519,21 +2507,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
- case kPPC_I64x2MaxS: {
- __ vmaxsd(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1));
- break;
- }
case kPPC_I32x4MaxS: {
__ vmaxsw(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
- case kPPC_I64x2MaxU: {
- __ vmaxud(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1));
- break;
- }
case kPPC_I32x4MaxU: {
__ vmaxuw(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
@@ -2662,26 +2640,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kPPC_I64x2GeS: {
- __ vcmpequd(kScratchSimd128Reg, i.InputSimd128Register(0),
- i.InputSimd128Register(1));
- __ vcmpgtsd(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1));
- __ vor(i.OutputSimd128Register(), i.OutputSimd128Register(),
- kScratchSimd128Reg);
+ __ vcmpgtsd(kScratchSimd128Reg, i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
+ __ vnor(i.OutputSimd128Register(), kScratchSimd128Reg,
+ kScratchSimd128Reg);
break;
}
case kPPC_I32x4GeS: {
- __ vcmpequw(kScratchSimd128Reg, i.InputSimd128Register(0),
- i.InputSimd128Register(1));
- __ vcmpgtsw(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1));
- __ vor(i.OutputSimd128Register(), i.OutputSimd128Register(),
- kScratchSimd128Reg);
- break;
- }
- case kPPC_I64x2GtU: {
- __ vcmpgtud(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1));
+ __ vcmpgtsw(kScratchSimd128Reg, i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
+ __ vnor(i.OutputSimd128Register(), kScratchSimd128Reg,
+ kScratchSimd128Reg);
break;
}
case kPPC_I32x4GtU: {
@@ -2690,16 +2659,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
- case kPPC_I64x2GeU: {
- __ vcmpequd(kScratchSimd128Reg, i.InputSimd128Register(0),
- i.InputSimd128Register(1));
- __ vcmpgtud(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1));
- __ vor(i.OutputSimd128Register(), i.OutputSimd128Register(),
- kScratchSimd128Reg);
-
- break;
- }
case kPPC_I32x4GeU: {
__ vcmpequw(kScratchSimd128Reg, i.InputSimd128Register(0),
i.InputSimd128Register(1));
@@ -2715,12 +2674,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kPPC_I16x8GeS: {
- __ vcmpequh(kScratchSimd128Reg, i.InputSimd128Register(0),
- i.InputSimd128Register(1));
- __ vcmpgtsh(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1));
- __ vor(i.OutputSimd128Register(), i.OutputSimd128Register(),
- kScratchSimd128Reg);
+ __ vcmpgtsh(kScratchSimd128Reg, i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
+ __ vnor(i.OutputSimd128Register(), kScratchSimd128Reg,
+ kScratchSimd128Reg);
break;
}
case kPPC_I16x8GtU: {
@@ -2743,12 +2700,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kPPC_I8x16GeS: {
- __ vcmpequb(kScratchSimd128Reg, i.InputSimd128Register(0),
- i.InputSimd128Register(1));
- __ vcmpgtsb(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1));
- __ vor(i.OutputSimd128Register(), i.OutputSimd128Register(),
- kScratchSimd128Reg);
+ __ vcmpgtsb(kScratchSimd128Reg, i.InputSimd128Register(1),
+ i.InputSimd128Register(0));
+ __ vnor(i.OutputSimd128Register(), kScratchSimd128Reg,
+ kScratchSimd128Reg);
break;
}
case kPPC_I8x16GtU: {
@@ -2840,15 +2795,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kPPC_S128Const: {
- Simd128Register dst = i.OutputSimd128Register();
- constexpr int lane_width_in_bytes = 8;
uint64_t low = make_uint64(i.InputUint32(1), i.InputUint32(0));
uint64_t high = make_uint64(i.InputUint32(3), i.InputUint32(2));
__ mov(r0, Operand(low));
__ mov(ip, Operand(high));
- __ mtvsrd(dst, ip);
- __ mtvsrd(kScratchSimd128Reg, r0);
- __ vinsertd(dst, kScratchSimd128Reg, Operand(1 * lane_width_in_bytes));
+ __ mtvsrdd(i.OutputSimd128Register(), ip, r0);
break;
}
case kPPC_S128Zero: {
@@ -2908,24 +2859,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kPPC_I64x2Neg: {
- constexpr int lane_width_in_bytes = 8;
- Simd128Register tempFPReg1 = i.ToSimd128Register(instr->TempAt(0));
- __ li(kScratchReg, Operand(1));
- __ mtvsrd(kScratchSimd128Reg, kScratchReg);
- __ vinsertd(kScratchSimd128Reg, kScratchSimd128Reg,
- Operand(1 * lane_width_in_bytes));
- // Perform negation.
- __ vnor(tempFPReg1, i.InputSimd128Register(0), i.InputSimd128Register(0));
- __ vaddudm(i.OutputSimd128Register(), tempFPReg1, kScratchSimd128Reg);
+ __ vnegd(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
case kPPC_I32x4Neg: {
- Simd128Register tempFPReg1 = i.ToSimd128Register(instr->TempAt(0));
- __ li(ip, Operand(1));
- __ mtvsrd(kScratchSimd128Reg, ip);
- __ vspltw(kScratchSimd128Reg, kScratchSimd128Reg, Operand(1));
- __ vnor(tempFPReg1, i.InputSimd128Register(0), i.InputSimd128Register(0));
- __ vadduwm(i.OutputSimd128Register(), kScratchSimd128Reg, tempFPReg1);
+ __ vnegw(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
case kPPC_I64x2Abs: {
@@ -3170,15 +3108,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kPPC_I8x16Shuffle: {
- constexpr int lane_width_in_bytes = 8;
Simd128Register dst = i.OutputSimd128Register(),
src0 = i.InputSimd128Register(0),
src1 = i.InputSimd128Register(1);
- __ mov(r0, Operand(make_uint64(i.InputUint32(3), i.InputUint32(2))));
- __ mov(ip, Operand(make_uint64(i.InputUint32(5), i.InputUint32(4))));
- __ mtvsrd(kScratchSimd128Reg, r0);
- __ mtvsrd(dst, ip);
- __ vinsertd(dst, kScratchSimd128Reg, Operand(1 * lane_width_in_bytes));
+ uint64_t low = make_uint64(i.InputUint32(3), i.InputUint32(2));
+ uint64_t high = make_uint64(i.InputUint32(5), i.InputUint32(4));
+ __ mov(r0, Operand(low));
+ __ mov(ip, Operand(high));
+ __ mtvsrdd(dst, ip, r0);
__ vperm(dst, src0, src1, dst);
break;
}
@@ -3226,24 +3163,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Simd128Register dst = i.OutputSimd128Register(),
src0 = i.InputSimd128Register(0),
src1 = i.InputSimd128Register(1),
- tempFPReg1 = i.ToSimd128Register(instr->TempAt(0)),
- tempFPReg2 = i.ToSimd128Register(instr->TempAt(1));
+ tempFPReg1 = i.ToSimd128Register(instr->TempAt(0));
// Saturate the indices to 5 bits. Input indices more than 31 should
// return 0.
- __ xxspltib(tempFPReg2, Operand(31));
- __ vminub(tempFPReg2, src1, tempFPReg2);
- __ addi(sp, sp, Operand(-16));
- __ stxvd(src0, MemOperand(r0, sp));
- __ ldbrx(r0, MemOperand(r0, sp));
- __ li(ip, Operand(8));
- __ ldbrx(ip, MemOperand(ip, sp));
- __ stdx(ip, MemOperand(r0, sp));
- __ li(ip, Operand(8));
- __ stdx(r0, MemOperand(ip, sp));
- __ lxvd(kScratchSimd128Reg, MemOperand(r0, sp));
- __ addi(sp, sp, Operand(16));
- __ vxor(tempFPReg1, tempFPReg1, tempFPReg1);
- __ vperm(dst, kScratchSimd128Reg, tempFPReg1, tempFPReg2);
+ __ xxspltib(tempFPReg1, Operand(31));
+ __ vminub(tempFPReg1, src1, tempFPReg1);
+      // Input needs to be reversed.
+ __ xxbrq(dst, src0);
+ __ vxor(kScratchSimd128Reg, kScratchSimd128Reg, kScratchSimd128Reg);
+ __ vperm(dst, dst, kScratchSimd128Reg, tempFPReg1);
break;
}
case kPPC_F64x2Qfma: {
@@ -3871,7 +3799,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
UNREACHABLE();
}
return kSuccess;
-} // NOLINT(readability/fn_size)
+}
// Assembles branches after an instruction.
void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
@@ -4174,7 +4102,7 @@ void CodeGenerator::AssembleConstructFrame() {
// frame is still on the stack. Optimized code uses OSR values directly from
// the unoptimized frame. Thus, all that needs to be done is to allocate the
// remaining stack slots.
- if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
+ __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
required_slots -= osr_helper()->UnoptimizedFrameSlots();
ResetSpeculationPoison();
@@ -4200,11 +4128,11 @@ void CodeGenerator::AssembleConstructFrame() {
// check in the condition code.
if ((required_slots * kSystemPointerSize) < (FLAG_stack_size * 1024)) {
Register scratch = ip;
- __ LoadP(
+ __ LoadU64(
scratch,
FieldMemOperand(kWasmInstanceRegister,
WasmInstanceObject::kRealStackLimitAddressOffset));
- __ LoadP(scratch, MemOperand(scratch), r0);
+ __ LoadU64(scratch, MemOperand(scratch), r0);
__ Add(scratch, scratch, required_slots * kSystemPointerSize, r0);
__ cmpl(sp, scratch);
__ bge(&done);
@@ -4284,7 +4212,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
if (parameter_slots != 0) {
if (additional_pop_count->IsImmediate()) {
DCHECK_EQ(g.ToConstant(additional_pop_count).ToInt32(), 0);
- } else if (__ emit_debug_code()) {
+ } else if (FLAG_debug_code) {
__ cmpi(g.ToRegister(additional_pop_count), Operand(0));
__ Assert(eq, AbortReason::kUnexpectedAdditionalPopValue);
}
@@ -4315,7 +4243,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
}
if (drop_jsargs) {
// Get the actual argument count.
- __ LoadP(argc_reg, MemOperand(fp, StandardFrameConstants::kArgCOffset));
+ __ LoadU64(argc_reg, MemOperand(fp, StandardFrameConstants::kArgCOffset));
}
AssembleDeconstructFrame();
}
@@ -4327,7 +4255,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
__ addi(argc_reg, argc_reg, Operand(1)); // Also pop the receiver.
if (parameter_slots > 1) {
Label skip;
- __ cmpi(argc_reg, Operand(parameter_slots));
+ __ Cmpi(argc_reg, Operand(parameter_slots), r0);
__ bgt(&skip);
__ mov(argc_reg, Operand(parameter_slots));
__ bind(&skip);
@@ -4378,10 +4306,10 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
DCHECK(destination->IsRegister() || destination->IsStackSlot());
MemOperand src = g.ToMemOperand(source);
if (destination->IsRegister()) {
- __ LoadP(g.ToRegister(destination), src, r0);
+ __ LoadU64(g.ToRegister(destination), src, r0);
} else {
Register temp = kScratchReg;
- __ LoadP(temp, src, r0);
+ __ LoadU64(temp, src, r0);
__ StoreP(temp, g.ToMemOperand(destination), r0);
}
} else if (source->IsConstant()) {
@@ -4494,8 +4422,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
DCHECK(destination->IsSimd128StackSlot());
MemOperand dst = g.ToMemOperand(destination);
__ mov(ip, Operand(dst.offset()));
- __ StoreSimd128(g.ToSimd128Register(source), MemOperand(dst.ra(), ip),
- r0, kScratchSimd128Reg);
+ __ StoreSimd128(g.ToSimd128Register(source), MemOperand(dst.ra(), ip));
}
} else {
DoubleRegister src = g.ToDoubleRegister(source);
@@ -4526,7 +4453,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
MemOperand src = g.ToMemOperand(source);
__ mov(ip, Operand(src.offset()));
__ LoadSimd128(g.ToSimd128Register(destination),
- MemOperand(src.ra(), ip), r0, kScratchSimd128Reg);
+ MemOperand(src.ra(), ip));
}
} else {
LocationOperand* op = LocationOperand::cast(source);
@@ -4541,15 +4468,15 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
DCHECK_EQ(MachineRepresentation::kSimd128, op->representation());
// push v0, to be used as scratch
__ addi(sp, sp, Operand(-kSimd128Size));
- __ StoreSimd128(v0, MemOperand(r0, sp), r0, kScratchSimd128Reg);
+ __ StoreSimd128(v0, MemOperand(r0, sp));
MemOperand src = g.ToMemOperand(source);
MemOperand dst = g.ToMemOperand(destination);
__ mov(ip, Operand(src.offset()));
- __ LoadSimd128(v0, MemOperand(src.ra(), ip), r0, kScratchSimd128Reg);
+ __ LoadSimd128(v0, MemOperand(src.ra(), ip));
__ mov(ip, Operand(dst.offset()));
- __ StoreSimd128(v0, MemOperand(dst.ra(), ip), r0, kScratchSimd128Reg);
+ __ StoreSimd128(v0, MemOperand(dst.ra(), ip));
// restore v0
- __ LoadSimd128(v0, MemOperand(r0, sp), ip, kScratchSimd128Reg);
+ __ LoadSimd128(v0, MemOperand(r0, sp));
__ addi(sp, sp, Operand(kSimd128Size));
}
}
diff --git a/chromium/v8/src/compiler/backend/ppc/instruction-codes-ppc.h b/chromium/v8/src/compiler/backend/ppc/instruction-codes-ppc.h
index 787cc2a27da..ccbce18045c 100644
--- a/chromium/v8/src/compiler/backend/ppc/instruction-codes-ppc.h
+++ b/chromium/v8/src/compiler/backend/ppc/instruction-codes-ppc.h
@@ -255,15 +255,9 @@ namespace compiler {
V(PPC_I64x2Add) \
V(PPC_I64x2Sub) \
V(PPC_I64x2Mul) \
- V(PPC_I64x2MinS) \
- V(PPC_I64x2MinU) \
- V(PPC_I64x2MaxS) \
- V(PPC_I64x2MaxU) \
V(PPC_I64x2Eq) \
V(PPC_I64x2Ne) \
V(PPC_I64x2GtS) \
- V(PPC_I64x2GtU) \
- V(PPC_I64x2GeU) \
V(PPC_I64x2GeS) \
V(PPC_I64x2Shl) \
V(PPC_I64x2ShrS) \
diff --git a/chromium/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc b/chromium/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc
index 749905a0551..e6c59fef222 100644
--- a/chromium/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc
+++ b/chromium/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc
@@ -177,15 +177,9 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kPPC_I64x2Add:
case kPPC_I64x2Sub:
case kPPC_I64x2Mul:
- case kPPC_I64x2MinS:
- case kPPC_I64x2MinU:
- case kPPC_I64x2MaxS:
- case kPPC_I64x2MaxU:
case kPPC_I64x2Eq:
case kPPC_I64x2Ne:
case kPPC_I64x2GtS:
- case kPPC_I64x2GtU:
- case kPPC_I64x2GeU:
case kPPC_I64x2GeS:
case kPPC_I64x2Shl:
case kPPC_I64x2ShrS:
diff --git a/chromium/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc b/chromium/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
index 849723bdac5..d4ef1086997 100644
--- a/chromium/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
+++ b/chromium/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
@@ -224,6 +224,7 @@ void InstructionSelector::VisitLoad(Node* node) {
// Vectors do not support MRI mode, only MRR is available.
mode = kNoImmediate;
break;
+ case MachineRepresentation::kMapWord: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
}
@@ -356,6 +357,7 @@ void InstructionSelector::VisitStore(Node* node) {
// Vectors do not support MRI mode, only MRR is available.
mode = kNoImmediate;
break;
+ case MachineRepresentation::kMapWord: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
}
@@ -2642,6 +2644,12 @@ void InstructionSelector::VisitStoreLane(Node* node) {
Emit(opcode | AddressingModeField::encode(kMode_MRR), 0, nullptr, 4, inputs);
}
+void InstructionSelector::AddOutputToSelectContinuation(OperandGenerator* g,
+ int first_input_index,
+ Node* node) {
+ UNREACHABLE();
+}
+
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
diff --git a/chromium/v8/src/compiler/backend/register-allocation.h b/chromium/v8/src/compiler/backend/register-allocation.h
index f4a7558b04a..37173da1fc3 100644
--- a/chromium/v8/src/compiler/backend/register-allocation.h
+++ b/chromium/v8/src/compiler/backend/register-allocation.h
@@ -65,6 +65,7 @@ inline int ByteWidthForStackSlot(MachineRepresentation rep) {
case MachineRepresentation::kSimd128:
return kSimd128Size;
case MachineRepresentation::kNone:
+ case MachineRepresentation::kMapWord:
break;
}
UNREACHABLE();
diff --git a/chromium/v8/src/compiler/backend/register-allocator-verifier.cc b/chromium/v8/src/compiler/backend/register-allocator-verifier.cc
index 1587f0ee182..b4099c5fad5 100644
--- a/chromium/v8/src/compiler/backend/register-allocator-verifier.cc
+++ b/chromium/v8/src/compiler/backend/register-allocator-verifier.cc
@@ -72,7 +72,7 @@ RegisterAllocatorVerifier::RegisterAllocatorVerifier(
constraints_.reserve(sequence->instructions().size());
// TODO(dcarney): model unique constraints.
// Construct OperandConstraints for all InstructionOperands, eliminating
- // kSameAsFirst along the way.
+ // kSameAsInput along the way.
for (const Instruction* instr : sequence->instructions()) {
// All gaps should be totally unallocated at this point.
VerifyEmptyGaps(instr);
@@ -90,10 +90,11 @@ RegisterAllocatorVerifier::RegisterAllocatorVerifier(
}
for (size_t i = 0; i < instr->OutputCount(); ++i, ++count) {
BuildConstraint(instr->OutputAt(i), &op_constraints[count]);
- if (op_constraints[count].type_ == kSameAsFirst) {
- CHECK_LT(0, instr->InputCount());
- op_constraints[count].type_ = op_constraints[0].type_;
- op_constraints[count].value_ = op_constraints[0].value_;
+ if (op_constraints[count].type_ == kSameAsInput) {
+ int input_index = op_constraints[count].value_;
+ CHECK_LT(input_index, instr->InputCount());
+ op_constraints[count].type_ = op_constraints[input_index].type_;
+ op_constraints[count].value_ = op_constraints[input_index].value_;
}
VerifyOutput(op_constraints[count]);
}
@@ -105,7 +106,7 @@ RegisterAllocatorVerifier::RegisterAllocatorVerifier(
void RegisterAllocatorVerifier::VerifyInput(
const OperandConstraint& constraint) {
- CHECK_NE(kSameAsFirst, constraint.type_);
+ CHECK_NE(kSameAsInput, constraint.type_);
if (constraint.type_ != kImmediate) {
CHECK_NE(InstructionOperand::kInvalidVirtualRegister,
constraint.virtual_register_);
@@ -114,7 +115,7 @@ void RegisterAllocatorVerifier::VerifyInput(
void RegisterAllocatorVerifier::VerifyTemp(
const OperandConstraint& constraint) {
- CHECK_NE(kSameAsFirst, constraint.type_);
+ CHECK_NE(kSameAsInput, constraint.type_);
CHECK_NE(kImmediate, constraint.type_);
CHECK_NE(kConstant, constraint.type_);
}
@@ -212,8 +213,9 @@ void RegisterAllocatorVerifier::BuildConstraint(const InstructionOperand* op,
constraint->value_ =
ElementSizeLog2Of(sequence()->GetRepresentation(vreg));
break;
- case UnallocatedOperand::SAME_AS_FIRST_INPUT:
- constraint->type_ = kSameAsFirst;
+ case UnallocatedOperand::SAME_AS_INPUT:
+ constraint->type_ = kSameAsInput;
+ constraint->value_ = unallocated->input_index();
break;
}
}
@@ -269,7 +271,7 @@ void RegisterAllocatorVerifier::CheckConstraint(
CHECK_WITH_MSG(op->IsRegister() || op->IsStackSlot() || op->IsConstant(),
caller_info_);
return;
- case kSameAsFirst:
+ case kSameAsInput:
CHECK_WITH_MSG(false, caller_info_);
return;
}
diff --git a/chromium/v8/src/compiler/backend/register-allocator-verifier.h b/chromium/v8/src/compiler/backend/register-allocator-verifier.h
index 11bd4924f4e..c812642d338 100644
--- a/chromium/v8/src/compiler/backend/register-allocator-verifier.h
+++ b/chromium/v8/src/compiler/backend/register-allocator-verifier.h
@@ -219,7 +219,7 @@ class RegisterAllocatorVerifier final : public ZoneObject {
kRegisterOrSlot,
kRegisterOrSlotFP,
kRegisterOrSlotOrConstant,
- kSameAsFirst,
+ kSameAsInput,
kRegisterAndSlot
};
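A note on the constraint change above: the verifier previously special-cased outputs constrained to reuse the first input; with SAME_AS_INPUT the unallocated operand carries the index of the input to reuse, and the verifier copies that input's constraint instead of always copying input 0's. A minimal standalone sketch of that lookup, using simplified stand-in types rather than the real OperandConstraint:

#include <cassert>
#include <vector>

// Simplified stand-ins for the verifier's constraint records.
enum ConstraintType { kRegister, kSlot, kSameAsInput };
struct OperandConstraint {
  ConstraintType type;
  int value;  // for kSameAsInput: index of the referenced input operand
};

// Resolve an output constraint of kind kSameAsInput by inheriting the
// constraint of the input it names, generalizing the old "same as first
// input" behaviour (which always copied index 0).
void ResolveSameAsInput(const std::vector<OperandConstraint>& inputs,
                        OperandConstraint* output) {
  if (output->type == kSameAsInput) {
    int input_index = output->value;
    assert(input_index < static_cast<int>(inputs.size()));
    *output = inputs[input_index];
  }
}

int main() {
  std::vector<OperandConstraint> inputs = {{kSlot, 0}, {kRegister, 3}};
  OperandConstraint out = {kSameAsInput, 1};  // must share input 1's location
  ResolveSameAsInput(inputs, &out);
  assert(out.type == kRegister && out.value == 3);
  return 0;
}
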
diff --git a/chromium/v8/src/compiler/backend/register-allocator.cc b/chromium/v8/src/compiler/backend/register-allocator.cc
index 3cd6cd98de0..0649fd5fd08 100644
--- a/chromium/v8/src/compiler/backend/register-allocator.cc
+++ b/chromium/v8/src/compiler/backend/register-allocator.cc
@@ -1373,21 +1373,6 @@ TopLevelLiveRange* TopTierRegisterAllocationData::NewLiveRange(
return allocation_zone()->New<TopLevelLiveRange>(index, rep);
}
-int TopTierRegisterAllocationData::GetNextLiveRangeId() {
- int vreg = virtual_register_count_++;
- if (vreg >= static_cast<int>(live_ranges().size())) {
- live_ranges().resize(vreg + 1, nullptr);
- }
- return vreg;
-}
-
-TopLevelLiveRange* TopTierRegisterAllocationData::NextLiveRange(
- MachineRepresentation rep) {
- int vreg = GetNextLiveRangeId();
- TopLevelLiveRange* ret = NewLiveRange(vreg, rep);
- return ret;
-}
-
TopTierRegisterAllocationData::PhiMapValue*
TopTierRegisterAllocationData::InitializePhiMap(const InstructionBlock* block,
PhiInstruction* phi) {
@@ -1747,6 +1732,7 @@ void ConstraintBuilder::MeetConstraintsAfter(int instr_index) {
void ConstraintBuilder::MeetConstraintsBefore(int instr_index) {
Instruction* second = code()->InstructionAt(instr_index);
// Handle fixed input operands of second instruction.
+ ZoneVector<TopLevelLiveRange*>* spilled_consts = nullptr;
for (size_t i = 0; i < second->InputCount(); i++) {
InstructionOperand* input = second->InputAt(i);
if (input->IsImmediate()) {
@@ -1757,8 +1743,19 @@ void ConstraintBuilder::MeetConstraintsBefore(int instr_index) {
TopLevelLiveRange* range =
data()->GetOrCreateLiveRangeFor(cur_input->virtual_register());
if (range->HasSpillOperand() && range->GetSpillOperand()->IsConstant()) {
+ bool already_spilled = false;
+ if (spilled_consts == nullptr) {
+ spilled_consts =
+ allocation_zone()->New<ZoneVector<TopLevelLiveRange*>>(
+ allocation_zone());
+ } else {
+ auto it =
+ std::find(spilled_consts->begin(), spilled_consts->end(), range);
+ already_spilled = it != spilled_consts->end();
+ }
auto it = data()->slot_for_const_range().find(range);
if (it == data()->slot_for_const_range().end()) {
+ DCHECK(!already_spilled);
int width = ByteWidthForStackSlot(range->representation());
int index = data()->frame()->AllocateSpillSlot(width);
auto* slot = AllocatedOperand::New(allocation_zone(),
@@ -1766,13 +1763,15 @@ void ConstraintBuilder::MeetConstraintsBefore(int instr_index) {
range->representation(), index);
it = data()->slot_for_const_range().emplace(range, slot).first;
}
- auto* slot = it->second;
- int input_vreg = cur_input->virtual_register();
- UnallocatedOperand input_copy(UnallocatedOperand::REGISTER_OR_SLOT,
- input_vreg);
- // Spill at every use position for simplicity. This case is very rare -
- // the only known instance is crbug.com/1146880.
- data()->AddGapMove(instr_index, Instruction::END, input_copy, *slot);
+ if (!already_spilled) {
+ auto* slot = it->second;
+ int input_vreg = cur_input->virtual_register();
+ UnallocatedOperand input_copy(UnallocatedOperand::REGISTER_OR_SLOT,
+ input_vreg);
+ // Spill at every use position for simplicity, this case is very rare.
+ data()->AddGapMove(instr_index, Instruction::END, input_copy, *slot);
+ spilled_consts->push_back(range);
+ }
}
}
if (cur_input->HasFixedPolicy()) {
@@ -1792,7 +1791,7 @@ void ConstraintBuilder::MeetConstraintsBefore(int instr_index) {
if (!second_output->HasSameAsInputPolicy()) continue;
DCHECK_EQ(0, i); // Only valid for first output.
UnallocatedOperand* cur_input =
- UnallocatedOperand::cast(second->InputAt(0));
+ UnallocatedOperand::cast(second->InputAt(second_output->input_index()));
int output_vreg = second_output->virtual_register();
int input_vreg = cur_input->virtual_register();
UnallocatedOperand input_copy(UnallocatedOperand::REGISTER_OR_SLOT,
@@ -3958,7 +3957,8 @@ void LinearScanAllocator::FindFreeRegistersForRange(
// interesting to this range anyway.
// TODO(mtrofin): extend to aliased ranges, too.
if ((kSimpleFPAliasing || !check_fp_aliasing()) &&
- positions[cur_reg] <= cur_inactive->NextStart()) {
+ (positions[cur_reg] <= cur_inactive->NextStart() ||
+ range->End() <= cur_inactive->NextStart())) {
break;
}
LifetimePosition next_intersection =
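The MeetConstraintsBefore change above guards against emitting duplicate spill moves when the same constant-backed live range appears as several inputs of one instruction: a lazily allocated vector remembers which ranges already received a gap move at this position. A standalone sketch of that pattern, with the live-range and spill-emission types reduced to trivial placeholders rather than the allocator's real interfaces:

#include <algorithm>
#include <memory>
#include <vector>

struct LiveRange { int vreg; };

// Stand-in for AddGapMove(instr_index, END, input_copy, slot).
static int emitted_spills = 0;
void EmitSpillMove(int /*instr_index*/, LiveRange* /*range*/) { ++emitted_spills; }

// Emit at most one spill move per constant range at this instruction, even if
// the range shows up as multiple inputs.
void SpillConstantsOnce(int instr_index,
                        const std::vector<LiveRange*>& constant_inputs) {
  std::unique_ptr<std::vector<LiveRange*>> spilled;  // created on first use
  for (LiveRange* range : constant_inputs) {
    bool already_spilled = false;
    if (!spilled) {
      spilled = std::make_unique<std::vector<LiveRange*>>();
    } else {
      already_spilled =
          std::find(spilled->begin(), spilled->end(), range) != spilled->end();
    }
    if (!already_spilled) {
      EmitSpillMove(instr_index, range);
      spilled->push_back(range);
    }
  }
}

int main() {
  LiveRange a{1}, b{2};
  SpillConstantsOnce(0, {&a, &b, &a});  // range a appears twice as an input
  return emitted_spills == 2 ? 0 : 1;
}
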
diff --git a/chromium/v8/src/compiler/backend/register-allocator.h b/chromium/v8/src/compiler/backend/register-allocator.h
index 214a1d1308f..a9c3ca5ae0c 100644
--- a/chromium/v8/src/compiler/backend/register-allocator.h
+++ b/chromium/v8/src/compiler/backend/register-allocator.h
@@ -305,7 +305,6 @@ class TopTierRegisterAllocationData final : public RegisterAllocationData {
TopLevelLiveRange* GetOrCreateLiveRangeFor(int index);
// Creates a new live range.
TopLevelLiveRange* NewLiveRange(int index, MachineRepresentation rep);
- TopLevelLiveRange* NextLiveRange(MachineRepresentation rep);
SpillRange* AssignSpillRangeToLiveRange(TopLevelLiveRange* range,
SpillMode spill_mode);
@@ -356,8 +355,6 @@ class TopTierRegisterAllocationData final : public RegisterAllocationData {
}
private:
- int GetNextLiveRangeId();
-
Zone* const allocation_zone_;
Frame* const frame_;
InstructionSequence* const code_;
diff --git a/chromium/v8/src/compiler/backend/riscv64/code-generator-riscv64.cc b/chromium/v8/src/compiler/backend/riscv64/code-generator-riscv64.cc
index f01941883ed..007edd7a96d 100644
--- a/chromium/v8/src/compiler/backend/riscv64/code-generator-riscv64.cc
+++ b/chromium/v8/src/compiler/backend/riscv64/code-generator-riscv64.cc
@@ -157,18 +157,16 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
zone_(gen->zone()) {}
void Generate() final {
- if (mode_ > RecordWriteMode::kValueIsPointer) {
- __ JumpIfSmi(value_, exit());
- }
__ CheckPageFlag(value_, scratch0_,
MemoryChunk::kPointersToHereAreInterestingMask, eq,
exit());
__ Add64(scratch1_, object_, index_);
RememberedSetAction const remembered_set_action =
- mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET
- : OMIT_REMEMBERED_SET;
- SaveFPRegsMode const save_fp_mode =
- frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
+ mode_ > RecordWriteMode::kValueIsMap ? RememberedSetAction::kEmit
+ : RememberedSetAction::kOmit;
+ SaveFPRegsMode const save_fp_mode = frame()->DidAllocateDoubleRegisters()
+ ? SaveFPRegsMode::kSave
+ : SaveFPRegsMode::kIgnore;
if (must_save_lr_) {
// We need to save and restore ra if the frame was elided.
__ Push(ra);
@@ -488,6 +486,11 @@ void CodeGenerator::AssemblePrepareTailCall() {
frame_access_state()->SetFrameAccessToSP();
}
+void CodeGenerator::AssembleArchSelect(Instruction* instr,
+ FlagsCondition condition) {
+ UNIMPLEMENTED();
+}
+
namespace {
void AdjustStackPointerForTailCall(TurboAssembler* tasm,
@@ -606,8 +609,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Address wasm_code = static_cast<Address>(constant.ToInt64());
__ Call(wasm_code, constant.rmode());
} else {
- __ Add64(kScratchReg, i.InputRegister(0), 0);
- __ Call(kScratchReg);
+ __ Add64(t6, i.InputRegister(0), 0);
+ __ Call(t6);
}
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
@@ -676,7 +679,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchSaveCallerRegisters: {
fp_mode_ =
static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode()));
- DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs);
+ DCHECK(fp_mode_ == SaveFPRegsMode::kIgnore ||
+ fp_mode_ == SaveFPRegsMode::kSave);
// kReturnRegister0 should have been saved before entering the stub.
int bytes = __ PushCallerSaved(fp_mode_, kReturnRegister0);
DCHECK(IsAligned(bytes, kSystemPointerSize));
@@ -689,7 +693,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchRestoreCallerRegisters: {
DCHECK(fp_mode_ ==
static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode())));
- DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs);
+ DCHECK(fp_mode_ == SaveFPRegsMode::kIgnore ||
+ fp_mode_ == SaveFPRegsMode::kSave);
// Don't overwrite the returned value.
int bytes = __ PopCallerSaved(fp_mode_, kReturnRegister0);
frame_access_state()->IncreaseSPDelta(-(bytes / kSystemPointerSize));
@@ -816,6 +821,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DetermineStubCallMode());
__ Add64(kScratchReg, object, index);
__ Sd(value, MemOperand(kScratchReg));
+ if (mode > RecordWriteMode::kValueIsPointer) {
+ __ JumpIfSmi(value, ool->exit());
+ }
__ CheckPageFlag(object, scratch0,
MemoryChunk::kPointersFromHereAreInterestingMask, ne,
ool->entry());
@@ -1122,6 +1130,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kRiscvCmp:
// Pseudo-instruction used for cmp/branch. No opcode emitted here.
break;
+ case kRiscvCmpZero:
+ // Pseudo-instruction used for cmpzero/branch. No opcode emitted here.
+ break;
case kRiscvMov:
// TODO(plind): Should we combine mov/li like this, or use separate instr?
// - Also see x64 ASSEMBLE_BINOP & RegisterOrOperandType
@@ -1841,7 +1852,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
UNIMPLEMENTED();
}
return kSuccess;
-} // NOLINT(readability/fn_size)
+}
#define UNSUPPORTED_COND(opcode, condition) \
StdoutStream{} << "Unsupported " << #opcode << " condition: \"" << condition \
@@ -1902,6 +1913,9 @@ void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
} else if (instr->arch_opcode() == kRiscvCmp) {
cc = FlagsConditionToConditionCmp(condition);
__ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));
+ } else if (instr->arch_opcode() == kRiscvCmpZero) {
+ cc = FlagsConditionToConditionCmp(condition);
+ __ Branch(tlabel, cc, i.InputRegister(0), Operand(zero_reg));
} else if (instr->arch_opcode() == kArchStackPointerGreaterThan) {
cc = FlagsConditionToConditionCmp(condition);
Register lhs_register = sp;
@@ -1957,6 +1971,12 @@ void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
__ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister, kScratchReg);
}
return;
+ case kRiscvCmpZero: {
+ __ CompareI(kScratchReg, i.InputRegister(0), Operand(zero_reg),
+ FlagsConditionToConditionCmp(condition));
+ __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister, kScratchReg);
+ }
+ return;
case kRiscvTst: {
switch (condition) {
case kEqual:
@@ -2224,6 +2244,68 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
UNREACHABLE();
}
return;
+ } else if (instr->arch_opcode() == kRiscvCmpZero) {
+ cc = FlagsConditionToConditionCmp(condition);
+ switch (cc) {
+ case eq: {
+ Register left = i.InputRegister(0);
+ __ Sltu(result, left, 1);
+ break;
+ }
+ case ne: {
+ Register left = i.InputRegister(0);
+ __ Sltu(result, zero_reg, left);
+ break;
+ }
+ case lt:
+ case ge: {
+ Register left = i.InputRegister(0);
+ Operand right = Operand(zero_reg);
+ __ Slt(result, left, right);
+ if (cc == ge) {
+ __ Xor(result, result, 1);
+ }
+ } break;
+ case gt:
+ case le: {
+ Operand left = i.InputOperand(0);
+ __ Slt(result, zero_reg, left);
+ if (cc == le) {
+ __ Xor(result, result, 1);
+ }
+ } break;
+ case Uless:
+ case Ugreater_equal: {
+ Register left = i.InputRegister(0);
+ Operand right = Operand(zero_reg);
+ __ Sltu(result, left, right);
+ if (cc == Ugreater_equal) {
+ __ Xor(result, result, 1);
+ }
+ } break;
+ case Ugreater:
+ case Uless_equal: {
+ Register left = zero_reg;
+ Operand right = i.InputOperand(0);
+ __ Sltu(result, left, right);
+ if (cc == Uless_equal) {
+ __ Xor(result, result, 1);
+ }
+ } break;
+ default:
+ UNREACHABLE();
+ }
+ return;
+ } else if (instr->arch_opcode() == kArchStackPointerGreaterThan) {
+ cc = FlagsConditionToConditionCmp(condition);
+ Register lhs_register = sp;
+ uint32_t offset;
+ if (ShouldApplyOffsetToStackCheck(instr, &offset)) {
+ lhs_register = i.TempRegister(0);
+ __ Sub64(lhs_register, sp, offset);
+ }
+ __ Sgtu(result, lhs_register, Operand(i.InputRegister(0)));
+ return;
} else if (instr->arch_opcode() == kRiscvCmpD ||
instr->arch_opcode() == kRiscvCmpS) {
FPURegister left = i.InputOrZeroDoubleRegister(0);
@@ -2346,7 +2428,7 @@ void CodeGenerator::AssembleConstructFrame() {
// frame is still on the stack. Optimized code uses OSR values directly from
// the unoptimized frame. Thus, all that needs to be done is to allocate the
// remaining stack slots.
- if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
+ __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
required_slots -= osr_helper()->UnoptimizedFrameSlots();
ResetSpeculationPoison();
@@ -2448,7 +2530,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
if (parameter_slots != 0) {
if (additional_pop_count->IsImmediate()) {
DCHECK_EQ(g.ToConstant(additional_pop_count).ToInt32(), 0);
- } else if (__ emit_debug_code()) {
+ } else if (FLAG_debug_code) {
__ Assert(eq, AbortReason::kUnexpectedAdditionalPopValue,
g.ToRegister(additional_pop_count),
Operand(static_cast<int64_t>(0)));
@@ -2510,7 +2592,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
__ Ret();
}
-void CodeGenerator::FinishCode() {}
+void CodeGenerator::FinishCode() { __ ForceConstantPoolEmissionWithoutJump(); }
void CodeGenerator::PrepareForDeoptimizationExits(
ZoneDeque<DeoptimizationExit*>* exits) {}
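Across this and the later s390/x64 code-generator diffs, the Smi check of the write barrier moves out of the out-of-line stub onto the main path, ahead of the page-flag test: a Smi value never needs a barrier, so it now branches past both the page-flag check and the stub. A sketch of the resulting fast-path ordering, with the page-flag test and the record-write stub reduced to trivial stand-ins:

#include <cstdint>
#include <cstdio>

bool IsSmi(uintptr_t value) { return (value & 1) == 0; }  // V8 tags Smis with a clear low bit

// Trivial stand-ins for the page-flag test and the record-write stub.
bool PointersFromHereAreInteresting(uintptr_t /*object*/) { return true; }
void CallRecordWriteStub(uintptr_t /*object*/, uintptr_t /*slot*/) {
  std::puts("record write");
}

void StoreWithWriteBarrier(uintptr_t object, uintptr_t* slot, uintptr_t value,
                           bool value_can_be_smi) {
  *slot = value;                                 // the store itself
  if (value_can_be_smi && IsSmi(value)) return;  // Smis never need a barrier
  if (!PointersFromHereAreInteresting(object)) return;
  CallRecordWriteStub(object, reinterpret_cast<uintptr_t>(slot));
}

int main() {
  uintptr_t slot = 0;
  StoreWithWriteBarrier(0x1000, &slot, 42 << 1, /*value_can_be_smi=*/true);  // Smi: no stub call
  StoreWithWriteBarrier(0x1000, &slot, 0x2001, /*value_can_be_smi=*/true);   // tagged pointer: stub call
  return 0;
}
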
diff --git a/chromium/v8/src/compiler/backend/riscv64/instruction-codes-riscv64.h b/chromium/v8/src/compiler/backend/riscv64/instruction-codes-riscv64.h
index 61921d15855..f230ce3377e 100644
--- a/chromium/v8/src/compiler/backend/riscv64/instruction-codes-riscv64.h
+++ b/chromium/v8/src/compiler/backend/riscv64/instruction-codes-riscv64.h
@@ -59,6 +59,7 @@ namespace compiler {
V(RiscvMov) \
V(RiscvTst) \
V(RiscvCmp) \
+ V(RiscvCmpZero) \
V(RiscvCmpS) \
V(RiscvAddS) \
V(RiscvSubS) \
diff --git a/chromium/v8/src/compiler/backend/riscv64/instruction-scheduler-riscv64.cc b/chromium/v8/src/compiler/backend/riscv64/instruction-scheduler-riscv64.cc
index b83942ffce8..c9210ded8fd 100644
--- a/chromium/v8/src/compiler/backend/riscv64/instruction-scheduler-riscv64.cc
+++ b/chromium/v8/src/compiler/backend/riscv64/instruction-scheduler-riscv64.cc
@@ -32,6 +32,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kRiscvCeilWS:
case kRiscvClz32:
case kRiscvCmp:
+ case kRiscvCmpZero:
case kRiscvCmpD:
case kRiscvCmpS:
case kRiscvCtz32:
@@ -812,7 +813,7 @@ int MultiPushFPULatency() {
int PushCallerSavedLatency(SaveFPRegsMode fp_mode) {
int latency = MultiPushLatency();
- if (fp_mode == kSaveFPRegs) {
+ if (fp_mode == SaveFPRegsMode::kSave) {
latency += MultiPushFPULatency();
}
return latency;
@@ -836,7 +837,7 @@ int MultiPopFPULatency() {
int PopCallerSavedLatency(SaveFPRegsMode fp_mode) {
int latency = MultiPopLatency();
- if (fp_mode == kSaveFPRegs) {
+ if (fp_mode == SaveFPRegsMode::kSave) {
latency += MultiPopFPULatency();
}
return latency;
diff --git a/chromium/v8/src/compiler/backend/riscv64/instruction-selector-riscv64.cc b/chromium/v8/src/compiler/backend/riscv64/instruction-selector-riscv64.cc
index 1d6b506685e..83865fde131 100644
--- a/chromium/v8/src/compiler/backend/riscv64/instruction-selector-riscv64.cc
+++ b/chromium/v8/src/compiler/backend/riscv64/instruction-selector-riscv64.cc
@@ -459,6 +459,7 @@ void InstructionSelector::VisitLoad(Node* node) {
break;
case MachineRepresentation::kCompressedPointer: // Fall through.
case MachineRepresentation::kCompressed: // Fall through.
+ case MachineRepresentation::kMapWord: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
}
@@ -533,6 +534,7 @@ void InstructionSelector::VisitStore(Node* node) {
break;
case MachineRepresentation::kCompressedPointer: // Fall through.
case MachineRepresentation::kCompressed: // Fall through.
+ case MachineRepresentation::kMapWord: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
}
@@ -1214,7 +1216,9 @@ void InstructionSelector::VisitBitcastWord32ToWord64(Node* node) {
void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
Node* value = node->InputAt(0);
- if (value->opcode() == IrOpcode::kLoad && CanCover(node, value)) {
+ if ((value->opcode() == IrOpcode::kLoad ||
+ value->opcode() == IrOpcode::kLoadImmutable) &&
+ CanCover(node, value)) {
// Generate sign-extending load.
LoadRepresentation load_rep = LoadRepresentationOf(value->op());
InstructionCode opcode = kArchNop;
@@ -1242,7 +1246,8 @@ void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
bool InstructionSelector::ZeroExtendsWord32ToWord64NoPhis(Node* node) {
DCHECK_NE(node->opcode(), IrOpcode::kPhi);
- if (node->opcode() == IrOpcode::kLoad) {
+ if (node->opcode() == IrOpcode::kLoad ||
+ node->opcode() == IrOpcode::kLoadImmutable) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
if (load_rep.IsUnsigned()) {
switch (load_rep.representation()) {
@@ -1598,6 +1603,7 @@ void InstructionSelector::VisitUnalignedLoad(Node* node) {
case MachineRepresentation::kBit: // Fall through.
case MachineRepresentation::kCompressedPointer: // Fall through.
case MachineRepresentation::kCompressed: // Fall through.
+ case MachineRepresentation::kMapWord: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
}
@@ -1651,6 +1657,7 @@ void InstructionSelector::VisitUnalignedStore(Node* node) {
case MachineRepresentation::kBit: // Fall through.
case MachineRepresentation::kCompressedPointer: // Fall through.
case MachineRepresentation::kCompressed: // Fall through.
+ case MachineRepresentation::kMapWord: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
}
@@ -1878,8 +1885,7 @@ void VisitWord64Compare(InstructionSelector* selector, Node* node,
void EmitWordCompareZero(InstructionSelector* selector, Node* value,
FlagsContinuation* cont) {
RiscvOperandGenerator g(selector);
- selector->EmitWithContinuation(kRiscvCmp, g.UseRegister(value),
- g.TempImmediate(0), cont);
+ selector->EmitWithContinuation(kRiscvCmpZero, g.UseRegister(value), cont);
}
void VisitAtomicLoad(InstructionSelector* selector, Node* node,
@@ -2654,7 +2660,7 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I32x4AllTrue, kRiscvI32x4AllTrue) \
V(I16x8AllTrue, kRiscvI16x8AllTrue) \
V(I8x16AllTrue, kRiscvI8x16AllTrue) \
- V(I64x2AllTrue, kRiscvI64x2AllTrue) \
+ V(I64x2AllTrue, kRiscvI64x2AllTrue)
#define SIMD_SHIFT_OP_LIST(V) \
V(I64x2Shl) \
@@ -3015,6 +3021,12 @@ VISIT_EXT_MUL(I16x8, I8x16S)
VISIT_EXT_MUL(I16x8, I8x16U)
#undef VISIT_EXT_MUL
+void InstructionSelector::AddOutputToSelectContinuation(OperandGenerator* g,
+ int first_input_index,
+ Node* node) {
+ UNREACHABLE();
+}
+
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
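The new kRiscvCmpZero pseudo-instruction lets EmitWordCompareZero compare directly against the hardwired zero register instead of materializing a zero immediate for kRiscvCmp. For boolean materialization, the eq/ne cases then reduce to a single set-less-than-unsigned, as in this standalone model (plain C++ rather than assembly): "sltu rd, rs, 1" yields 1 exactly when rs == 0, and "sltu rd, zero, rs" yields 1 exactly when rs != 0.

#include <cassert>
#include <cstdint>

uint64_t Sltu(uint64_t a, uint64_t b) { return a < b ? 1 : 0; }

uint64_t BoolFromCmpZero(uint64_t reg, bool want_equal_zero) {
  // eq: reg < 1  (unsigned)  <=>  reg == 0
  // ne: 0  < reg (unsigned)  <=>  reg != 0
  return want_equal_zero ? Sltu(reg, 1) : Sltu(0, reg);
}

int main() {
  assert(BoolFromCmpZero(0, true) == 1);
  assert(BoolFromCmpZero(7, true) == 0);
  assert(BoolFromCmpZero(7, false) == 1);
  return 0;
}
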
diff --git a/chromium/v8/src/compiler/backend/s390/code-generator-s390.cc b/chromium/v8/src/compiler/backend/s390/code-generator-s390.cc
index c5e16b3e49c..a6c7dda9b48 100644
--- a/chromium/v8/src/compiler/backend/s390/code-generator-s390.cc
+++ b/chromium/v8/src/compiler/backend/s390/code-generator-s390.cc
@@ -206,9 +206,6 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
zone_(gen->zone()) {}
void Generate() final {
- if (mode_ > RecordWriteMode::kValueIsPointer) {
- __ JumpIfSmi(value_, exit());
- }
if (COMPRESS_POINTERS_BOOL) {
__ DecompressTaggedPointer(value_, value_);
}
@@ -222,10 +219,11 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
__ AddS64(scratch1_, object_, offset_);
}
RememberedSetAction const remembered_set_action =
- mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET
- : OMIT_REMEMBERED_SET;
- SaveFPRegsMode const save_fp_mode =
- frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
+ mode_ > RecordWriteMode::kValueIsMap ? RememberedSetAction::kEmit
+ : RememberedSetAction::kOmit;
+ SaveFPRegsMode const save_fp_mode = frame()->DidAllocateDoubleRegisters()
+ ? SaveFPRegsMode::kSave
+ : SaveFPRegsMode::kIgnore;
if (must_save_lr_) {
// We need to save and restore r14 if the frame was elided.
__ Push(r14);
@@ -1207,7 +1205,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchSaveCallerRegisters: {
fp_mode_ =
static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode()));
- DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs);
+ DCHECK(fp_mode_ == SaveFPRegsMode::kIgnore ||
+ fp_mode_ == SaveFPRegsMode::kSave);
// kReturnRegister0 should have been saved before entering the stub.
int bytes = __ PushCallerSaved(fp_mode_, kReturnRegister0);
DCHECK(IsAligned(bytes, kSystemPointerSize));
@@ -1220,7 +1219,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchRestoreCallerRegisters: {
DCHECK(fp_mode_ ==
static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode())));
- DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs);
+ DCHECK(fp_mode_ == SaveFPRegsMode::kIgnore ||
+ fp_mode_ == SaveFPRegsMode::kSave);
// Don't overwrite the returned value.
int bytes = __ PopCallerSaved(fp_mode_, kReturnRegister0);
frame_access_state()->IncreaseSPDelta(-(bytes / kSystemPointerSize));
@@ -1374,6 +1374,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DetermineStubCallMode(), &unwinding_info_writer_);
__ StoreTaggedField(value, MemOperand(object, offset));
}
+ if (mode > RecordWriteMode::kValueIsPointer) {
+ __ JumpIfSmi(value, ool->exit());
+ }
__ CheckPageFlag(object, scratch0,
MemoryChunk::kPointersFromHereAreInterestingMask, ne,
ool->entry());
@@ -1692,16 +1695,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_UNARY_OP(D_DInstr(sqdbr), nullInstr, nullInstr);
break;
case kS390_FloorFloat:
- __ fiebra(ROUND_TOWARD_NEG_INF, i.OutputDoubleRegister(),
- i.InputDoubleRegister(0));
+ __ FloorF32(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
case kS390_CeilFloat:
- __ fiebra(ROUND_TOWARD_POS_INF, i.OutputDoubleRegister(),
- i.InputDoubleRegister(0));
+ __ CeilF32(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
case kS390_TruncateFloat:
- __ fiebra(ROUND_TOWARD_0, i.OutputDoubleRegister(),
- i.InputDoubleRegister(0));
+ __ TruncF32(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
// Double operations
case kS390_ModDouble:
@@ -1797,16 +1797,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ lpdbr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
case kS390_FloorDouble:
- __ fidbra(ROUND_TOWARD_NEG_INF, i.OutputDoubleRegister(),
- i.InputDoubleRegister(0));
+ __ FloorF64(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
case kS390_CeilDouble:
- __ fidbra(ROUND_TOWARD_POS_INF, i.OutputDoubleRegister(),
- i.InputDoubleRegister(0));
+ __ CeilF64(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
case kS390_TruncateDouble:
- __ fidbra(ROUND_TOWARD_0, i.OutputDoubleRegister(),
- i.InputDoubleRegister(0));
+ __ TruncF64(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
case kS390_RoundDouble:
__ fidbra(ROUND_TO_NEAREST_AWAY_FROM_0, i.OutputDoubleRegister(),
@@ -2992,12 +2989,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kS390_I32x4GeS: {
- __ vceq(kScratchDoubleReg, i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(2));
- __ vch(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(2));
- __ vo(i.OutputSimd128Register(), i.OutputSimd128Register(),
- kScratchDoubleReg, Condition(0), Condition(0), Condition(2));
+ __ vch(kScratchDoubleReg, i.InputSimd128Register(1),
+ i.InputSimd128Register(0), Condition(0), Condition(2));
+ __ vno(i.OutputSimd128Register(), kScratchDoubleReg, kScratchDoubleReg,
+ Condition(0), Condition(0), Condition(2));
break;
}
case kS390_I32x4GtU: {
@@ -3020,12 +3015,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kS390_I16x8GeS: {
- __ vceq(kScratchDoubleReg, i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(1));
- __ vch(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(1));
- __ vo(i.OutputSimd128Register(), i.OutputSimd128Register(),
- kScratchDoubleReg, Condition(0), Condition(0), Condition(1));
+ __ vch(kScratchDoubleReg, i.InputSimd128Register(1),
+ i.InputSimd128Register(0), Condition(0), Condition(1));
+ __ vno(i.OutputSimd128Register(), kScratchDoubleReg, kScratchDoubleReg,
+ Condition(0), Condition(0), Condition(1));
break;
}
case kS390_I16x8GtU: {
@@ -3048,12 +3041,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kS390_I8x16GeS: {
- __ vceq(kScratchDoubleReg, i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(0));
- __ vch(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), Condition(0), Condition(0));
- __ vo(i.OutputSimd128Register(), i.OutputSimd128Register(),
- kScratchDoubleReg, Condition(0), Condition(0), Condition(0));
+ __ vch(kScratchDoubleReg, i.InputSimd128Register(1),
+ i.InputSimd128Register(0), Condition(0), Condition(0));
+ __ vno(i.OutputSimd128Register(), kScratchDoubleReg, kScratchDoubleReg,
+ Condition(0), Condition(0), Condition(0));
break;
}
case kS390_I8x16GtU: {
@@ -3289,11 +3280,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kS390_S128Const: {
- for (int index = 0, j = 0; index < 2; index++, j = +2) {
- __ mov(index < 1 ? ip : r0, Operand(i.InputInt32(j)));
- __ iihf(index < 1 ? ip : r0, Operand(i.InputInt32(j + 1)));
- }
- __ vlvgp(i.OutputSimd128Register(), r0, ip);
+ uint64_t low = make_uint64(i.InputUint32(1), i.InputUint32(0));
+ uint64_t high = make_uint64(i.InputUint32(3), i.InputUint32(2));
+ __ mov(r0, Operand(low));
+ __ mov(ip, Operand(high));
+ __ vlvgp(i.OutputSimd128Register(), ip, r0);
break;
}
case kS390_S128Zero: {
@@ -3561,15 +3552,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Simd128Register dst = i.OutputSimd128Register(),
src0 = i.InputSimd128Register(0),
src1 = i.InputSimd128Register(1);
- int32_t k8x16_indices[] = {i.InputInt32(2), i.InputInt32(3),
- i.InputInt32(4), i.InputInt32(5)};
- // create 2 * 8 byte inputs indicating new indices
- for (int i = 0, j = 0; i < 2; i++, j = +2) {
- __ mov(i < 1 ? ip : r0, Operand(k8x16_indices[j]));
- __ iihf(i < 1 ? ip : r0, Operand(k8x16_indices[j + 1]));
- }
- __ vlvgp(kScratchDoubleReg, r0, ip);
- __ vperm(dst, src0, src1, kScratchDoubleReg, Condition(0), Condition(0));
+ uint64_t low = make_uint64(i.InputUint32(3), i.InputUint32(2));
+ uint64_t high = make_uint64(i.InputUint32(5), i.InputUint32(4));
+ __ mov(r0, Operand(low));
+ __ mov(ip, Operand(high));
+ __ vlvgp(dst, ip, r0);
+ __ vperm(dst, src0, src1, dst, Condition(0), Condition(0));
break;
}
case kS390_I8x16Swizzle: {
@@ -3712,80 +3700,65 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Condition(0), Condition(0), Condition(2));
break;
}
-#define ASSEMBLE_SIMD_I64X2_EXT_MUL(UNPACK_INSTR) \
- __ UNPACK_INSTR(kScratchDoubleReg, i.InputSimd128Register(0), Condition(0), \
- Condition(0), Condition(2)); \
- __ UNPACK_INSTR(i.OutputSimd128Register(), i.InputSimd128Register(1), \
- Condition(0), Condition(0), Condition(2)); \
- Register scratch_0 = r0; \
- Register scratch_1 = r1; \
- for (int lane = 0; lane < 2; lane++) { \
- __ vlgv(scratch_0, kScratchDoubleReg, MemOperand(r0, lane), Condition(3)); \
- __ vlgv(scratch_1, i.OutputSimd128Register(), MemOperand(r0, lane), \
- Condition(3)); \
- __ MulS64(scratch_0, scratch_1); \
- scratch_0 = r1; \
- scratch_1 = ip; \
- } \
- __ vlvgp(i.OutputSimd128Register(), r0, r1);
+#define EXT_MUL(mul_even, mul_odd, merge, mode) \
+ Simd128Register dst = i.OutputSimd128Register(), \
+ src0 = i.InputSimd128Register(0), \
+ src1 = i.InputSimd128Register(1); \
+ __ mul_even(dst, src0, src1, Condition(0), Condition(0), Condition(mode)); \
+ __ mul_odd(kScratchDoubleReg, src0, src1, Condition(0), Condition(0), \
+ Condition(mode)); \
+ __ merge(dst, dst, kScratchDoubleReg, Condition(0), Condition(0), \
+ Condition(mode + 1));
case kS390_I64x2ExtMulLowI32x4S: {
- ASSEMBLE_SIMD_I64X2_EXT_MUL(vupl)
+ EXT_MUL(vme, vmo, vmrl, 2)
break;
}
case kS390_I64x2ExtMulHighI32x4S: {
- ASSEMBLE_SIMD_I64X2_EXT_MUL(vuph)
+ EXT_MUL(vme, vmo, vmrh, 2)
break;
}
case kS390_I64x2ExtMulLowI32x4U: {
- ASSEMBLE_SIMD_I64X2_EXT_MUL(vupll)
+ EXT_MUL(vmle, vmlo, vmrl, 2)
break;
}
case kS390_I64x2ExtMulHighI32x4U: {
- ASSEMBLE_SIMD_I64X2_EXT_MUL(vuplh)
- break;
- }
-#undef ASSEMBLE_SIMD_I64X2_EXT_MUL
-#define ASSEMBLE_SIMD_I32X4_I16X8_EXT_MUL(UNPACK_INSTR, MODE) \
- __ UNPACK_INSTR(kScratchDoubleReg, i.InputSimd128Register(0), Condition(0), \
- Condition(0), Condition(MODE)); \
- __ UNPACK_INSTR(i.OutputSimd128Register(), i.InputSimd128Register(1), \
- Condition(0), Condition(0), Condition(MODE)); \
- __ vml(i.OutputSimd128Register(), kScratchDoubleReg, \
- i.OutputSimd128Register(), Condition(0), Condition(0), \
- Condition(MODE + 1));
+ EXT_MUL(vmle, vmlo, vmrh, 2)
+ break;
+ }
case kS390_I32x4ExtMulLowI16x8S: {
- ASSEMBLE_SIMD_I32X4_I16X8_EXT_MUL(vupl, 1)
+ EXT_MUL(vme, vmo, vmrl, 1)
break;
}
case kS390_I32x4ExtMulHighI16x8S: {
- ASSEMBLE_SIMD_I32X4_I16X8_EXT_MUL(vuph, 1)
+ EXT_MUL(vme, vmo, vmrh, 1)
break;
}
case kS390_I32x4ExtMulLowI16x8U: {
- ASSEMBLE_SIMD_I32X4_I16X8_EXT_MUL(vupll, 1)
+ EXT_MUL(vmle, vmlo, vmrl, 1)
break;
}
case kS390_I32x4ExtMulHighI16x8U: {
- ASSEMBLE_SIMD_I32X4_I16X8_EXT_MUL(vuplh, 1)
+ EXT_MUL(vmle, vmlo, vmrh, 1)
break;
}
+
case kS390_I16x8ExtMulLowI8x16S: {
- ASSEMBLE_SIMD_I32X4_I16X8_EXT_MUL(vupl, 0)
+ EXT_MUL(vme, vmo, vmrl, 0)
break;
}
case kS390_I16x8ExtMulHighI8x16S: {
- ASSEMBLE_SIMD_I32X4_I16X8_EXT_MUL(vuph, 0)
+ EXT_MUL(vme, vmo, vmrh, 0)
break;
}
case kS390_I16x8ExtMulLowI8x16U: {
- ASSEMBLE_SIMD_I32X4_I16X8_EXT_MUL(vupll, 0)
+ EXT_MUL(vmle, vmlo, vmrl, 0)
break;
}
case kS390_I16x8ExtMulHighI8x16U: {
- ASSEMBLE_SIMD_I32X4_I16X8_EXT_MUL(vuplh, 0)
+ EXT_MUL(vmle, vmlo, vmrh, 0)
break;
}
-#undef ASSEMBLE_SIMD_I32X4_I16X8_EXT_MUL
+#undef EXT_MUL
#define EXT_ADD_PAIRWISE(lane_size, mul_even, mul_odd) \
Simd128Register src = i.InputSimd128Register(0); \
Simd128Register dst = i.OutputSimd128Register(); \
@@ -3942,7 +3915,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
UNREACHABLE();
}
return kSuccess;
-} // NOLINT(readability/fn_size)
+}
// Assembles branches after an instruction.
void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
@@ -4204,7 +4177,7 @@ void CodeGenerator::AssembleConstructFrame() {
// frame is still on the stack. Optimized code uses OSR values directly from
// the unoptimized frame. Thus, all that needs to be done is to allocate the
// remaining stack slots.
- if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
+ __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
required_slots -= osr_helper()->UnoptimizedFrameSlots();
ResetSpeculationPoison();
@@ -4309,7 +4282,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
if (parameter_slots != 0) {
if (additional_pop_count->IsImmediate()) {
DCHECK_EQ(g.ToConstant(additional_pop_count).ToInt32(), 0);
- } else if (__ emit_debug_code()) {
+ } else if (FLAG_debug_code) {
__ CmpS64(g.ToRegister(additional_pop_count), Operand(0));
__ Assert(eq, AbortReason::kUnexpectedAdditionalPopValue);
}
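The EXT_MUL macro above replaces the old unpack-then-multiply sequences: the vector multiply-even and multiply-odd instructions already produce widened products, and a merge-low or merge-high then interleaves them into the result. A scalar model of the idea for the i32x4-to-i64x2 case, with lane ordering simplified relative to the real big-endian S390 element numbering:

#include <array>
#include <cassert>
#include <cstdint>

using I32x4 = std::array<int32_t, 4>;
using I64x2 = std::array<int64_t, 2>;

I64x2 MultiplyEven(const I32x4& a, const I32x4& b) {  // widened products of lanes 0 and 2
  return {int64_t{a[0]} * b[0], int64_t{a[2]} * b[2]};
}
I64x2 MultiplyOdd(const I32x4& a, const I32x4& b) {   // widened products of lanes 1 and 3
  return {int64_t{a[1]} * b[1], int64_t{a[3]} * b[3]};
}
// Merge the first (or second) halves of the even/odd products, pairing them
// back up so the result holds the widened products of adjacent source lanes.
I64x2 MergeLow(const I64x2& even, const I64x2& odd) { return {even[0], odd[0]}; }
I64x2 MergeHigh(const I64x2& even, const I64x2& odd) { return {even[1], odd[1]}; }

int main() {
  I32x4 a = {1, 2, 3, 4}, b = {10, 20, 30, 40};
  I64x2 low = MergeLow(MultiplyEven(a, b), MultiplyOdd(a, b));
  I64x2 high = MergeHigh(MultiplyEven(a, b), MultiplyOdd(a, b));
  assert(low[0] == 10 && low[1] == 40);     // products of lanes 0 and 1
  assert(high[0] == 90 && high[1] == 160);  // products of lanes 2 and 3
  return 0;
}
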
diff --git a/chromium/v8/src/compiler/backend/s390/instruction-selector-s390.cc b/chromium/v8/src/compiler/backend/s390/instruction-selector-s390.cc
index f2375525bb3..8be70adf547 100644
--- a/chromium/v8/src/compiler/backend/s390/instruction-selector-s390.cc
+++ b/chromium/v8/src/compiler/backend/s390/instruction-selector-s390.cc
@@ -141,7 +141,8 @@ class S390OperandGenerator final : public OperandGenerator {
bool CanBeMemoryOperand(InstructionCode opcode, Node* user, Node* input,
int effect_level) {
- if (input->opcode() != IrOpcode::kLoad ||
+ if ((input->opcode() != IrOpcode::kLoad &&
+ input->opcode() != IrOpcode::kLoadImmutable) ||
!selector()->CanCover(user, input)) {
return false;
}
@@ -320,6 +321,7 @@ ArchOpcode SelectLoadOpcode(Node* node) {
case MachineRepresentation::kSimd128:
opcode = kS390_LoadSimd128;
break;
+ case MachineRepresentation::kMapWord: // Fall through.
case MachineRepresentation::kNone:
default:
UNREACHABLE();
@@ -798,6 +800,7 @@ static void VisitGeneralStore(
value = value->InputAt(0);
}
break;
+ case MachineRepresentation::kMapWord: // Fall through.
case MachineRepresentation::kNone:
UNREACHABLE();
}
@@ -2849,6 +2852,12 @@ void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) {
Emit(opcode, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}
+void InstructionSelector::AddOutputToSelectContinuation(OperandGenerator* g,
+ int first_input_index,
+ Node* node) {
+ UNREACHABLE();
+}
+
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
diff --git a/chromium/v8/src/compiler/backend/x64/code-generator-x64.cc b/chromium/v8/src/compiler/backend/x64/code-generator-x64.cc
index 814f14fb4af..11bba2f0f80 100644
--- a/chromium/v8/src/compiler/backend/x64/code-generator-x64.cc
+++ b/chromium/v8/src/compiler/backend/x64/code-generator-x64.cc
@@ -276,9 +276,6 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
}
void Generate() final {
- if (mode_ > RecordWriteMode::kValueIsPointer) {
- __ JumpIfSmi(value_, exit());
- }
if (COMPRESS_POINTERS_BOOL) {
__ DecompressTaggedPointer(value_, value_);
}
@@ -288,10 +285,11 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
__ leaq(scratch1_, operand_);
RememberedSetAction const remembered_set_action =
- mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET
- : OMIT_REMEMBERED_SET;
- SaveFPRegsMode const save_fp_mode =
- frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
+ mode_ > RecordWriteMode::kValueIsMap ? RememberedSetAction::kEmit
+ : RememberedSetAction::kOmit;
+ SaveFPRegsMode const save_fp_mode = frame()->DidAllocateDoubleRegisters()
+ ? SaveFPRegsMode::kSave
+ : SaveFPRegsMode::kIgnore;
if (mode_ == RecordWriteMode::kValueIsEphemeronKey) {
__ CallEphemeronKeyBarrier(object_, scratch1_, save_fp_mode);
@@ -1016,7 +1014,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchSaveCallerRegisters: {
fp_mode_ =
static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode()));
- DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs);
+ DCHECK(fp_mode_ == SaveFPRegsMode::kIgnore ||
+ fp_mode_ == SaveFPRegsMode::kSave);
// kReturnRegister0 should have been saved before entering the stub.
int bytes = __ PushCallerSaved(fp_mode_, kReturnRegister0);
DCHECK(IsAligned(bytes, kSystemPointerSize));
@@ -1029,7 +1028,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchRestoreCallerRegisters: {
DCHECK(fp_mode_ ==
static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode())));
- DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs);
+ DCHECK(fp_mode_ == SaveFPRegsMode::kIgnore ||
+ fp_mode_ == SaveFPRegsMode::kSave);
// Don't overwrite the returned value.
int bytes = __ PopCallerSaved(fp_mode_, kReturnRegister0);
frame_access_state()->IncreaseSPDelta(-(bytes / kSystemPointerSize));
@@ -1190,6 +1190,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
scratch0, scratch1, mode,
DetermineStubCallMode());
__ StoreTaggedField(operand, value);
+ if (mode > RecordWriteMode::kValueIsPointer) {
+ __ JumpIfSmi(value, ool->exit());
+ }
__ CheckPageFlag(object, scratch0,
MemoryChunk::kPointersFromHereAreInterestingMask,
not_zero, ool->entry());
@@ -1478,7 +1481,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// TODO(bmeurer): Use RIP relative 128-bit constants.
XMMRegister tmp = i.ToDoubleRegister(instr->TempAt(0));
__ Pcmpeqd(tmp, tmp);
- __ Psrlq(tmp, 33);
+ __ Psrlq(tmp, byte{33});
__ Andps(i.OutputDoubleRegister(), tmp);
break;
}
@@ -1486,7 +1489,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// TODO(bmeurer): Use RIP relative 128-bit constants.
XMMRegister tmp = i.ToDoubleRegister(instr->TempAt(0));
__ Pcmpeqd(tmp, tmp);
- __ Psllq(tmp, 31);
+ __ Psllq(tmp, byte{31});
__ Xorps(i.OutputDoubleRegister(), tmp);
break;
}
@@ -1734,7 +1737,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Cvttss2siq(i.OutputRegister(), i.InputOperand(0));
}
if (instr->OutputCount() > 1) {
- __ Set(i.OutputRegister(1), 1);
+ __ Move(i.OutputRegister(1), 1);
Label done;
Label fail;
__ Move(kScratchDoubleReg, static_cast<float>(INT64_MIN));
@@ -1752,7 +1755,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// INT64_MIN, then the conversion fails.
__ j(no_overflow, &done, Label::kNear);
__ bind(&fail);
- __ Set(i.OutputRegister(1), 0);
+ __ Move(i.OutputRegister(1), 0);
__ bind(&done);
}
break;
@@ -1763,7 +1766,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Cvttsd2siq(i.OutputRegister(0), i.InputOperand(0));
}
if (instr->OutputCount() > 1) {
- __ Set(i.OutputRegister(1), 1);
+ __ Move(i.OutputRegister(1), 1);
Label done;
Label fail;
__ Move(kScratchDoubleReg, static_cast<double>(INT64_MIN));
@@ -1781,31 +1784,31 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// INT64_MIN, then the conversion fails.
__ j(no_overflow, &done, Label::kNear);
__ bind(&fail);
- __ Set(i.OutputRegister(1), 0);
+ __ Move(i.OutputRegister(1), 0);
__ bind(&done);
}
break;
case kSSEFloat32ToUint64: {
Label fail;
- if (instr->OutputCount() > 1) __ Set(i.OutputRegister(1), 0);
+ if (instr->OutputCount() > 1) __ Move(i.OutputRegister(1), 0);
if (instr->InputAt(0)->IsFPRegister()) {
__ Cvttss2uiq(i.OutputRegister(), i.InputDoubleRegister(0), &fail);
} else {
__ Cvttss2uiq(i.OutputRegister(), i.InputOperand(0), &fail);
}
- if (instr->OutputCount() > 1) __ Set(i.OutputRegister(1), 1);
+ if (instr->OutputCount() > 1) __ Move(i.OutputRegister(1), 1);
__ bind(&fail);
break;
}
case kSSEFloat64ToUint64: {
Label fail;
- if (instr->OutputCount() > 1) __ Set(i.OutputRegister(1), 0);
+ if (instr->OutputCount() > 1) __ Move(i.OutputRegister(1), 0);
if (instr->InputAt(0)->IsFPRegister()) {
__ Cvttsd2uiq(i.OutputRegister(), i.InputDoubleRegister(0), &fail);
} else {
__ Cvttsd2uiq(i.OutputRegister(), i.InputOperand(0), &fail);
}
- if (instr->OutputCount() > 1) __ Set(i.OutputRegister(1), 1);
+ if (instr->OutputCount() > 1) __ Move(i.OutputRegister(1), 1);
__ bind(&fail);
break;
}
@@ -2390,21 +2393,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64F64x2ExtractLane: {
- DoubleRegister dst = i.OutputDoubleRegister();
- XMMRegister src = i.InputSimd128Register(0);
- uint8_t lane = i.InputUint8(1);
- if (lane == 0) {
- __ Move(dst, src);
- } else {
- DCHECK_EQ(1, lane);
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(tasm(), AVX);
- // Pass src as operand to avoid false-dependency on dst.
- __ vmovhlps(dst, src, src);
- } else {
- __ movhlps(dst, src);
- }
- }
+ __ F64x2ExtractLane(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputUint8(1));
+ break;
+ }
+ case kX64F64x2ReplaceLane: {
+ __ F64x2ReplaceLane(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputDoubleRegister(2), i.InputInt8(1));
break;
}
case kX64F64x2Sqrt: {
@@ -2428,42 +2423,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64F64x2Min: {
- XMMRegister src1 = i.InputSimd128Register(1),
- dst = i.OutputSimd128Register();
- DCHECK_EQ(dst, i.InputSimd128Register(0));
- // The minpd instruction doesn't propagate NaNs and +0's in its first
- // operand. Perform minpd in both orders, merge the resuls, and adjust.
- __ Movapd(kScratchDoubleReg, src1);
- __ Minpd(kScratchDoubleReg, dst);
- __ Minpd(dst, src1);
- // propagate -0's and NaNs, which may be non-canonical.
- __ Orpd(kScratchDoubleReg, dst);
- // Canonicalize NaNs by quieting and clearing the payload.
- __ Cmppd(dst, kScratchDoubleReg, int8_t{3});
- __ Orpd(kScratchDoubleReg, dst);
- __ Psrlq(dst, 13);
- __ Andnpd(dst, kScratchDoubleReg);
+ // Avoids a move in no-AVX case if dst = src0.
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ F64x2Min(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), kScratchDoubleReg);
break;
}
case kX64F64x2Max: {
- XMMRegister src1 = i.InputSimd128Register(1),
- dst = i.OutputSimd128Register();
- DCHECK_EQ(dst, i.InputSimd128Register(0));
- // The maxpd instruction doesn't propagate NaNs and +0's in its first
- // operand. Perform maxpd in both orders, merge the resuls, and adjust.
- __ Movapd(kScratchDoubleReg, src1);
- __ Maxpd(kScratchDoubleReg, dst);
- __ Maxpd(dst, src1);
- // Find discrepancies.
- __ Xorpd(dst, kScratchDoubleReg);
- // Propagate NaNs, which may be non-canonical.
- __ Orpd(kScratchDoubleReg, dst);
- // Propagate sign discrepancy and (subtle) quiet NaNs.
- __ Subpd(kScratchDoubleReg, dst);
- // Canonicalize NaNs by clearing the payload. Sign is non-deterministic.
- __ Cmppd(dst, kScratchDoubleReg, int8_t{3});
- __ Psrlq(dst, 13);
- __ Andnpd(dst, kScratchDoubleReg);
+ // Avoids a move in no-AVX case if dst = src0.
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ F64x2Max(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), kScratchDoubleReg);
break;
}
case kX64F64x2Eq: {
@@ -2534,43 +2504,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64F32x4Splat: {
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src = i.InputDoubleRegister(0);
- if (CpuFeatures::IsSupported(AVX2)) {
- CpuFeatureScope avx2_scope(tasm(), AVX2);
- __ vbroadcastss(dst, src);
- } else if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vshufps(dst, src, src, 0);
- } else {
- if (dst == src) {
- // 1 byte shorter than pshufd.
- __ shufps(dst, src, 0);
- } else {
- __ pshufd(dst, src, 0);
- }
- }
+ __ F32x4Splat(i.OutputSimd128Register(), i.InputDoubleRegister(0));
break;
}
case kX64F32x4ExtractLane: {
- XMMRegister dst = i.OutputDoubleRegister();
- XMMRegister src = i.InputSimd128Register(0);
- uint8_t lane = i.InputUint8(1);
- DCHECK_LT(lane, 4);
- // These instructions are shorter than insertps, but will leave junk in
- // the top lanes of dst.
- if (lane == 0) {
- __ Move(dst, src);
- } else if (lane == 1) {
- __ Movshdup(dst, src);
- } else if (lane == 2 && dst == src) {
- // Check dst == src to avoid false dependency on dst.
- __ Movhlps(dst, src);
- } else if (dst == src) {
- __ Shufps(dst, src, src, lane);
- } else {
- __ Pshufd(dst, src, lane);
- }
+ __ F32x4ExtractLane(i.OutputFloatRegister(), i.InputSimd128Register(0),
+ i.InputUint8(1));
break;
}
case kX64F32x4ReplaceLane: {
@@ -2671,7 +2610,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// propagate -0's and NaNs, which may be non-canonical.
__ Orps(kScratchDoubleReg, dst);
// Canonicalize NaNs by quieting and clearing the payload.
- __ Cmpps(dst, kScratchDoubleReg, int8_t{3});
+ __ Cmpunordps(dst, kScratchDoubleReg);
__ Orps(kScratchDoubleReg, dst);
__ Psrld(dst, byte{10});
__ Andnps(dst, kScratchDoubleReg);
@@ -2693,7 +2632,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// Propagate sign discrepancy and (subtle) quiet NaNs.
__ Subps(kScratchDoubleReg, dst);
// Canonicalize NaNs by clearing the payload. Sign is non-deterministic.
- __ Cmpps(dst, kScratchDoubleReg, int8_t{3});
+ __ Cmpunordps(dst, kScratchDoubleReg);
__ Psrld(dst, byte{10});
__ Andnps(dst, kScratchDoubleReg);
break;
@@ -2786,14 +2725,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64I64x2Neg: {
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src = i.InputSimd128Register(0);
- if (dst == src) {
- __ Movdqa(kScratchDoubleReg, src);
- src = kScratchDoubleReg;
- }
- __ Pxor(dst, dst);
- __ Psubq(dst, src);
+ __ I64x2Neg(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ kScratchDoubleReg);
break;
}
case kX64I64x2BitMask: {
@@ -2843,15 +2776,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Movdqa(tmp2, right);
// Multiply high dword of each qword of left with right.
- __ Psrlq(tmp1, 32);
+ __ Psrlq(tmp1, byte{32});
__ Pmuludq(tmp1, right);
// Multiply high dword of each qword of right with left.
- __ Psrlq(tmp2, 32);
+ __ Psrlq(tmp2, byte{32});
__ Pmuludq(tmp2, left);
__ Paddq(tmp2, tmp1);
- __ Psllq(tmp2, 32);
+ __ Psllq(tmp2, byte{32});
__ Pmuludq(left, right);
__ Paddq(left, tmp2); // left == dst
@@ -3675,15 +3608,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64S128Not: {
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src = i.InputSimd128Register(0);
- if (dst == src) {
- __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ Pxor(dst, kScratchDoubleReg);
- } else {
- __ Pcmpeqd(dst, dst);
- __ Pxor(dst, src);
- }
+ __ S128Not(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ kScratchDoubleReg);
break;
}
case kX64S128Select: {
@@ -4360,7 +4286,7 @@ void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
__ decl(rax);
__ j(not_zero, &nodeopt, Label::kNear);
- __ Set(rax, FLAG_deopt_every_n_times);
+ __ Move(rax, FLAG_deopt_every_n_times);
__ store_rax(counter);
__ popq(rax);
__ popfq();
@@ -4451,7 +4377,43 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
void CodeGenerator::AssembleArchSelect(Instruction* instr,
FlagsCondition condition) {
- UNIMPLEMENTED();
+ X64OperandConverter i(this, instr);
+ MachineRepresentation rep =
+ LocationOperand::cast(instr->OutputAt(0))->representation();
+ Condition cc = FlagsConditionToCondition(condition);
+ DCHECK_EQ(i.OutputRegister(), i.InputRegister(instr->InputCount() - 2));
+ size_t last_input = instr->InputCount() - 1;
+ // kUnorderedNotEqual can be implemented more efficiently than
+ // kUnorderedEqual. As the OR of two flags, it can be done with just two
+ // cmovs. If the condition was originally a kUnorderedEqual, expect the
+ // instruction selector to have inverted it and swapped the input.
+ DCHECK_NE(condition, kUnorderedEqual);
+ if (rep == MachineRepresentation::kWord32) {
+ if (HasRegisterInput(instr, last_input)) {
+ __ cmovl(cc, i.OutputRegister(), i.InputRegister(last_input));
+ if (condition == kUnorderedNotEqual) {
+ __ cmovl(parity_even, i.OutputRegister(), i.InputRegister(last_input));
+ }
+ } else {
+ __ cmovl(cc, i.OutputRegister(), i.InputOperand(last_input));
+ if (condition == kUnorderedNotEqual) {
+ __ cmovl(parity_even, i.OutputRegister(), i.InputOperand(last_input));
+ }
+ }
+ } else {
+ DCHECK_EQ(rep, MachineRepresentation::kWord64);
+ if (HasRegisterInput(instr, last_input)) {
+ __ cmovq(cc, i.OutputRegister(), i.InputRegister(last_input));
+ if (condition == kUnorderedNotEqual) {
+ __ cmovq(parity_even, i.OutputRegister(), i.InputRegister(last_input));
+ }
+ } else {
+ __ cmovq(cc, i.OutputRegister(), i.InputOperand(last_input));
+ if (condition == kUnorderedNotEqual) {
+ __ cmovq(parity_even, i.OutputRegister(), i.InputOperand(last_input));
+ }
+ }
+ }
}
namespace {
@@ -4464,13 +4426,11 @@ void CodeGenerator::FinishFrame(Frame* frame) {
CallDescriptor* call_descriptor = linkage()->GetIncomingDescriptor();
const RegList saves_fp = call_descriptor->CalleeSavedFPRegisters();
- if (saves_fp != 0) {
+ if (saves_fp != 0) { // Save callee-saved XMM registers.
frame->AlignSavedCalleeRegisterSlots();
- if (saves_fp != 0) { // Save callee-saved XMM registers.
- const uint32_t saves_fp_count = base::bits::CountPopulation(saves_fp);
- frame->AllocateSavedCalleeRegisterSlots(
- saves_fp_count * (kQuadWordSize / kSystemPointerSize));
- }
+ const uint32_t saves_fp_count = base::bits::CountPopulation(saves_fp);
+ frame->AllocateSavedCalleeRegisterSlots(
+ saves_fp_count * (kQuadWordSize / kSystemPointerSize));
}
const RegList saves = call_descriptor->CalleeSavedRegisters();
if (saves != 0) { // Save callee-saved registers.
@@ -4540,7 +4500,7 @@ void CodeGenerator::AssembleConstructFrame() {
// frame is still on the stack. Optimized code uses OSR values directly from
// the unoptimized frame. Thus, all that needs to be done is to allocate the
// remaining stack slots.
- if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
+ __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
required_slots -= static_cast<int>(osr_helper()->UnoptimizedFrameSlots());
ResetSpeculationPoison();
@@ -4664,7 +4624,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
if (parameter_slots != 0) {
if (additional_pop_count->IsImmediate()) {
DCHECK_EQ(g.ToConstant(additional_pop_count).ToInt32(), 0);
- } else if (__ emit_debug_code()) {
+ } else if (FLAG_debug_code) {
__ cmpq(g.ToRegister(additional_pop_count), Immediate(0));
__ Assert(equal, AbortReason::kUnexpectedAdditionalPopValue);
}
@@ -4787,7 +4747,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
if (RelocInfo::IsWasmReference(src.rmode())) {
__ movq(dst, Immediate64(src.ToInt64(), src.rmode()));
} else {
- __ Set(dst, src.ToInt64());
+ __ Move(dst, src.ToInt64());
}
break;
case Constant::kFloat32:
@@ -4837,7 +4797,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
__ movq(dst, Immediate(src.ToInt32()));
return;
case Constant::kInt64:
- __ Set(dst, src.ToInt64());
+ __ Move(dst, src.ToInt64());
return;
default:
break;
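AssembleArchSelect above implements Word32/Word64 select branchlessly: the output register already holds the false value (the selector pins it to the second-to-last input), one cmov on the primary condition overwrites it with the true value, and for unordered-not-equal float comparisons a second cmov on parity_even covers NaN operands, since ucomisd/ucomiss set the parity flag for unordered results. A scalar model of that flag logic, assuming ucomisd semantics (ZF set for equal or unordered, PF set only for unordered):

#include <cassert>
#include <cmath>
#include <cstdint>

int64_t SelectNotEqual(double a, double b, int64_t true_val, int64_t false_val) {
  int64_t result = false_val;                       // output starts as the false value
  bool unordered = std::isnan(a) || std::isnan(b);  // PF
  bool zero_flag = (a == b) || unordered;           // ZF
  if (!zero_flag) result = true_val;                // cmov(not_equal)
  if (unordered) result = true_val;                 // cmov(parity_even)
  return result;
}

int main() {
  assert(SelectNotEqual(1.0, 2.0, 10, 20) == 10);
  assert(SelectNotEqual(1.0, 1.0, 10, 20) == 20);
  assert(SelectNotEqual(std::nan(""), 1.0, 10, 20) == 10);  // unordered counts as "not equal"
  return 0;
}
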
diff --git a/chromium/v8/src/compiler/backend/x64/instruction-codes-x64.h b/chromium/v8/src/compiler/backend/x64/instruction-codes-x64.h
index 2ad717c0a04..eba23dcfa92 100644
--- a/chromium/v8/src/compiler/backend/x64/instruction-codes-x64.h
+++ b/chromium/v8/src/compiler/backend/x64/instruction-codes-x64.h
@@ -156,6 +156,7 @@ namespace compiler {
V(X64Peek) \
V(X64F64x2Splat) \
V(X64F64x2ExtractLane) \
+ V(X64F64x2ReplaceLane) \
V(X64F64x2Abs) \
V(X64F64x2Neg) \
V(X64F64x2Sqrt) \
diff --git a/chromium/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc b/chromium/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc
index dc323d4cc79..4fada93a312 100644
--- a/chromium/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc
+++ b/chromium/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc
@@ -132,6 +132,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64Pinsrq:
case kX64F64x2Splat:
case kX64F64x2ExtractLane:
+ case kX64F64x2ReplaceLane:
case kX64F64x2Abs:
case kX64F64x2Neg:
case kX64F64x2Sqrt:
diff --git a/chromium/v8/src/compiler/backend/x64/instruction-selector-x64.cc b/chromium/v8/src/compiler/backend/x64/instruction-selector-x64.cc
index 23bbed36137..40eddf31316 100644
--- a/chromium/v8/src/compiler/backend/x64/instruction-selector-x64.cc
+++ b/chromium/v8/src/compiler/backend/x64/instruction-selector-x64.cc
@@ -66,7 +66,8 @@ class X64OperandGenerator final : public OperandGenerator {
bool CanBeMemoryOperand(InstructionCode opcode, Node* node, Node* input,
int effect_level) {
- if (input->opcode() != IrOpcode::kLoad ||
+ if ((input->opcode() != IrOpcode::kLoad &&
+ input->opcode() != IrOpcode::kLoadImmutable) ||
!selector()->CanCover(node, input)) {
return false;
}
@@ -298,6 +299,7 @@ ArchOpcode GetLoadOpcode(LoadRepresentation load_rep) {
opcode = kX64Movdqu;
break;
case MachineRepresentation::kNone:
+ case MachineRepresentation::kMapWord:
UNREACHABLE();
}
return opcode;
@@ -332,6 +334,7 @@ ArchOpcode GetStoreOpcode(StoreRepresentation store_rep) {
case MachineRepresentation::kSimd128: // Fall through.
return kX64Movdqu;
case MachineRepresentation::kNone:
+ case MachineRepresentation::kMapWord:
UNREACHABLE();
}
UNREACHABLE();
@@ -465,6 +468,7 @@ void InstructionSelector::VisitLoad(Node* node, Node* value,
void InstructionSelector::VisitLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+ DCHECK(!load_rep.IsMapWord());
VisitLoad(node, node, GetLoadOpcode(load_rep));
}
@@ -479,6 +483,7 @@ void InstructionSelector::VisitStore(Node* node) {
Node* value = node->InputAt(2);
StoreRepresentation store_rep = StoreRepresentationOf(node->op());
+ DCHECK_NE(store_rep.representation(), MachineRepresentation::kMapWord);
WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
if (FLAG_enable_unconditional_write_barriers &&
@@ -704,7 +709,8 @@ void InstructionSelector::VisitStackPointerGreaterThan(
X64OperandGenerator g(this);
Node* const value = node->InputAt(0);
if (g.CanBeMemoryOperand(kX64Cmp, node, value, effect_level)) {
- DCHECK_EQ(IrOpcode::kLoad, value->opcode());
+ DCHECK(IrOpcode::kLoad == value->opcode() ||
+ IrOpcode::kLoadImmutable == value->opcode());
// GetEffectiveAddressMemoryOperand can create at most 3 inputs.
static constexpr int kMaxInputCount = 3;
@@ -726,7 +732,9 @@ namespace {
bool TryMergeTruncateInt64ToInt32IntoLoad(InstructionSelector* selector,
Node* node, Node* load) {
- if (load->opcode() == IrOpcode::kLoad && selector->CanCover(node, load)) {
+ if ((load->opcode() == IrOpcode::kLoad ||
+ load->opcode() == IrOpcode::kLoadImmutable) &&
+ selector->CanCover(node, load)) {
LoadRepresentation load_rep = LoadRepresentationOf(load->op());
MachineRepresentation rep = load_rep.representation();
InstructionCode opcode;
@@ -1363,7 +1371,9 @@ void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
X64OperandGenerator g(this);
Node* const value = node->InputAt(0);
- if (value->opcode() == IrOpcode::kLoad && CanCover(node, value)) {
+ if ((value->opcode() == IrOpcode::kLoad ||
+ value->opcode() == IrOpcode::kLoadImmutable) &&
+ CanCover(node, value)) {
LoadRepresentation load_rep = LoadRepresentationOf(value->op());
MachineRepresentation rep = load_rep.representation();
InstructionCode opcode;
@@ -1870,14 +1880,25 @@ void VisitCompareWithMemoryOperand(InstructionSelector* selector,
InstructionCode opcode, Node* left,
InstructionOperand right,
FlagsContinuation* cont) {
- DCHECK_EQ(IrOpcode::kLoad, left->opcode());
+ DCHECK(IrOpcode::kLoad == left->opcode() ||
+ IrOpcode::kLoadImmutable == left->opcode());
X64OperandGenerator g(selector);
size_t input_count = 0;
- InstructionOperand inputs[4];
+ InstructionOperand inputs[6];
AddressingMode addressing_mode =
g.GetEffectiveAddressMemoryOperand(left, inputs, &input_count);
opcode |= AddressingModeField::encode(addressing_mode);
inputs[input_count++] = right;
+ if (cont->IsSelect()) {
+ if (opcode == kUnorderedEqual) {
+ cont->Negate();
+ inputs[input_count++] = g.UseRegister(cont->true_value());
+ inputs[input_count++] = g.Use(cont->false_value());
+ } else {
+ inputs[input_count++] = g.UseRegister(cont->false_value());
+ inputs[input_count++] = g.Use(cont->true_value());
+ }
+ }
selector->EmitWithContinuation(opcode, 0, nullptr, input_count, inputs, cont);
}
@@ -1886,6 +1907,20 @@ void VisitCompareWithMemoryOperand(InstructionSelector* selector,
void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
InstructionOperand left, InstructionOperand right,
FlagsContinuation* cont) {
+ if (cont->IsSelect()) {
+ X64OperandGenerator g(selector);
+ InstructionOperand inputs[4] = {left, right};
+ if (cont->condition() == kUnorderedEqual) {
+ cont->Negate();
+ inputs[2] = g.UseRegister(cont->true_value());
+ inputs[3] = g.Use(cont->false_value());
+ } else {
+ inputs[2] = g.UseRegister(cont->false_value());
+ inputs[3] = g.Use(cont->true_value());
+ }
+ selector->EmitWithContinuation(opcode, 0, nullptr, 4, inputs, cont);
+ return;
+ }
selector->EmitWithContinuation(opcode, left, right, cont);
}
@@ -1901,7 +1936,8 @@ void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
}
MachineType MachineTypeForNarrow(Node* node, Node* hint_node) {
- if (hint_node->opcode() == IrOpcode::kLoad) {
+ if (hint_node->opcode() == IrOpcode::kLoad ||
+ hint_node->opcode() == IrOpcode::kLoadImmutable) {
MachineType hint = LoadRepresentationOf(hint_node->op());
if (node->opcode() == IrOpcode::kInt32Constant ||
node->opcode() == IrOpcode::kInt64Constant) {
@@ -1935,8 +1971,10 @@ MachineType MachineTypeForNarrow(Node* node, Node* hint_node) {
}
}
}
- return node->opcode() == IrOpcode::kLoad ? LoadRepresentationOf(node->op())
- : MachineType::None();
+ return node->opcode() == IrOpcode::kLoad ||
+ node->opcode() == IrOpcode::kLoadImmutable
+ ? LoadRepresentationOf(node->op())
+ : MachineType::None();
}
// Tries to match the size of the given opcode to that of the operands, if
@@ -2151,7 +2189,8 @@ void VisitCompareZero(InstructionSelector* selector, Node* user, Node* node,
}
}
int effect_level = selector->GetEffectLevel(node, cont);
- if (node->opcode() == IrOpcode::kLoad) {
+ if (node->opcode() == IrOpcode::kLoad ||
+ node->opcode() == IrOpcode::kLoadImmutable) {
switch (LoadRepresentationOf(node->op()).representation()) {
case MachineRepresentation::kWord8:
if (opcode == kX64Cmp32) {
@@ -2920,7 +2959,6 @@ VISIT_ATOMIC_BINOP(Xor)
V(F32x4RecipApprox) \
V(F32x4RecipSqrtApprox) \
V(F32x4DemoteF64x2Zero) \
- V(I64x2Neg) \
V(I64x2BitMask) \
V(I64x2SConvertI32x4Low) \
V(I64x2SConvertI32x4High) \
@@ -3045,6 +3083,16 @@ void InstructionSelector::VisitF32x4ReplaceLane(Node* node) {
g.Use(node->InputAt(1)));
}
+void InstructionSelector::VisitF64x2ReplaceLane(Node* node) {
+ X64OperandGenerator g(this);
+ int32_t lane = OpParameter<int32_t>(node->op());
+  // When AVX is not supported, define dst == src to save a move.
+ InstructionOperand dst =
+ IsSupported(AVX) ? g.DefineAsRegister(node) : g.DefineSameAsFirst(node);
+ Emit(kX64F64x2ReplaceLane, dst, g.UseRegister(node->InputAt(0)),
+ g.UseImmediate(lane), g.UseRegister(node->InputAt(1)));
+}
+
#define VISIT_SIMD_REPLACE_LANE(TYPE, OPCODE) \
void InstructionSelector::Visit##TYPE##ReplaceLane(Node* node) { \
X64OperandGenerator g(this); \
@@ -3054,7 +3102,6 @@ void InstructionSelector::VisitF32x4ReplaceLane(Node* node) {
}
#define SIMD_TYPES_FOR_REPLACE_LANE(V) \
- V(F64x2, kX64Pinsrq) \
V(I64x2, kX64Pinsrq) \
V(I32x4, kX64Pinsrd) \
V(I16x8, kX64Pinsrw) \
@@ -3186,6 +3233,15 @@ VISIT_SIMD_QFMOP(F32x4Qfma)
VISIT_SIMD_QFMOP(F32x4Qfms)
#undef VISIT_SIMD_QFMOP
+void InstructionSelector::VisitI64x2Neg(Node* node) {
+ X64OperandGenerator g(this);
+  // If AVX is not supported, make sure dst != src to avoid a move.
+ InstructionOperand operand0 = IsSupported(AVX)
+ ? g.UseRegister(node->InputAt(0))
+ : g.UseUnique(node->InputAt(0));
+ Emit(kX64I64x2Neg, g.DefineAsRegister(node), operand0);
+}
+
void InstructionSelector::VisitI64x2ShrS(Node* node) {
X64OperandGenerator g(this);
InstructionOperand temps[] = {g.TempRegister()};
@@ -3707,13 +3763,22 @@ void InstructionSelector::VisitI64x2Abs(Node* node) {
}
}
+void InstructionSelector::AddOutputToSelectContinuation(OperandGenerator* g,
+ int first_input_index,
+ Node* node) {
+ continuation_outputs_.push_back(
+ g->DefineSameAsInput(node, first_input_index));
+}
+
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
MachineOperatorBuilder::Flags flags =
MachineOperatorBuilder::kWord32ShiftIsSafe |
MachineOperatorBuilder::kWord32Ctz | MachineOperatorBuilder::kWord64Ctz |
- MachineOperatorBuilder::kWord32Rol | MachineOperatorBuilder::kWord64Rol;
+ MachineOperatorBuilder::kWord32Rol | MachineOperatorBuilder::kWord64Rol |
+ MachineOperatorBuilder::kWord32Select |
+ MachineOperatorBuilder::kWord64Select;
if (CpuFeatures::IsSupported(POPCNT)) {
flags |= MachineOperatorBuilder::kWord32Popcnt |
MachineOperatorBuilder::kWord64Popcnt;
diff --git a/chromium/v8/src/compiler/bytecode-graph-builder.cc b/chromium/v8/src/compiler/bytecode-graph-builder.cc
index 54996bb4755..faae9c1eab0 100644
--- a/chromium/v8/src/compiler/bytecode-graph-builder.cc
+++ b/chromium/v8/src/compiler/bytecode-graph-builder.cc
@@ -27,7 +27,7 @@
#include "src/objects/literal-objects-inl.h"
#include "src/objects/objects-inl.h"
#include "src/objects/smi.h"
-#include "src/objects/template-objects.h"
+#include "src/objects/template-objects-inl.h"
namespace v8 {
namespace internal {
@@ -72,7 +72,8 @@ class BytecodeGraphBuilder {
CodeKind code_kind() const { return code_kind_; }
bool native_context_independent() const {
- return CodeKindIsNativeContextIndependentJSFunction(code_kind_);
+ // TODO(jgruber,v8:8888): Remove dependent code.
+ return false;
}
bool is_turboprop() const { return code_kind_ == CodeKind::TURBOPROP; }
bool generate_full_feedback_collection() const {
@@ -346,6 +347,10 @@ class BytecodeGraphBuilder {
// feedback. Returns kDisallowSpeculation if feedback is insufficient.
SpeculationMode GetSpeculationMode(int slot_id) const;
+ // Helper function to determine the call feedback relation from the recorded
+ // type feedback. Returns kUnrelated if feedback is insufficient.
+ CallFeedbackRelation ComputeCallFeedbackRelation(int slot_id) const;
+
// Helpers for building the implicit FunctionEntry and IterationBody
// StackChecks.
void BuildFunctionEntryStackCheck();
@@ -412,10 +417,15 @@ class BytecodeGraphBuilder {
int context_register_; // Index of register holding handler context.
};
- Handle<Object> GetConstantForIndexOperand(int operand_index) const {
- return broker_->CanonicalPersistentHandle(
- bytecode_iterator().GetConstantForIndexOperand(operand_index,
- local_isolate_));
+ template <class T = Object>
+ typename ref_traits<T>::ref_type MakeRefForConstantForIndexOperand(
+ int operand_index) {
+    // The BytecodeArray itself was fetched using a barrier, so all reads
+    // from the constant pool are safe.
+ return MakeRefAssumeMemoryFence(
+ broker(), broker()->CanonicalPersistentHandle(Handle<T>::cast(
+ bytecode_iterator().GetConstantForIndexOperand(
+ operand_index, local_isolate_))));
}
Graph* graph() const { return jsgraph_->graph(); }
@@ -428,9 +438,7 @@ class BytecodeGraphBuilder {
return jsgraph_->simplified();
}
Zone* local_zone() const { return local_zone_; }
- BytecodeArrayRef bytecode_array() const {
- return shared_info().GetBytecodeArray();
- }
+ BytecodeArrayRef bytecode_array() const { return bytecode_array_; }
FeedbackVectorRef const& feedback_vector() const { return feedback_vector_; }
const JSTypeHintLowering& type_hint_lowering() const {
return type_hint_lowering_;
@@ -480,6 +488,7 @@ class BytecodeGraphBuilder {
// The native context for which we optimize.
NativeContextRef const native_context_;
SharedFunctionInfoRef const shared_info_;
+ BytecodeArrayRef const bytecode_array_;
FeedbackCellRef const feedback_cell_;
FeedbackVectorRef const feedback_vector_;
CallFrequency const invocation_frequency_;
@@ -1065,6 +1074,7 @@ BytecodeGraphBuilder::BytecodeGraphBuilder(
jsgraph_(jsgraph),
native_context_(native_context),
shared_info_(shared_info),
+ bytecode_array_(shared_info.GetBytecodeArray()),
feedback_cell_(feedback_cell),
feedback_vector_(feedback_cell.value()->AsFeedbackVector()),
invocation_frequency_(invocation_frequency),
@@ -1569,7 +1579,7 @@ void BytecodeGraphBuilder::VisitBytecodes() {
}
// TODO(leszeks): Increment usage counter on BG thread.
- if (!FLAG_concurrent_inlining && has_one_shot_bytecode) {
+ if (!broker()->is_concurrent_inlining() && has_one_shot_bytecode) {
// (For concurrent inlining this is done in the serializer instead.)
isolate()->CountUsage(
v8::Isolate::UseCounterFeature::kOptimizedFunctionWithOneShotBytecode);
@@ -1589,8 +1599,7 @@ void BytecodeGraphBuilder::VisitLdaSmi() {
}
void BytecodeGraphBuilder::VisitLdaConstant() {
- ObjectRef object(broker(), GetConstantForIndexOperand(0),
- ObjectRef::BackgroundSerialization::kAllowed);
+ ObjectRef object = MakeRefForConstantForIndexOperand(0);
Node* node = jsgraph()->Constant(object);
environment()->BindAccumulator(node);
}
@@ -1660,25 +1669,24 @@ Node* BytecodeGraphBuilder::BuildLoadGlobal(NameRef name,
void BytecodeGraphBuilder::VisitLdaGlobal() {
PrepareEagerCheckpoint();
- NameRef name(broker(), GetConstantForIndexOperand(0));
+ NameRef name = MakeRefForConstantForIndexOperand<Name>(0);
uint32_t feedback_slot_index = bytecode_iterator().GetIndexOperand(1);
Node* node =
- BuildLoadGlobal(name, feedback_slot_index, TypeofMode::NOT_INSIDE_TYPEOF);
+ BuildLoadGlobal(name, feedback_slot_index, TypeofMode::kNotInside);
environment()->BindAccumulator(node, Environment::kAttachFrameState);
}
void BytecodeGraphBuilder::VisitLdaGlobalInsideTypeof() {
PrepareEagerCheckpoint();
- NameRef name(broker(), GetConstantForIndexOperand(0));
+ NameRef name = MakeRefForConstantForIndexOperand<Name>(0);
uint32_t feedback_slot_index = bytecode_iterator().GetIndexOperand(1);
- Node* node =
- BuildLoadGlobal(name, feedback_slot_index, TypeofMode::INSIDE_TYPEOF);
+ Node* node = BuildLoadGlobal(name, feedback_slot_index, TypeofMode::kInside);
environment()->BindAccumulator(node, Environment::kAttachFrameState);
}
void BytecodeGraphBuilder::VisitStaGlobal() {
PrepareEagerCheckpoint();
- NameRef name(broker(), GetConstantForIndexOperand(0));
+ NameRef name = MakeRefForConstantForIndexOperand<Name>(0);
FeedbackSource feedback =
CreateFeedbackSource(bytecode_iterator().GetIndexOperand(1));
Node* value = environment()->LookupAccumulator();
@@ -1819,10 +1827,9 @@ void BytecodeGraphBuilder::VisitStaCurrentContextSlot() {
void BytecodeGraphBuilder::BuildLdaLookupSlot(TypeofMode typeof_mode) {
PrepareEagerCheckpoint();
- Node* name =
- jsgraph()->Constant(ObjectRef(broker(), GetConstantForIndexOperand(0)));
+ Node* name = jsgraph()->Constant(MakeRefForConstantForIndexOperand(0));
const Operator* op =
- javascript()->CallRuntime(typeof_mode == TypeofMode::NOT_INSIDE_TYPEOF
+ javascript()->CallRuntime(typeof_mode == TypeofMode::kNotInside
? Runtime::kLoadLookupSlot
: Runtime::kLoadLookupSlotInsideTypeof);
Node* value = NewNode(op, name);
@@ -1830,11 +1837,11 @@ void BytecodeGraphBuilder::BuildLdaLookupSlot(TypeofMode typeof_mode) {
}
void BytecodeGraphBuilder::VisitLdaLookupSlot() {
- BuildLdaLookupSlot(TypeofMode::NOT_INSIDE_TYPEOF);
+ BuildLdaLookupSlot(TypeofMode::kNotInside);
}
void BytecodeGraphBuilder::VisitLdaLookupSlotInsideTypeof() {
- BuildLdaLookupSlot(TypeofMode::INSIDE_TYPEOF);
+ BuildLdaLookupSlot(TypeofMode::kInside);
}
BytecodeGraphBuilder::Environment*
@@ -1870,13 +1877,13 @@ base::Optional<ScopeInfoRef> BytecodeGraphBuilder::TryGetScopeInfo() {
Node* context = environment()->Context();
switch (context->opcode()) {
case IrOpcode::kJSCreateFunctionContext:
- return ScopeInfoRef(
+ return MakeRef(
broker(),
CreateFunctionContextParametersOf(context->op()).scope_info());
case IrOpcode::kJSCreateBlockContext:
case IrOpcode::kJSCreateCatchContext:
case IrOpcode::kJSCreateWithContext:
- return ScopeInfoRef(broker(), ScopeInfoOf(context->op()));
+ return MakeRef(broker(), ScopeInfoOf(context->op()));
case IrOpcode::kParameter: {
ScopeInfoRef scope_info = shared_info_.scope_info();
if (scope_info.HasOuterScopeInfo()) {
@@ -1972,11 +1979,10 @@ void BytecodeGraphBuilder::BuildLdaLookupContextSlot(TypeofMode typeof_mode) {
// Slow path, do a runtime load lookup.
set_environment(slow_environment);
{
- Node* name = jsgraph()->Constant(
- ObjectRef(broker(), GetConstantForIndexOperand(0)));
+ Node* name = jsgraph()->Constant(MakeRefForConstantForIndexOperand(0));
const Operator* op =
- javascript()->CallRuntime(typeof_mode == TypeofMode::NOT_INSIDE_TYPEOF
+ javascript()->CallRuntime(typeof_mode == TypeofMode::kNotInside
? Runtime::kLoadLookupSlot
: Runtime::kLoadLookupSlotInsideTypeof);
Node* value = NewNode(op, name);
@@ -1992,11 +1998,11 @@ void BytecodeGraphBuilder::BuildLdaLookupContextSlot(TypeofMode typeof_mode) {
}
void BytecodeGraphBuilder::VisitLdaLookupContextSlot() {
- BuildLdaLookupContextSlot(TypeofMode::NOT_INSIDE_TYPEOF);
+ BuildLdaLookupContextSlot(TypeofMode::kNotInside);
}
void BytecodeGraphBuilder::VisitLdaLookupContextSlotInsideTypeof() {
- BuildLdaLookupContextSlot(TypeofMode::INSIDE_TYPEOF);
+ BuildLdaLookupContextSlot(TypeofMode::kInside);
}
void BytecodeGraphBuilder::BuildLdaLookupGlobalSlot(TypeofMode typeof_mode) {
@@ -2008,7 +2014,7 @@ void BytecodeGraphBuilder::BuildLdaLookupGlobalSlot(TypeofMode typeof_mode) {
// Fast path, do a global load.
{
PrepareEagerCheckpoint();
- NameRef name(broker(), GetConstantForIndexOperand(0));
+ NameRef name = MakeRefForConstantForIndexOperand<Name>(0);
uint32_t feedback_slot_index = bytecode_iterator().GetIndexOperand(1);
Node* node = BuildLoadGlobal(name, feedback_slot_index, typeof_mode);
environment()->BindAccumulator(node, Environment::kAttachFrameState);
@@ -2024,10 +2030,10 @@ void BytecodeGraphBuilder::BuildLdaLookupGlobalSlot(TypeofMode typeof_mode) {
set_environment(slow_environment);
{
Node* name =
- jsgraph()->Constant(NameRef(broker(), GetConstantForIndexOperand(0)));
+ jsgraph()->Constant(MakeRefForConstantForIndexOperand<Name>(0));
const Operator* op =
- javascript()->CallRuntime(typeof_mode == TypeofMode::NOT_INSIDE_TYPEOF
+ javascript()->CallRuntime(typeof_mode == TypeofMode::kNotInside
? Runtime::kLoadLookupSlot
: Runtime::kLoadLookupSlotInsideTypeof);
Node* value = NewNode(op, name);
@@ -2043,18 +2049,17 @@ void BytecodeGraphBuilder::BuildLdaLookupGlobalSlot(TypeofMode typeof_mode) {
}
void BytecodeGraphBuilder::VisitLdaLookupGlobalSlot() {
- BuildLdaLookupGlobalSlot(TypeofMode::NOT_INSIDE_TYPEOF);
+ BuildLdaLookupGlobalSlot(TypeofMode::kNotInside);
}
void BytecodeGraphBuilder::VisitLdaLookupGlobalSlotInsideTypeof() {
- BuildLdaLookupGlobalSlot(TypeofMode::INSIDE_TYPEOF);
+ BuildLdaLookupGlobalSlot(TypeofMode::kInside);
}
void BytecodeGraphBuilder::VisitStaLookupSlot() {
PrepareEagerCheckpoint();
Node* value = environment()->LookupAccumulator();
- Node* name =
- jsgraph()->Constant(ObjectRef(broker(), GetConstantForIndexOperand(0)));
+ Node* name = jsgraph()->Constant(MakeRefForConstantForIndexOperand(0));
int bytecode_flags = bytecode_iterator().GetFlagOperand(1);
LanguageMode language_mode = static_cast<LanguageMode>(
interpreter::StoreLookupSlotFlags::LanguageModeBit::decode(
@@ -2078,7 +2083,7 @@ void BytecodeGraphBuilder::VisitLdaNamedProperty() {
PrepareEagerCheckpoint();
Node* object =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
- NameRef name(broker(), GetConstantForIndexOperand(1));
+ NameRef name = MakeRefForConstantForIndexOperand<Name>(1);
FeedbackSource feedback =
CreateFeedbackSource(bytecode_iterator().GetIndexOperand(2));
const Operator* op = javascript()->LoadNamed(name.object(), feedback);
@@ -2102,7 +2107,7 @@ void BytecodeGraphBuilder::VisitLdaNamedPropertyNoFeedback() {
PrepareEagerCheckpoint();
Node* object =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
- NameRef name(broker(), GetConstantForIndexOperand(1));
+ NameRef name = MakeRefForConstantForIndexOperand<Name>(1);
const Operator* op = javascript()->LoadNamed(name.object(), FeedbackSource());
DCHECK(IrOpcode::IsFeedbackCollectingOpcode(op->opcode()));
Node* node = NewNode(op, object, feedback_vector_node());
@@ -2114,7 +2119,7 @@ void BytecodeGraphBuilder::VisitLdaNamedPropertyFromSuper() {
Node* receiver =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
Node* home_object = environment()->LookupAccumulator();
- NameRef name(broker(), GetConstantForIndexOperand(1));
+ NameRef name = MakeRefForConstantForIndexOperand<Name>(1);
FeedbackSource feedback =
CreateFeedbackSource(bytecode_iterator().GetIndexOperand(2));
@@ -2168,7 +2173,7 @@ void BytecodeGraphBuilder::BuildNamedStore(StoreMode store_mode) {
Node* value = environment()->LookupAccumulator();
Node* object =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
- NameRef name(broker(), GetConstantForIndexOperand(1));
+ NameRef name = MakeRefForConstantForIndexOperand<Name>(1);
FeedbackSource feedback =
CreateFeedbackSource(bytecode_iterator().GetIndexOperand(2));
@@ -2209,7 +2214,7 @@ void BytecodeGraphBuilder::VisitStaNamedPropertyNoFeedback() {
Node* value = environment()->LookupAccumulator();
Node* object =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
- NameRef name(broker(), GetConstantForIndexOperand(1));
+ NameRef name = MakeRefForConstantForIndexOperand<Name>(1);
LanguageMode language_mode =
static_cast<LanguageMode>(bytecode_iterator().GetFlagOperand(2));
const Operator* op =
@@ -2288,7 +2293,8 @@ void BytecodeGraphBuilder::VisitPopContext() {
}
void BytecodeGraphBuilder::VisitCreateClosure() {
- SharedFunctionInfoRef shared_info(broker(), GetConstantForIndexOperand(0));
+ SharedFunctionInfoRef shared_info =
+ MakeRefForConstantForIndexOperand<SharedFunctionInfo>(0);
AllocationType allocation =
interpreter::CreateClosureFlags::PretenuredBit::decode(
bytecode_iterator().GetFlagOperand(2))
@@ -2305,14 +2311,14 @@ void BytecodeGraphBuilder::VisitCreateClosure() {
}
void BytecodeGraphBuilder::VisitCreateBlockContext() {
- ScopeInfoRef scope_info(broker(), GetConstantForIndexOperand(0));
+ ScopeInfoRef scope_info = MakeRefForConstantForIndexOperand<ScopeInfo>(0);
const Operator* op = javascript()->CreateBlockContext(scope_info.object());
Node* context = NewNode(op);
environment()->BindAccumulator(context);
}
void BytecodeGraphBuilder::VisitCreateFunctionContext() {
- ScopeInfoRef scope_info(broker(), GetConstantForIndexOperand(0));
+ ScopeInfoRef scope_info = MakeRefForConstantForIndexOperand<ScopeInfo>(0);
uint32_t slots = bytecode_iterator().GetUnsignedImmediateOperand(1);
const Operator* op = javascript()->CreateFunctionContext(
scope_info.object(), slots, FUNCTION_SCOPE);
@@ -2321,7 +2327,7 @@ void BytecodeGraphBuilder::VisitCreateFunctionContext() {
}
void BytecodeGraphBuilder::VisitCreateEvalContext() {
- ScopeInfoRef scope_info(broker(), GetConstantForIndexOperand(0));
+ ScopeInfoRef scope_info = MakeRefForConstantForIndexOperand<ScopeInfo>(0);
uint32_t slots = bytecode_iterator().GetUnsignedImmediateOperand(1);
const Operator* op = javascript()->CreateFunctionContext(scope_info.object(),
slots, EVAL_SCOPE);
@@ -2332,7 +2338,7 @@ void BytecodeGraphBuilder::VisitCreateEvalContext() {
void BytecodeGraphBuilder::VisitCreateCatchContext() {
interpreter::Register reg = bytecode_iterator().GetRegisterOperand(0);
Node* exception = environment()->LookupRegister(reg);
- ScopeInfoRef scope_info(broker(), GetConstantForIndexOperand(1));
+ ScopeInfoRef scope_info = MakeRefForConstantForIndexOperand<ScopeInfo>(1);
const Operator* op = javascript()->CreateCatchContext(scope_info.object());
Node* context = NewNode(op, exception);
@@ -2342,7 +2348,7 @@ void BytecodeGraphBuilder::VisitCreateCatchContext() {
void BytecodeGraphBuilder::VisitCreateWithContext() {
Node* object =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
- ScopeInfoRef scope_info(broker(), GetConstantForIndexOperand(1));
+ ScopeInfoRef scope_info = MakeRefForConstantForIndexOperand<ScopeInfo>(1);
const Operator* op = javascript()->CreateWithContext(scope_info.object());
Node* context = NewNode(op, object);
@@ -2368,7 +2374,7 @@ void BytecodeGraphBuilder::VisitCreateRestParameter() {
}
void BytecodeGraphBuilder::VisitCreateRegExpLiteral() {
- StringRef constant_pattern(broker(), GetConstantForIndexOperand(0));
+ StringRef constant_pattern = MakeRefForConstantForIndexOperand<String>(0);
int const slot_id = bytecode_iterator().GetIndexOperand(1);
FeedbackSource pair = CreateFeedbackSource(slot_id);
int literal_flags = bytecode_iterator().GetFlagOperand(2);
@@ -2381,8 +2387,8 @@ void BytecodeGraphBuilder::VisitCreateRegExpLiteral() {
}
void BytecodeGraphBuilder::VisitCreateArrayLiteral() {
- ArrayBoilerplateDescriptionRef array_boilerplate_description(
- broker(), GetConstantForIndexOperand(0));
+ ArrayBoilerplateDescriptionRef array_boilerplate_description =
+ MakeRefForConstantForIndexOperand<ArrayBoilerplateDescription>(0);
int const slot_id = bytecode_iterator().GetIndexOperand(1);
FeedbackSource pair = CreateFeedbackSource(slot_id);
int bytecode_flags = bytecode_iterator().GetFlagOperand(2);
@@ -2420,8 +2426,8 @@ void BytecodeGraphBuilder::VisitCreateArrayFromIterable() {
}
void BytecodeGraphBuilder::VisitCreateObjectLiteral() {
- ObjectBoilerplateDescriptionRef constant_properties(
- broker(), GetConstantForIndexOperand(0));
+ ObjectBoilerplateDescriptionRef constant_properties =
+ MakeRefForConstantForIndexOperand<ObjectBoilerplateDescription>(0);
int const slot_id = bytecode_iterator().GetIndexOperand(1);
FeedbackSource pair = CreateFeedbackSource(slot_id);
int bytecode_flags = bytecode_iterator().GetFlagOperand(2);
@@ -2459,8 +2465,8 @@ void BytecodeGraphBuilder::VisitCloneObject() {
void BytecodeGraphBuilder::VisitGetTemplateObject() {
FeedbackSource source =
CreateFeedbackSource(bytecode_iterator().GetIndexOperand(1));
- TemplateObjectDescriptionRef description(broker(),
- GetConstantForIndexOperand(0));
+ TemplateObjectDescriptionRef description =
+ MakeRefForConstantForIndexOperand<TemplateObjectDescription>(0);
STATIC_ASSERT(JSGetTemplateObjectNode::FeedbackVectorIndex() == 0);
const Operator* op = javascript()->GetTemplateObject(
description.object(), shared_info().object(), source);
@@ -2508,9 +2514,11 @@ void BytecodeGraphBuilder::BuildCall(ConvertReceiverMode receiver_mode,
FeedbackSource feedback = CreateFeedbackSource(slot_id);
CallFrequency frequency = ComputeCallFrequency(slot_id);
SpeculationMode speculation_mode = GetSpeculationMode(slot_id);
+ CallFeedbackRelation call_feedback_relation =
+ ComputeCallFeedbackRelation(slot_id);
const Operator* op =
javascript()->Call(arg_count, frequency, feedback, receiver_mode,
- speculation_mode, CallFeedbackRelation::kRelated);
+ speculation_mode, call_feedback_relation);
DCHECK(IrOpcode::IsFeedbackCollectingOpcode(op->opcode()));
JSTypeHintLowering::LoweringResult lowering = TryBuildSimplifiedCall(
@@ -2955,8 +2963,7 @@ void BytecodeGraphBuilder::VisitThrowReferenceErrorIfHole() {
Node* accumulator = environment()->LookupAccumulator();
Node* check_for_hole = NewNode(simplified()->ReferenceEqual(), accumulator,
jsgraph()->TheHoleConstant());
- Node* name =
- jsgraph()->Constant(ObjectRef(broker(), GetConstantForIndexOperand(0)));
+ Node* name = jsgraph()->Constant(MakeRefForConstantForIndexOperand(0));
BuildHoleCheckAndThrow(check_for_hole,
Runtime::kThrowAccessedUninitializedVariable, name);
}
@@ -3088,6 +3095,19 @@ SpeculationMode BytecodeGraphBuilder::GetSpeculationMode(int slot_id) const {
: feedback.AsCall().speculation_mode();
}
+CallFeedbackRelation BytecodeGraphBuilder::ComputeCallFeedbackRelation(
+ int slot_id) const {
+ FeedbackSlot slot = FeedbackVector::ToSlot(slot_id);
+ FeedbackSource source(feedback_vector(), slot);
+ ProcessedFeedback const& feedback = broker()->GetFeedbackForCall(source);
+ if (feedback.IsInsufficient()) return CallFeedbackRelation::kUnrelated;
+ CallFeedbackContent call_feedback_content =
+ feedback.AsCall().call_feedback_content();
+ return call_feedback_content == CallFeedbackContent::kTarget
+ ? CallFeedbackRelation::kTarget
+ : CallFeedbackRelation::kReceiver;
+}
+
void BytecodeGraphBuilder::VisitBitwiseNot() {
FeedbackSource feedback = CreateFeedbackSource(
bytecode_iterator().GetSlotOperand(kUnaryOperationHintIndex));
diff --git a/chromium/v8/src/compiler/code-assembler.cc b/chromium/v8/src/compiler/code-assembler.cc
index 4ec0c8f9d85..8ff1777366d 100644
--- a/chromium/v8/src/compiler/code-assembler.cc
+++ b/chromium/v8/src/compiler/code-assembler.cc
@@ -8,7 +8,7 @@
#include "src/base/bits.h"
#include "src/codegen/code-factory.h"
-#include "src/codegen/interface-descriptors.h"
+#include "src/codegen/interface-descriptors-inl.h"
#include "src/codegen/machine-type.h"
#include "src/codegen/macro-assembler.h"
#include "src/compiler/backend/instruction-selector.h"
@@ -331,6 +331,10 @@ TNode<Float64T> CodeAssembler::Float64Constant(double value) {
return UncheckedCast<Float64T>(jsgraph()->Float64Constant(value));
}
+bool CodeAssembler::IsMapOffsetConstant(Node* node) {
+ return raw_assembler()->IsMapOffsetConstant(node);
+}
+
bool CodeAssembler::TryToInt32Constant(TNode<IntegralT> node,
int32_t* out_value) {
{
@@ -689,11 +693,15 @@ TNode<Object> CodeAssembler::LoadFullTagged(Node* base,
TNode<Object> CodeAssembler::LoadFullTagged(Node* base, TNode<IntPtrT> offset,
LoadSensitivity needs_poisoning) {
+ // Please use LoadFromObject(MachineType::MapInHeader(), object,
+ // IntPtrConstant(-kHeapObjectTag)) instead.
+ DCHECK(!raw_assembler()->IsMapOffsetConstantMinusTag(offset));
return BitcastWordToTagged(Load<RawPtrT>(base, offset, needs_poisoning));
}
Node* CodeAssembler::AtomicLoad(MachineType type, TNode<RawPtrT> base,
TNode<WordT> offset) {
+ DCHECK(!raw_assembler()->IsMapOffsetConstantMinusTag(offset));
return raw_assembler()->AtomicLoad(type, base, offset);
}
@@ -713,6 +721,27 @@ Node* CodeAssembler::LoadFromObject(MachineType type, TNode<Object> object,
return raw_assembler()->LoadFromObject(type, object, offset);
}
+#ifdef V8_MAP_PACKING
+Node* CodeAssembler::PackMapWord(Node* value) {
+ TNode<IntPtrT> map_word =
+ BitcastTaggedToWordForTagAndSmiBits(UncheckedCast<AnyTaggedT>(value));
+ TNode<WordT> packed = WordXor(UncheckedCast<WordT>(map_word),
+ IntPtrConstant(Internals::kMapWordXorMask));
+ return BitcastWordToTaggedSigned(packed);
+}
+#endif
+
+TNode<AnyTaggedT> CodeAssembler::LoadRootMapWord(RootIndex root_index) {
+#ifdef V8_MAP_PACKING
+ Handle<Object> root = isolate()->root_handle(root_index);
+ Node* map = HeapConstant(Handle<Map>::cast(root));
+ map = PackMapWord(map);
+ return ReinterpretCast<AnyTaggedT>(map);
+#else
+ return LoadRoot(root_index);
+#endif
+}
+
TNode<Object> CodeAssembler::LoadRoot(RootIndex root_index) {
if (RootsTable::IsImmortalImmovable(root_index)) {
Handle<Object> root = isolate()->root_handle(root_index);
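Map-word packing above is a plain XOR against a constant mask, so packing and unpacking are the same operation and round-trip exactly. A standalone sketch of the idea; the mask value below is made up for illustration, the real constant is Internals::kMapWordXorMask:

    #include <cassert>
    #include <cstdint>

    // Hypothetical mask; the actual value lives in Internals::kMapWordXorMask.
    constexpr uint64_t kIllustrativeXorMask = uint64_t{0xFFFF} << 48;

    uint64_t PackMapWord(uint64_t map) { return map ^ kIllustrativeXorMask; }
    uint64_t UnpackMapWord(uint64_t word) { return word ^ kIllustrativeXorMask; }

    int main() {
      const uint64_t map = 0x123456789ab0;  // pretend Map pointer
      assert(UnpackMapWord(PackMapWord(map)) == map);  // XOR is its own inverse
      return 0;
    }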
@@ -794,11 +823,14 @@ void CodeAssembler::OptimizedStoreMap(TNode<HeapObject> object,
}
void CodeAssembler::Store(Node* base, Node* offset, Node* value) {
+ // Please use OptimizedStoreMap(base, value) instead.
+ DCHECK(!raw_assembler()->IsMapOffsetConstantMinusTag(offset));
raw_assembler()->Store(MachineRepresentation::kTagged, base, offset, value,
kFullWriteBarrier);
}
void CodeAssembler::StoreEphemeronKey(Node* base, Node* offset, Node* value) {
+ DCHECK(!raw_assembler()->IsMapOffsetConstantMinusTag(offset));
raw_assembler()->Store(MachineRepresentation::kTagged, base, offset, value,
kEphemeronKeyWriteBarrier);
}
@@ -812,6 +844,8 @@ void CodeAssembler::StoreNoWriteBarrier(MachineRepresentation rep, Node* base,
void CodeAssembler::StoreNoWriteBarrier(MachineRepresentation rep, Node* base,
Node* offset, Node* value) {
+ // Please use OptimizedStoreMap(base, value) instead.
+ DCHECK(!raw_assembler()->IsMapOffsetConstantMinusTag(offset));
raw_assembler()->Store(
rep, base, offset, value,
CanBeTaggedPointer(rep) ? kAssertNoWriteBarrier : kNoWriteBarrier);
@@ -825,6 +859,8 @@ void CodeAssembler::UnsafeStoreNoWriteBarrier(MachineRepresentation rep,
void CodeAssembler::UnsafeStoreNoWriteBarrier(MachineRepresentation rep,
Node* base, Node* offset,
Node* value) {
+ // Please use OptimizedStoreMap(base, value) instead.
+ DCHECK(!raw_assembler()->IsMapOffsetConstantMinusTag(offset));
raw_assembler()->Store(rep, base, offset, value, kNoWriteBarrier);
}
@@ -837,12 +873,15 @@ void CodeAssembler::StoreFullTaggedNoWriteBarrier(TNode<RawPtrT> base,
void CodeAssembler::StoreFullTaggedNoWriteBarrier(TNode<RawPtrT> base,
TNode<IntPtrT> offset,
TNode<Object> tagged_value) {
+ // Please use OptimizedStoreMap(base, tagged_value) instead.
+ DCHECK(!raw_assembler()->IsMapOffsetConstantMinusTag(offset));
StoreNoWriteBarrier(MachineType::PointerRepresentation(), base, offset,
BitcastTaggedToWord(tagged_value));
}
void CodeAssembler::AtomicStore(MachineRepresentation rep, TNode<RawPtrT> base,
TNode<WordT> offset, TNode<Word32T> value) {
+ DCHECK(!raw_assembler()->IsMapOffsetConstantMinusTag(offset));
raw_assembler()->AtomicStore(rep, base, offset, value);
}
diff --git a/chromium/v8/src/compiler/code-assembler.h b/chromium/v8/src/compiler/code-assembler.h
index 9163295cd62..75cb1a95838 100644
--- a/chromium/v8/src/compiler/code-assembler.h
+++ b/chromium/v8/src/compiler/code-assembler.h
@@ -310,7 +310,6 @@ class CodeAssemblerParameterizedLabel;
V(Word64And, Word64T, Word64T, Word64T) \
V(Word64Or, Word64T, Word64T, Word64T) \
V(Word64Xor, Word64T, Word64T, Word64T) \
- V(Word64Ror, Word64T, Word64T, Word64T) \
V(Word64Shl, Word64T, Word64T, Word64T) \
V(Word64Shr, Word64T, Word64T, Word64T) \
V(Word64Sar, Word64T, Word64T, Word64T)
@@ -585,6 +584,8 @@ class V8_EXPORT_PRIVATE CodeAssembler {
return value ? Int32TrueConstant() : Int32FalseConstant();
}
+ bool IsMapOffsetConstant(Node* node);
+
bool TryToInt32Constant(TNode<IntegralT> node, int32_t* out_value);
bool TryToInt64Constant(TNode<IntegralT> node, int64_t* out_value);
bool TryToIntPtrConstant(TNode<IntegralT> node, intptr_t* out_value);
@@ -789,8 +790,16 @@ class V8_EXPORT_PRIVATE CodeAssembler {
Node* LoadFromObject(MachineType type, TNode<Object> object,
TNode<IntPtrT> offset);
+#ifdef V8_MAP_PACKING
+ Node* PackMapWord(Node* value);
+#endif
+
// Load a value from the root array.
+ // If map packing is enabled, LoadRoot for a root map returns the unpacked map
+ // word (i.e., the map). Use LoadRootMapWord to obtain the packed map word
+ // instead.
TNode<Object> LoadRoot(RootIndex root_index);
+ TNode<AnyTaggedT> LoadRootMapWord(RootIndex root_index);
template <typename Type>
TNode<Type> UnalignedLoad(TNode<RawPtrT> base, TNode<IntPtrT> offset) {
@@ -978,6 +987,11 @@ class V8_EXPORT_PRIVATE CodeAssembler {
static_cast<TNode<Word32T>>(right)));
}
+ TNode<IntPtrT> WordOr(TNode<IntPtrT> left, TNode<IntPtrT> right) {
+ return Signed(WordOr(static_cast<TNode<WordT>>(left),
+ static_cast<TNode<WordT>>(right)));
+ }
+
TNode<Int32T> Word32Or(TNode<Int32T> left, TNode<Int32T> right) {
return Signed(Word32Or(static_cast<TNode<Word32T>>(left),
static_cast<TNode<Word32T>>(right)));
@@ -995,6 +1009,9 @@ class V8_EXPORT_PRIVATE CodeAssembler {
TNode<BoolT> Word64Equal(TNode<Word64T> left, TNode<Word64T> right);
TNode<BoolT> Word64NotEqual(TNode<Word64T> left, TNode<Word64T> right);
+ TNode<IntPtrT> WordNot(TNode<IntPtrT> a) {
+ return Signed(WordNot(static_cast<TNode<WordT>>(a)));
+ }
TNode<BoolT> Word32Or(TNode<BoolT> left, TNode<BoolT> right) {
return UncheckedCast<BoolT>(Word32Or(static_cast<TNode<Word32T>>(left),
static_cast<TNode<Word32T>>(right)));
diff --git a/chromium/v8/src/compiler/common-operator-reducer.cc b/chromium/v8/src/compiler/common-operator-reducer.cc
index 874bdb0d323..f002f34a40d 100644
--- a/chromium/v8/src/compiler/common-operator-reducer.cc
+++ b/chromium/v8/src/compiler/common-operator-reducer.cc
@@ -8,10 +8,11 @@
#include "src/compiler/common-operator.h"
#include "src/compiler/graph.h"
+#include "src/compiler/js-heap-broker.h"
#include "src/compiler/machine-operator.h"
-#include "src/compiler/node.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
+#include "src/compiler/node.h"
namespace v8 {
namespace internal {
@@ -28,7 +29,9 @@ Decision DecideCondition(JSHeapBroker* broker, Node* const cond) {
}
case IrOpcode::kHeapConstant: {
HeapObjectMatcher m(unwrapped);
- return m.Ref(broker).BooleanValue() ? Decision::kTrue : Decision::kFalse;
+ base::Optional<bool> maybe_result = m.Ref(broker).TryGetBooleanValue();
+ if (!maybe_result.has_value()) return Decision::kUnknown;
+ return *maybe_result ? Decision::kTrue : Decision::kFalse;
}
default:
return Decision::kUnknown;
@@ -53,7 +56,8 @@ CommonOperatorReducer::CommonOperatorReducer(Editor* editor, Graph* graph,
}
Reduction CommonOperatorReducer::Reduce(Node* node) {
- DisallowHeapAccessIf no_heap_access(!FLAG_turbo_direct_heap_access);
+ DisallowHeapAccessIf no_heap_access(broker() == nullptr ||
+ !broker()->is_concurrent_inlining());
switch (node->opcode()) {
case IrOpcode::kBranch:
return ReduceBranch(node);
diff --git a/chromium/v8/src/compiler/common-operator.h b/chromium/v8/src/compiler/common-operator.h
index 77483b14e86..4115414908d 100644
--- a/chromium/v8/src/compiler/common-operator.h
+++ b/chromium/v8/src/compiler/common-operator.h
@@ -617,8 +617,8 @@ class FrameState : public CommonNodeWrapperBase {
// test, among others). Also, outer_frame_state points at the start node
// for non-inlined functions. This could be avoided by checking
// has_outer_frame_state() before casting to FrameState.
- CONSTEXPR_DCHECK(node->opcode() == IrOpcode::kFrameState ||
- node->opcode() == IrOpcode::kStart);
+ DCHECK(node->opcode() == IrOpcode::kFrameState ||
+ node->opcode() == IrOpcode::kStart);
}
FrameStateInfo frame_state_info() const {
@@ -668,7 +668,7 @@ class FrameState : public CommonNodeWrapperBase {
class StartNode final : public CommonNodeWrapperBase {
public:
explicit constexpr StartNode(Node* node) : CommonNodeWrapperBase(node) {
- CONSTEXPR_DCHECK(node->opcode() == IrOpcode::kStart);
+ DCHECK_EQ(IrOpcode::kStart, node->opcode());
}
// The receiver is counted as part of formal parameters.
@@ -687,10 +687,10 @@ class StartNode final : public CommonNodeWrapperBase {
kExtraOutputCount);
// Checking related linkage methods here since they rely on Start node
// layout.
- CONSTEXPR_DCHECK(Linkage::kJSCallClosureParamIndex == -1);
- CONSTEXPR_DCHECK(Linkage::GetJSCallNewTargetParamIndex(argc) == argc + 0);
- CONSTEXPR_DCHECK(Linkage::GetJSCallArgCountParamIndex(argc) == argc + 1);
- CONSTEXPR_DCHECK(Linkage::GetJSCallContextParamIndex(argc) == argc + 2);
+ DCHECK_EQ(-1, Linkage::kJSCallClosureParamIndex);
+ DCHECK_EQ(argc + 0, Linkage::GetJSCallNewTargetParamIndex(argc));
+ DCHECK_EQ(argc + 1, Linkage::GetJSCallArgCountParamIndex(argc));
+ DCHECK_EQ(argc + 2, Linkage::GetJSCallContextParamIndex(argc));
return argc + kClosure + kNewTarget + kArgCount + kContext;
}
@@ -773,8 +773,7 @@ class DynamicCheckMapsWithDeoptUnlessNode final : public CommonNodeWrapperBase {
public:
explicit constexpr DynamicCheckMapsWithDeoptUnlessNode(Node* node)
: CommonNodeWrapperBase(node) {
- CONSTEXPR_DCHECK(node->opcode() ==
- IrOpcode::kDynamicCheckMapsWithDeoptUnless);
+ DCHECK_EQ(IrOpcode::kDynamicCheckMapsWithDeoptUnless, node->opcode());
}
#define INPUTS(V) \
diff --git a/chromium/v8/src/compiler/compilation-dependencies.cc b/chromium/v8/src/compiler/compilation-dependencies.cc
index 3149fe490b9..8c73a759ff4 100644
--- a/chromium/v8/src/compiler/compilation-dependencies.cc
+++ b/chromium/v8/src/compiler/compilation-dependencies.cc
@@ -444,7 +444,7 @@ class ElementsKindDependency final : public CompilationDependency {
bool IsValid() const override {
Handle<AllocationSite> site = site_.object();
ElementsKind kind = site->PointsToLiteral()
- ? site->boilerplate().GetElementsKind()
+ ? site->boilerplate(kAcquireLoad).GetElementsKind()
: site->GetElementsKind();
return kind_ == kind;
}
@@ -580,38 +580,38 @@ bool CompilationDependencies::DependOnProtector(const PropertyCellRef& cell) {
}
bool CompilationDependencies::DependOnArrayBufferDetachingProtector() {
- return DependOnProtector(PropertyCellRef(
+ return DependOnProtector(MakeRef(
broker_,
broker_->isolate()->factory()->array_buffer_detaching_protector()));
}
bool CompilationDependencies::DependOnArrayIteratorProtector() {
- return DependOnProtector(PropertyCellRef(
+ return DependOnProtector(MakeRef(
broker_, broker_->isolate()->factory()->array_iterator_protector()));
}
bool CompilationDependencies::DependOnArraySpeciesProtector() {
- return DependOnProtector(PropertyCellRef(
+ return DependOnProtector(MakeRef(
broker_, broker_->isolate()->factory()->array_species_protector()));
}
bool CompilationDependencies::DependOnNoElementsProtector() {
- return DependOnProtector(PropertyCellRef(
- broker_, broker_->isolate()->factory()->no_elements_protector()));
+ return DependOnProtector(
+ MakeRef(broker_, broker_->isolate()->factory()->no_elements_protector()));
}
bool CompilationDependencies::DependOnPromiseHookProtector() {
- return DependOnProtector(PropertyCellRef(
+ return DependOnProtector(MakeRef(
broker_, broker_->isolate()->factory()->promise_hook_protector()));
}
bool CompilationDependencies::DependOnPromiseSpeciesProtector() {
- return DependOnProtector(PropertyCellRef(
+ return DependOnProtector(MakeRef(
broker_, broker_->isolate()->factory()->promise_species_protector()));
}
bool CompilationDependencies::DependOnPromiseThenProtector() {
- return DependOnProtector(PropertyCellRef(
+ return DependOnProtector(MakeRef(
broker_, broker_->isolate()->factory()->promise_then_protector()));
}
@@ -680,7 +680,7 @@ namespace {
void DependOnStablePrototypeChain(CompilationDependencies* deps, MapRef map,
base::Optional<JSObjectRef> last_prototype) {
while (true) {
- HeapObjectRef proto = map.prototype();
+ HeapObjectRef proto = map.prototype().value();
if (!proto.IsJSObject()) {
CHECK_EQ(proto.map().oddball_type(), OddballType::kNull);
break;
@@ -697,7 +697,7 @@ void CompilationDependencies::DependOnStablePrototypeChains(
MapContainer const& receiver_maps, WhereToStart start,
base::Optional<JSObjectRef> last_prototype) {
for (auto map : receiver_maps) {
- MapRef receiver_map(broker_, map);
+ MapRef receiver_map = MakeRef(broker_, map);
if (start == kStartAtReceiver) DependOnStableMap(receiver_map);
if (receiver_map.IsPrimitiveMap()) {
// Perform the implicit ToObject for primitives here.
diff --git a/chromium/v8/src/compiler/compilation-dependencies.h b/chromium/v8/src/compiler/compilation-dependencies.h
index 5cf2a3f94c4..d2acc352619 100644
--- a/chromium/v8/src/compiler/compilation-dependencies.h
+++ b/chromium/v8/src/compiler/compilation-dependencies.h
@@ -34,6 +34,10 @@ class V8_EXPORT_PRIVATE CompilationDependencies : public ZoneObject {
V8_WARN_UNUSED_RESULT bool Commit(Handle<Code> code);
+ // TODO(jgruber): Remove this method once GetPropertyAccessInfo no longer
+ // uses the two-phase approach between serialization and compilation.
+ void ClearForConcurrentGetPropertyAccessInfo() { dependencies_.clear(); }
+
// Return the initial map of {function} and record the assumption that it
// stays the initial map.
MapRef DependOnInitialMap(const JSFunctionRef& function);
diff --git a/chromium/v8/src/compiler/constant-folding-reducer.cc b/chromium/v8/src/compiler/constant-folding-reducer.cc
index 1c672c700d5..3b187467db2 100644
--- a/chromium/v8/src/compiler/constant-folding-reducer.cc
+++ b/chromium/v8/src/compiler/constant-folding-reducer.cc
@@ -5,6 +5,7 @@
#include "src/compiler/constant-folding-reducer.h"
#include "src/compiler/js-graph.h"
+#include "src/compiler/js-heap-broker.h"
#include "src/objects/objects-inl.h"
namespace v8 {
@@ -63,7 +64,7 @@ ConstantFoldingReducer::ConstantFoldingReducer(Editor* editor, JSGraph* jsgraph,
ConstantFoldingReducer::~ConstantFoldingReducer() = default;
Reduction ConstantFoldingReducer::Reduce(Node* node) {
- DisallowHeapAccessIf no_heap_access(!FLAG_turbo_direct_heap_access);
+ DisallowHeapAccessIf no_heap_access(!broker()->is_concurrent_inlining());
if (!NodeProperties::IsConstant(node) && NodeProperties::IsTyped(node) &&
node->op()->HasProperty(Operator::kEliminatable) &&
node->opcode() != IrOpcode::kFinishRegion) {
diff --git a/chromium/v8/src/compiler/dead-code-elimination.cc b/chromium/v8/src/compiler/dead-code-elimination.cc
index 61552ba523c..9b5958c81a7 100644
--- a/chromium/v8/src/compiler/dead-code-elimination.cc
+++ b/chromium/v8/src/compiler/dead-code-elimination.cc
@@ -16,12 +16,14 @@ namespace compiler {
DeadCodeElimination::DeadCodeElimination(Editor* editor, Graph* graph,
CommonOperatorBuilder* common,
- Zone* temp_zone)
+ Zone* temp_zone,
+ bool is_concurrent_inlining)
: AdvancedReducer(editor),
graph_(graph),
common_(common),
dead_(graph->NewNode(common->Dead())),
- zone_(temp_zone) {
+ zone_(temp_zone),
+ is_concurrent_inlining_(is_concurrent_inlining) {
NodeProperties::SetType(dead_, Type::None());
}
@@ -46,7 +48,7 @@ Node* FindDeadInput(Node* node) {
} // namespace
Reduction DeadCodeElimination::Reduce(Node* node) {
- DisallowHeapAccessIf no_heap_access(!FLAG_turbo_direct_heap_access);
+ DisallowHeapAccessIf no_heap_access(!is_concurrent_inlining_);
switch (node->opcode()) {
case IrOpcode::kEnd:
return ReduceEnd(node);
diff --git a/chromium/v8/src/compiler/dead-code-elimination.h b/chromium/v8/src/compiler/dead-code-elimination.h
index 7fb22838c7b..3e6914d5e1f 100644
--- a/chromium/v8/src/compiler/dead-code-elimination.h
+++ b/chromium/v8/src/compiler/dead-code-elimination.h
@@ -40,7 +40,8 @@ class V8_EXPORT_PRIVATE DeadCodeElimination final
: public NON_EXPORTED_BASE(AdvancedReducer) {
public:
DeadCodeElimination(Editor* editor, Graph* graph,
- CommonOperatorBuilder* common, Zone* temp_zone);
+ CommonOperatorBuilder* common, Zone* temp_zone,
+ bool is_concurrent_inlining);
~DeadCodeElimination() final = default;
DeadCodeElimination(const DeadCodeElimination&) = delete;
DeadCodeElimination& operator=(const DeadCodeElimination&) = delete;
@@ -78,6 +79,8 @@ class V8_EXPORT_PRIVATE DeadCodeElimination final
CommonOperatorBuilder* const common_;
Node* const dead_;
Zone* zone_;
+
+ const bool is_concurrent_inlining_;
};
} // namespace compiler
diff --git a/chromium/v8/src/compiler/decompression-optimizer.cc b/chromium/v8/src/compiler/decompression-optimizer.cc
index 9b2362c9ef9..a8c29d51e4f 100644
--- a/chromium/v8/src/compiler/decompression-optimizer.cc
+++ b/chromium/v8/src/compiler/decompression-optimizer.cc
@@ -17,7 +17,8 @@ bool IsMachineLoad(Node* const node) {
const IrOpcode::Value opcode = node->opcode();
return opcode == IrOpcode::kLoad || opcode == IrOpcode::kPoisonedLoad ||
opcode == IrOpcode::kProtectedLoad ||
- opcode == IrOpcode::kUnalignedLoad;
+ opcode == IrOpcode::kUnalignedLoad ||
+ opcode == IrOpcode::kLoadImmutable;
}
bool IsTaggedMachineLoad(Node* const node) {
@@ -204,6 +205,10 @@ void DecompressionOptimizer::ChangeLoad(Node* const node) {
case IrOpcode::kLoad:
NodeProperties::ChangeOp(node, machine()->Load(compressed_load_rep));
break;
+ case IrOpcode::kLoadImmutable:
+ NodeProperties::ChangeOp(node,
+ machine()->LoadImmutable(compressed_load_rep));
+ break;
case IrOpcode::kPoisonedLoad:
NodeProperties::ChangeOp(node,
machine()->PoisonedLoad(compressed_load_rep));
diff --git a/chromium/v8/src/compiler/effect-control-linearizer.cc b/chromium/v8/src/compiler/effect-control-linearizer.cc
index dedf68e93c2..9799691ce63 100644
--- a/chromium/v8/src/compiler/effect-control-linearizer.cc
+++ b/chromium/v8/src/compiler/effect-control-linearizer.cc
@@ -7,6 +7,7 @@
#include "include/v8-fast-api-calls.h"
#include "src/base/bits.h"
#include "src/codegen/code-factory.h"
+#include "src/codegen/interface-descriptors-inl.h"
#include "src/codegen/machine-type.h"
#include "src/common/ptr-compr-inl.h"
#include "src/compiler/access-builder.h"
@@ -16,11 +17,13 @@
#include "src/compiler/js-graph.h"
#include "src/compiler/js-heap-broker.h"
#include "src/compiler/linkage.h"
+#include "src/compiler/memory-lowering.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-origin-table.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/node.h"
#include "src/compiler/schedule.h"
+#include "src/compiler/select-lowering.h"
#include "src/execution/frames.h"
#include "src/heap/factory-inl.h"
#include "src/objects/heap-number.h"
@@ -31,10 +34,13 @@ namespace v8 {
namespace internal {
namespace compiler {
+enum class MaintainSchedule { kMaintain, kDiscard };
+enum class MaskArrayIndexEnable { kDoNotMaskArrayIndex, kMaskArrayIndex };
+
class EffectControlLinearizer {
public:
EffectControlLinearizer(JSGraph* js_graph, Schedule* schedule,
- Zone* temp_zone,
+ JSGraphAssembler* graph_assembler, Zone* temp_zone,
SourcePositionTable* source_positions,
NodeOriginTable* node_origins,
MaskArrayIndexEnable mask_array_index,
@@ -48,8 +54,7 @@ class EffectControlLinearizer {
source_positions_(source_positions),
node_origins_(node_origins),
broker_(broker),
- graph_assembler_(js_graph, temp_zone, base::nullopt,
- should_maintain_schedule() ? schedule : nullptr),
+ graph_assembler_(graph_assembler),
frame_state_zapper_(nullptr) {}
void Run();
@@ -112,7 +117,6 @@ class EffectControlLinearizer {
Node* LowerCheckedTaggedToFloat64(Node* node, Node* frame_state);
Node* LowerCheckedTaggedToTaggedSigned(Node* node, Node* frame_state);
Node* LowerCheckedTaggedToTaggedPointer(Node* node, Node* frame_state);
- Node* LowerBigIntAsUintN(Node* node, Node* frame_state);
Node* LowerChangeUint64ToBigInt(Node* node);
Node* LowerTruncateBigIntToUint64(Node* node);
Node* LowerChangeTaggedToFloat64(Node* node);
@@ -192,6 +196,7 @@ class EffectControlLinearizer {
void LowerTransitionElementsKind(Node* node);
Node* LowerLoadFieldByIndex(Node* node);
Node* LowerLoadMessage(Node* node);
+ Node* AdaptFastCallArgument(Node* node, CTypeInfo::Type arg_type);
Node* LowerFastApiCall(Node* node);
Node* LowerLoadTypedElement(Node* node);
Node* LowerLoadDataViewElement(Node* node);
@@ -307,7 +312,7 @@ class EffectControlLinearizer {
return js_graph_->simplified();
}
MachineOperatorBuilder* machine() const { return js_graph_->machine(); }
- JSGraphAssembler* gasm() { return &graph_assembler_; }
+ JSGraphAssembler* gasm() const { return graph_assembler_; }
JSHeapBroker* broker() const { return broker_; }
JSGraph* js_graph_;
@@ -319,7 +324,7 @@ class EffectControlLinearizer {
SourcePositionTable* source_positions_;
NodeOriginTable* node_origins_;
JSHeapBroker* broker_;
- JSGraphAssembler graph_assembler_;
+ JSGraphAssembler* graph_assembler_;
Node* frame_state_zapper_; // For tracking down compiler::Node::New crashes.
};
@@ -1059,9 +1064,6 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kCheckedTaggedToTaggedPointer:
result = LowerCheckedTaggedToTaggedPointer(node, frame_state);
break;
- case IrOpcode::kBigIntAsUintN:
- result = LowerBigIntAsUintN(node, frame_state);
- break;
case IrOpcode::kChangeUint64ToBigInt:
result = LowerChangeUint64ToBigInt(node);
break;
@@ -2934,22 +2936,6 @@ Node* EffectControlLinearizer::LowerCheckBigInt(Node* node, Node* frame_state) {
return value;
}
-Node* EffectControlLinearizer::LowerBigIntAsUintN(Node* node,
- Node* frame_state) {
- DCHECK(machine()->Is64());
-
- const int bits = OpParameter<int>(node->op());
- DCHECK(0 <= bits && bits <= 64);
-
- if (bits == 64) {
- // Reduce to nop.
- return node->InputAt(0);
- } else {
- const uint64_t msk = (1ULL << bits) - 1ULL;
- return __ Word64And(node->InputAt(0), __ Int64Constant(msk));
- }
-}
-
Node* EffectControlLinearizer::LowerChangeUint64ToBigInt(Node* node) {
DCHECK(machine()->Is64());
@@ -3683,8 +3669,7 @@ Node* EffectControlLinearizer::LowerToBoolean(Node* node) {
auto call_descriptor = Linkage::GetStubCallDescriptor(
graph()->zone(), callable.descriptor(),
callable.descriptor().GetStackParameterCount(), flags, properties);
- return __ Call(call_descriptor, __ HeapConstant(callable.code()), obj,
- __ NoContextConstant());
+ return __ Call(call_descriptor, __ HeapConstant(callable.code()), obj);
}
Node* EffectControlLinearizer::LowerArgumentsLength(Node* node) {
@@ -4974,7 +4959,8 @@ void EffectControlLinearizer::LowerStoreMessage(Node* node) {
__ StoreField(AccessBuilder::ForExternalIntPtr(), offset, object_pattern);
}
-static MachineType MachineTypeFor(CTypeInfo::Type type) {
+namespace {
+MachineType MachineTypeFor(CTypeInfo::Type type) {
switch (type) {
case CTypeInfo::Type::kVoid:
return MachineType::AnyTagged();
@@ -4993,9 +4979,34 @@ static MachineType MachineTypeFor(CTypeInfo::Type type) {
case CTypeInfo::Type::kFloat64:
return MachineType::Float64();
case CTypeInfo::Type::kV8Value:
+ case CTypeInfo::Type::kApiObject:
return MachineType::AnyTagged();
}
}
+} // namespace
+
+Node* EffectControlLinearizer::AdaptFastCallArgument(Node* node,
+ CTypeInfo::Type arg_type) {
+ switch (arg_type) {
+ case CTypeInfo::Type::kV8Value: {
+ int kAlign = alignof(uintptr_t);
+ int kSize = sizeof(uintptr_t);
+ Node* stack_slot = __ StackSlot(kSize, kAlign);
+
+ __ Store(StoreRepresentation(MachineType::PointerRepresentation(),
+ kNoWriteBarrier),
+ stack_slot, 0, node);
+
+ return stack_slot;
+ }
+ case CTypeInfo::Type::kFloat32: {
+ return __ TruncateFloat64ToFloat32(node);
+ }
+ default: {
+ return node;
+ }
+ }
+}
Node* EffectControlLinearizer::LowerFastApiCall(Node* node) {
FastApiCallNode n(node);
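For kV8Value arguments the lowering above does not pass the tagged word directly; it spills the value into a fresh stack slot and hands the C callee the slot's address. A stripped-down illustration of that calling convention with stand-in types (this is not the V8 fast-call API):

    #include <cassert>
    #include <cstdint>

    using TaggedWord = uintptr_t;  // stand-in for a tagged V8 object pointer

    // The fast C callee receives the address of the slot, not the raw word.
    TaggedWord ReadArgument(const TaggedWord* slot) { return *slot; }

    int main() {
      const TaggedWord object = 0x2a51;  // pretend tagged pointer
      TaggedWord stack_slot = object;    // mirrors __ StackSlot(...) + __ Store(...)
      assert(ReadArgument(&stack_slot) == object);
      return 0;
    }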
@@ -5060,13 +5071,9 @@ Node* EffectControlLinearizer::LowerFastApiCall(Node* node) {
inputs[0] = n.target();
for (int i = FastApiCallNode::kFastTargetInputCount;
i < c_arg_count + FastApiCallNode::kFastTargetInputCount; ++i) {
- if (c_signature->ArgumentInfo(i - 1).GetType() ==
- CTypeInfo::Type::kFloat32) {
- inputs[i] =
- __ TruncateFloat64ToFloat32(NodeProperties::GetValueInput(node, i));
- } else {
- inputs[i] = NodeProperties::GetValueInput(node, i);
- }
+ inputs[i] =
+ AdaptFastCallArgument(NodeProperties::GetValueInput(node, i),
+ c_signature->ArgumentInfo(i - 1).GetType());
}
if (c_signature->HasOptions()) {
inputs[c_arg_count + 1] = stack_slot;
@@ -5113,6 +5120,7 @@ Node* EffectControlLinearizer::LowerFastApiCall(Node* node) {
c_call_result, CheckForMinusZeroMode::kCheckForMinusZero);
break;
case CTypeInfo::Type::kV8Value:
+ case CTypeInfo::Type::kApiObject:
UNREACHABLE();
}
@@ -5859,11 +5867,10 @@ Node* EffectControlLinearizer::CallBuiltin(Builtins::Name builtin,
Node* EffectControlLinearizer::LowerAssertType(Node* node) {
DCHECK_EQ(node->opcode(), IrOpcode::kAssertType);
Type type = OpParameter<Type>(node->op());
- DCHECK(type.IsRange());
- auto range = type.AsRange();
+ CHECK(type.CanBeAsserted());
Node* const input = node->InputAt(0);
- Node* const min = __ NumberConstant(range->Min());
- Node* const max = __ NumberConstant(range->Max());
+ Node* const min = __ NumberConstant(type.Min());
+ Node* const max = __ NumberConstant(type.Max());
CallBuiltin(Builtins::kCheckNumberInRange, node->op()->properties(), input,
min, max, __ SmiConstant(node->id()));
return input;
@@ -6462,15 +6469,47 @@ Node* EffectControlLinearizer::BuildIsClearedWeakReference(Node* maybe_object) {
#undef __
+namespace {
+
+MaskArrayIndexEnable MaskArrayForPoisonLevel(
+ PoisoningMitigationLevel poison_level) {
+ return (poison_level != PoisoningMitigationLevel::kDontPoison)
+ ? MaskArrayIndexEnable::kMaskArrayIndex
+ : MaskArrayIndexEnable::kDoNotMaskArrayIndex;
+}
+
+} // namespace
+
void LinearizeEffectControl(JSGraph* graph, Schedule* schedule, Zone* temp_zone,
SourcePositionTable* source_positions,
NodeOriginTable* node_origins,
- MaskArrayIndexEnable mask_array_index,
- MaintainSchedule maintain_schedule,
+ PoisoningMitigationLevel poison_level,
+ JSHeapBroker* broker) {
+ JSGraphAssembler graph_assembler_(graph, temp_zone, base::nullopt, nullptr);
+ EffectControlLinearizer linearizer(graph, schedule, &graph_assembler_,
+ temp_zone, source_positions, node_origins,
+ MaskArrayForPoisonLevel(poison_level),
+ MaintainSchedule::kDiscard, broker);
+ linearizer.Run();
+}
+
+void LowerToMachineSchedule(JSGraph* js_graph, Schedule* schedule,
+ Zone* temp_zone,
+ SourcePositionTable* source_positions,
+ NodeOriginTable* node_origins,
+ PoisoningMitigationLevel poison_level,
JSHeapBroker* broker) {
- EffectControlLinearizer linearizer(
- graph, schedule, temp_zone, source_positions, node_origins,
- mask_array_index, maintain_schedule, broker);
+ JSGraphAssembler graph_assembler(js_graph, temp_zone, base::nullopt,
+ schedule);
+ EffectControlLinearizer linearizer(js_graph, schedule, &graph_assembler,
+ temp_zone, source_positions, node_origins,
+ MaskArrayForPoisonLevel(poison_level),
+ MaintainSchedule::kMaintain, broker);
+ MemoryLowering memory_lowering(js_graph, temp_zone, &graph_assembler,
+ poison_level);
+ SelectLowering select_lowering(&graph_assembler, js_graph->graph());
+ graph_assembler.AddInlineReducer(&memory_lowering);
+ graph_assembler.AddInlineReducer(&select_lowering);
linearizer.Run();
}
diff --git a/chromium/v8/src/compiler/effect-control-linearizer.h b/chromium/v8/src/compiler/effect-control-linearizer.h
index fbfd3046dce..fca4899263c 100644
--- a/chromium/v8/src/compiler/effect-control-linearizer.h
+++ b/chromium/v8/src/compiler/effect-control-linearizer.h
@@ -23,15 +23,18 @@ class Schedule;
class SourcePositionTable;
class JSHeapBroker;
-enum class MaskArrayIndexEnable { kDoNotMaskArrayIndex, kMaskArrayIndex };
-
-enum class MaintainSchedule { kMaintain, kDiscard };
-
V8_EXPORT_PRIVATE void LinearizeEffectControl(
JSGraph* graph, Schedule* schedule, Zone* temp_zone,
SourcePositionTable* source_positions, NodeOriginTable* node_origins,
- MaskArrayIndexEnable mask_array_index, MaintainSchedule maintain_schedule,
- JSHeapBroker* broker);
+ PoisoningMitigationLevel poison_level, JSHeapBroker* broker);
+
+// Performs effect control linearization lowering in addition to machine
+// lowering, producing a scheduled graph that is ready for instruction
+// selection.
+V8_EXPORT_PRIVATE void LowerToMachineSchedule(
+ JSGraph* graph, Schedule* schedule, Zone* temp_zone,
+ SourcePositionTable* source_positions, NodeOriginTable* node_origins,
+ PoisoningMitigationLevel poison_level, JSHeapBroker* broker);
} // namespace compiler
} // namespace internal
diff --git a/chromium/v8/src/compiler/globals.h b/chromium/v8/src/compiler/globals.h
index ff5b5a57326..66b3272b433 100644
--- a/chromium/v8/src/compiler/globals.h
+++ b/chromium/v8/src/compiler/globals.h
@@ -49,17 +49,24 @@ inline size_t hash_value(StackCheckKind kind) {
return static_cast<size_t>(kind);
}
-// The CallFeedbackRelation states whether the target feedback stored with a
-// JSCall is related to the call. If, during lowering, a JSCall (e.g. of a
-// higher order function) is replaced by a JSCall with another target, the
-// feedback has to be kept but is now unrelated.
-enum class CallFeedbackRelation { kRelated, kUnrelated };
+// The CallFeedbackRelation describes the meaning of the call feedback for a
+// TurboFan JSCall operator:
+// - kReceiver: The call target was Function.prototype.apply and its receiver
+// was recorded as the feedback value.
+// - kTarget: The call target was recorded as the feedback value.
+// - kUnrelated: The feedback is no longer related to the call. If, during
+// lowering, a JSCall (e.g. of a higher order function) is replaced by a
+// JSCall with another target, the feedback has to be kept but is now
+// unrelated.
+enum class CallFeedbackRelation { kReceiver, kTarget, kUnrelated };
inline std::ostream& operator<<(std::ostream& os,
CallFeedbackRelation call_feedback_relation) {
switch (call_feedback_relation) {
- case CallFeedbackRelation::kRelated:
- return os << "CallFeedbackRelation::kRelated";
+ case CallFeedbackRelation::kReceiver:
+ return os << "CallFeedbackRelation::kReceiver";
+ case CallFeedbackRelation::kTarget:
+ return os << "CallFeedbackRelation::kTarget";
case CallFeedbackRelation::kUnrelated:
return os << "CallFeedbackRelation::kUnrelated";
}
diff --git a/chromium/v8/src/compiler/graph-assembler.cc b/chromium/v8/src/compiler/graph-assembler.cc
index 897e22ac264..73938a5fb97 100644
--- a/chromium/v8/src/compiler/graph-assembler.cc
+++ b/chromium/v8/src/compiler/graph-assembler.cc
@@ -5,6 +5,8 @@
#include "src/compiler/graph-assembler.h"
#include "src/codegen/code-factory.h"
+#include "src/compiler/access-builder.h"
+#include "src/compiler/graph-reducer.h"
#include "src/compiler/linkage.h"
#include "src/compiler/schedule.h"
// For TNode types.
@@ -329,6 +331,21 @@ BasicBlock* GraphAssembler::BasicBlockUpdater::Finalize(BasicBlock* original) {
return block;
}
+class V8_NODISCARD GraphAssembler::BlockInlineReduction {
+ public:
+ explicit BlockInlineReduction(GraphAssembler* gasm) : gasm_(gasm) {
+ DCHECK(!gasm_->inline_reductions_blocked_);
+ gasm_->inline_reductions_blocked_ = true;
+ }
+ ~BlockInlineReduction() {
+ DCHECK(gasm_->inline_reductions_blocked_);
+ gasm_->inline_reductions_blocked_ = false;
+ }
+
+ private:
+ GraphAssembler* gasm_;
+};
+
GraphAssembler::GraphAssembler(
MachineGraph* mcgraph, Zone* zone,
base::Optional<NodeChangedCallback> node_changed_callback,
@@ -342,6 +359,8 @@ GraphAssembler::GraphAssembler(
? new BasicBlockUpdater(schedule, mcgraph->graph(),
mcgraph->common(), zone)
: nullptr),
+ inline_reducers_(zone),
+ inline_reductions_blocked_(false),
loop_headers_(zone),
mark_loop_exits_(mark_loop_exits) {}
@@ -530,6 +549,31 @@ Node* JSGraphAssembler::StoreField(FieldAccess const& access, Node* object,
value, effect(), control()));
}
+#ifdef V8_MAP_PACKING
+TNode<Map> GraphAssembler::UnpackMapWord(Node* map_word) {
+ map_word = BitcastTaggedToWordForTagAndSmiBits(map_word);
+ // TODO(wenyuzhao): Clear header metadata.
+ Node* map = WordXor(map_word, IntPtrConstant(Internals::kMapWordXorMask));
+ return TNode<Map>::UncheckedCast(BitcastWordToTagged(map));
+}
+
+Node* GraphAssembler::PackMapWord(TNode<Map> map) {
+ Node* map_word = BitcastTaggedToWordForTagAndSmiBits(map);
+ Node* packed = WordXor(map_word, IntPtrConstant(Internals::kMapWordXorMask));
+ return BitcastWordToTaggedSigned(packed);
+}
+#endif
+
+TNode<Map> GraphAssembler::LoadMap(Node* object) {
+ Node* map_word = Load(MachineType::TaggedPointer(), object,
+ HeapObject::kMapOffset - kHeapObjectTag);
+#ifdef V8_MAP_PACKING
+ return UnpackMapWord(map_word);
+#else
+ return TNode<Map>::UncheckedCast(map_word);
+#endif
+}
+
Node* JSGraphAssembler::StoreElement(ElementAccess const& access, Node* object,
Node* index, Node* value) {
return AddNode(graph()->NewNode(simplified()->StoreElement(access), object,
@@ -968,6 +1012,28 @@ Node* GraphAssembler::AddClonedNode(Node* node) {
}
Node* GraphAssembler::AddNode(Node* node) {
+ if (!inline_reducers_.empty() && !inline_reductions_blocked_) {
+    // Reducers may add new nodes to the graph using this graph assembler;
+    // however, they should never introduce nodes that need further reduction,
+    // so inline reduction is blocked while they run.
+ BlockInlineReduction scope(this);
+ Reduction reduction;
+ for (auto reducer : inline_reducers_) {
+ reduction = reducer->Reduce(node, nullptr);
+ if (reduction.Changed()) break;
+ }
+ if (reduction.Changed()) {
+ Node* replacement = reduction.replacement();
+ if (replacement != node) {
+ // Replace all uses of node and kill the node to make sure we don't
+ // leave dangling dead uses.
+ NodeProperties::ReplaceUses(node, replacement, effect(), control());
+ node->Kill();
+ return replacement;
+ }
+ }
+ }
+
if (block_updater_) {
block_updater_->AddNode(node);
}
diff --git a/chromium/v8/src/compiler/graph-assembler.h b/chromium/v8/src/compiler/graph-assembler.h
index bb3bc34a58c..d368f364074 100644
--- a/chromium/v8/src/compiler/graph-assembler.h
+++ b/chromium/v8/src/compiler/graph-assembler.h
@@ -27,6 +27,7 @@ namespace compiler {
class Schedule;
class BasicBlock;
+class Reducer;
#define PURE_ASSEMBLER_MACH_UNOP_LIST(V) \
V(BitcastFloat32ToInt32) \
@@ -270,6 +271,12 @@ class V8_EXPORT_PRIVATE GraphAssembler {
CHECKED_ASSEMBLER_MACH_BINOP_LIST(BINOP_DECL)
#undef BINOP_DECL
+#ifdef V8_MAP_PACKING
+ Node* PackMapWord(TNode<Map> map);
+ TNode<Map> UnpackMapWord(Node* map_word);
+#endif
+ TNode<Map> LoadMap(Node* object);
+
Node* DebugBreak();
// Unreachable nodes are similar to Goto in that they reset effect/control to
@@ -436,6 +443,16 @@ class V8_EXPORT_PRIVATE GraphAssembler {
void ConnectUnreachableToEnd();
+  // Add an inline reducer such that nodes added to the graph will be run
+ // through the reducers and possibly further lowered. Each reducer should
+ // operate on independent node types since once a reducer changes a node we
+ // no longer run any other reducers on that node. The reducers should also
+ // only generate new nodes that wouldn't be further reduced, as new nodes
+ // generated by a reducer won't be passed through the reducers again.
+ void AddInlineReducer(Reducer* reducer) {
+ inline_reducers_.push_back(reducer);
+ }
+
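Note: a rough usage sketch of this hook, assuming a hypothetical lowering phase
and reducer instance (only AddInlineReducer and AddNode come from this patch):

  // Hypothetical setup inside some lowering phase:
  //   JSGraphAssembler gasm(...);
  //   SomeReducer reducer(...);        // any compiler::Reducer subclass
  //   gasm.AddInlineReducer(&reducer);
  // From then on, each node created via the assembler (through AddNode) is
  // offered to the registered reducers; the first reducer whose Reduce()
  // reports a change supplies the replacement, and the original node's uses
  // are rewired to it.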
Control control() const { return Control(control_); }
Effect effect() const { return Effect(effect_); }
@@ -532,6 +549,8 @@ class V8_EXPORT_PRIVATE GraphAssembler {
};
private:
+ class BlockInlineReduction;
+
template <typename... Vars>
void BranchImpl(Node* condition,
GraphAssemblerLabel<sizeof...(Vars)>* if_true,
@@ -551,6 +570,11 @@ class V8_EXPORT_PRIVATE GraphAssembler {
base::Optional<NodeChangedCallback> node_changed_callback_;
std::unique_ptr<BasicBlockUpdater> block_updater_;
+ // Inline reducers enable reductions to be performed to nodes as they are
+ // added to the graph with the graph assembler.
+ ZoneVector<Reducer*> inline_reducers_;
+ bool inline_reductions_blocked_;
+
// Track loop information in order to properly mark loop exits with
// {LoopExit,LoopExitEffect,LoopExitValue} nodes. The outermost level has
// a nesting level of 0. See also GraphAssembler::LoopScope.
diff --git a/chromium/v8/src/compiler/graph-reducer.cc b/chromium/v8/src/compiler/graph-reducer.cc
index 998f37eea80..643f6622623 100644
--- a/chromium/v8/src/compiler/graph-reducer.cc
+++ b/chromium/v8/src/compiler/graph-reducer.cc
@@ -112,7 +112,7 @@ Reduction GraphReducer::Reduce(Node* const node) {
if (FLAG_trace_turbo_reduction) {
UnparkedScopeIfNeeded unparked(broker_);
// TODO(neis): Disallow racy handle dereference once we stop
- // supporting --no-local-heaps --no-turbo-direct-heap-access.
+ // supporting --no-local-heaps --no-concurrent-inlining.
AllowHandleDereference allow_deref;
StdoutStream{} << "- In-place update of #" << *node << " by reducer "
<< (*i)->reducer_name() << std::endl;
@@ -125,7 +125,7 @@ Reduction GraphReducer::Reduce(Node* const node) {
if (FLAG_trace_turbo_reduction) {
UnparkedScopeIfNeeded unparked(broker_);
// TODO(neis): Disallow racy handle dereference once we stop
- // supporting --no-local-heaps --no-turbo-direct-heap-access.
+ // supporting --no-local-heaps --no-concurrent-inlining.
AllowHandleDereference allow_deref;
StdoutStream{} << "- Replacement of #" << *node << " with #"
<< *(reduction.replacement()) << " by reducer "
diff --git a/chromium/v8/src/compiler/graph-visualizer.cc b/chromium/v8/src/compiler/graph-visualizer.cc
index eb3e4168f9a..cc1d07085fe 100644
--- a/chromium/v8/src/compiler/graph-visualizer.cc
+++ b/chromium/v8/src/compiler/graph-visualizer.cc
@@ -421,7 +421,7 @@ std::ostream& operator<<(std::ostream& os, const GraphAsJSON& ad) {
class GraphC1Visualizer {
public:
- GraphC1Visualizer(std::ostream& os, Zone* zone); // NOLINT
+ GraphC1Visualizer(std::ostream& os, Zone* zone);
GraphC1Visualizer(const GraphC1Visualizer&) = delete;
GraphC1Visualizer& operator=(const GraphC1Visualizer&) = delete;
@@ -1132,8 +1132,9 @@ std::ostream& operator<<(std::ostream& os, const InstructionOperandAsJSON& o) {
os << ",\"tooltip\": \"MUST_HAVE_SLOT\"";
break;
}
- case UnallocatedOperand::SAME_AS_FIRST_INPUT: {
- os << ",\"tooltip\": \"SAME_AS_FIRST_INPUT\"";
+ case UnallocatedOperand::SAME_AS_INPUT: {
+ os << ",\"tooltip\": \"SAME_AS_INPUT: " << unalloc->input_index()
+ << "\"";
break;
}
case UnallocatedOperand::REGISTER_OR_SLOT: {
diff --git a/chromium/v8/src/compiler/graph-visualizer.h b/chromium/v8/src/compiler/graph-visualizer.h
index 55859330157..39a2ef50210 100644
--- a/chromium/v8/src/compiler/graph-visualizer.h
+++ b/chromium/v8/src/compiler/graph-visualizer.h
@@ -6,7 +6,8 @@
#define V8_COMPILER_GRAPH_VISUALIZER_H_
#include <stdio.h>
-#include <fstream> // NOLINT(readability/streams)
+
+#include <fstream>
#include <iosfwd>
#include <memory>
diff --git a/chromium/v8/src/compiler/graph.h b/chromium/v8/src/compiler/graph.h
index eb6bfd7e77a..25682dc9aeb 100644
--- a/chromium/v8/src/compiler/graph.h
+++ b/chromium/v8/src/compiler/graph.h
@@ -67,7 +67,7 @@ class V8_EXPORT_PRIVATE Graph final : public NON_EXPORTED_BASE(ZoneObject) {
// Factory template for nodes with static input counts.
// Note: Template magic below is used to ensure this method is only considered
- // for argument types convertible to Node* during overload resoluation.
+ // for argument types convertible to Node* during overload resolution.
template <typename... Nodes,
typename = typename std::enable_if_t<
base::all(std::is_convertible<Nodes, Node*>::value...)>>
diff --git a/chromium/v8/src/compiler/heap-refs.cc b/chromium/v8/src/compiler/heap-refs.cc
new file mode 100644
index 00000000000..7931407a7ec
--- /dev/null
+++ b/chromium/v8/src/compiler/heap-refs.cc
@@ -0,0 +1,4594 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/heap-refs.h"
+
+#ifdef ENABLE_SLOW_DCHECKS
+#include <algorithm>
+#endif
+
+#include "src/api/api-inl.h"
+#include "src/ast/modules.h"
+#include "src/codegen/code-factory.h"
+#include "src/compiler/graph-reducer.h"
+#include "src/compiler/js-heap-broker.h"
+#include "src/execution/protectors-inl.h"
+#include "src/objects/descriptor-array.h"
+#include "src/objects/heap-number-inl.h"
+#include "src/objects/js-array-buffer-inl.h"
+#include "src/objects/property-cell.h"
+#include "src/objects/template-objects-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+#define TRACE(broker, x) TRACE_BROKER(broker, x)
+#define TRACE_MISSING(broker, x) TRACE_BROKER_MISSING(broker, x)
+
+#define FORWARD_DECL(Name, ...) class Name##Data;
+HEAP_BROKER_OBJECT_LIST(FORWARD_DECL)
+#undef FORWARD_DECL
+
+// There are several kinds of ObjectData values.
+//
+// kSmi: The underlying V8 object is a Smi and the data is an instance of the
+// base class (ObjectData), i.e. it's basically just the handle. Because the
+// object is a Smi, it's safe to access the handle in order to extract the
+// number value, and AsSmi() does exactly that.
+//
+// kSerializedHeapObject: The underlying V8 object is a HeapObject and the
+// data is an instance of the corresponding (most-specific) subclass, e.g.
+// JSFunctionData, which provides serialized information about the object.
+//
+// kBackgroundSerializedHeapObject: Like kSerializedHeapObject, but
+// allows serialization from the background thread.
+//
+// kUnserializedHeapObject: The underlying V8 object is a HeapObject and the
+// data is an instance of the base class (ObjectData), i.e. it basically
+// carries no information other than the handle.
+//
+// kNeverSerializedHeapObject: The underlying V8 object is a (potentially
+// mutable) HeapObject and the data is an instance of ObjectData. Its handle
+// must be persistent so that the GC can update it at a safepoint. Via this
+// handle, the object can be accessed concurrently with the main thread. To
+// use this kind, the flag --concurrent-inlining must be on.
+//
+// kUnserializedReadOnlyHeapObject: The underlying V8 object is a read-only
+// HeapObject and the data is an instance of ObjectData. For
+// ReadOnlyHeapObjects, it is OK to access the heap even from off-thread, so
+// these objects need not be serialized.
+enum ObjectDataKind {
+ kSmi,
+ kSerializedHeapObject,
+ kBackgroundSerializedHeapObject,
+ kUnserializedHeapObject,
+ kNeverSerializedHeapObject,
+ kUnserializedReadOnlyHeapObject
+};
+
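Note: a rough reading of the kinds above in terms of concrete examples
(illustrative, not exhaustive): a Smi such as 42 gets kSmi and its value is
read straight from the handle; a builtin Code object or anything else in
read-only space gets kUnserializedReadOnlyHeapObject, since heap access to it
is safe from any thread; an ordinary heap object snapshotted while the broker
is serializing gets kSerializedHeapObject via its most-specific *Data subclass,
or kBackgroundSerializedHeapObject when the snapshot is taken on the background
thread; and with --concurrent-inlining, objects that are never snapshotted are
held as kNeverSerializedHeapObject behind a persistent handle.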
+namespace {
+
+bool IsReadOnlyHeapObjectForCompiler(HeapObject object) {
+ DisallowGarbageCollection no_gc;
+ // TODO(jgruber): Remove this compiler-specific predicate and use the plain
+ // heap predicate instead. This would involve removing the special cases for
+ // builtins.
+ return (object.IsCode() && Code::cast(object).is_builtin()) ||
+ (object.IsHeapObject() &&
+ ReadOnlyHeap::Contains(HeapObject::cast(object)));
+}
+
+} // namespace
+
+class ObjectData : public ZoneObject {
+ public:
+ ObjectData(JSHeapBroker* broker, ObjectData** storage, Handle<Object> object,
+ ObjectDataKind kind)
+ : object_(object),
+ kind_(kind)
+#ifdef DEBUG
+ ,
+ broker_(broker)
+#endif // DEBUG
+ {
+ // This assignment ensures we don't end up inserting the same object
+ // in an endless recursion.
+ *storage = this;
+
+ TRACE(broker, "Creating data " << this << " for handle " << object.address()
+ << " (" << Brief(*object) << ")");
+
+    // It is safe to access read-only heap objects and builtins from a
+    // background thread. When we read fields of these objects, we may create
+    // ObjectData on the background thread even without a canonical handle
+    // scope. This is safe too, since we don't create new handles but only
+    // fetch existing handles from the read-only roots table or the builtins
+    // table, which is also what the canonical scope uses. For all other
+    // objects we should have created ObjectData in a canonical handle scope
+    // on the main thread.
+ CHECK_IMPLIES(
+ broker->mode() == JSHeapBroker::kDisabled ||
+ broker->mode() == JSHeapBroker::kSerializing,
+ broker->isolate()->handle_scope_data()->canonical_scope != nullptr);
+ CHECK_IMPLIES(broker->mode() == JSHeapBroker::kSerialized,
+ kind == kUnserializedReadOnlyHeapObject || kind == kSmi ||
+ kind == kNeverSerializedHeapObject ||
+ kind == kBackgroundSerializedHeapObject);
+ CHECK_IMPLIES(kind == kUnserializedReadOnlyHeapObject,
+ object->IsHeapObject() && IsReadOnlyHeapObjectForCompiler(
+ HeapObject::cast(*object)));
+ }
+
+#define DECLARE_IS(Name, ...) bool Is##Name() const;
+ HEAP_BROKER_OBJECT_LIST(DECLARE_IS)
+#undef DECLARE_IS
+
+#define DECLARE_AS(Name, ...) Name##Data* As##Name();
+ HEAP_BROKER_OBJECT_LIST(DECLARE_AS)
+#undef DECLARE_AS
+
+ Handle<Object> object() const { return object_; }
+ ObjectDataKind kind() const { return kind_; }
+ bool is_smi() const { return kind_ == kSmi; }
+ bool should_access_heap() const {
+ return kind_ == kUnserializedHeapObject ||
+ kind_ == kNeverSerializedHeapObject ||
+ kind_ == kUnserializedReadOnlyHeapObject;
+ }
+ bool IsNull() const { return object_->IsNull(); }
+
+#ifdef DEBUG
+ enum class Usage{kUnused, kOnlyIdentityUsed, kDataUsed};
+ mutable Usage used_status = Usage::kUnused;
+
+ JSHeapBroker* broker() const { return broker_; }
+#endif // DEBUG
+
+ private:
+ Handle<Object> const object_;
+ ObjectDataKind const kind_;
+#ifdef DEBUG
+ JSHeapBroker* const broker_; // For DCHECKs.
+#endif // DEBUG
+};
+
+namespace {
+
+template <class T>
+constexpr bool IsSerializedRef() {
+ return ref_traits<T>::ref_serialization_kind ==
+ RefSerializationKind::kSerialized;
+}
+
+RefSerializationKind RefSerializationKindOf(ObjectData* const data) {
+ Object o = *data->object();
+ if (o.IsSmi()) {
+ return RefSerializationKind::kNeverSerialized;
+#define DEFINE_REF_SERIALIZATION_KIND(Name, Kind) \
+ } \
+ /* NOLINTNEXTLINE(readability/braces) */ \
+ else if (o.Is##Name()) { \
+ return ref_traits<Name>::ref_serialization_kind;
+ HEAP_BROKER_OBJECT_LIST(DEFINE_REF_SERIALIZATION_KIND)
+#undef DEFINE_REF_SERIALIZATION_KIND
+ }
+ UNREACHABLE();
+}
+
+} // namespace
+
+class HeapObjectData : public ObjectData {
+ public:
+ HeapObjectData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<HeapObject> object,
+ ObjectDataKind kind = ObjectDataKind::kSerializedHeapObject);
+
+ base::Optional<bool> TryGetBooleanValue(JSHeapBroker* broker) const;
+ ObjectData* map() const { return map_; }
+ InstanceType GetMapInstanceType() const;
+
+ private:
+ base::Optional<bool> TryGetBooleanValueImpl(JSHeapBroker* broker) const;
+
+ ObjectData* const map_;
+};
+
+class PropertyCellData : public HeapObjectData {
+ public:
+ PropertyCellData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<PropertyCell> object,
+ ObjectDataKind kind = ObjectDataKind::kSerializedHeapObject);
+
+ bool Serialize(JSHeapBroker* broker);
+
+ PropertyDetails property_details() const {
+ CHECK(serialized());
+ return property_details_;
+ }
+
+ ObjectData* value() const {
+ DCHECK(serialized());
+ return value_;
+ }
+
+ private:
+ PropertyDetails property_details_ = PropertyDetails::Empty();
+ ObjectData* value_ = nullptr;
+
+ bool serialized() const { return value_ != nullptr; }
+};
+
+// TODO(mslekova): Once we have real-world usage data, we might want to
+// reimplement this as a sorted vector instead, to reduce the memory overhead.
+typedef ZoneMap<ObjectData*, HolderLookupResult> KnownReceiversMap;
+
+class FunctionTemplateInfoData : public HeapObjectData {
+ public:
+ FunctionTemplateInfoData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<FunctionTemplateInfo> object);
+
+ bool is_signature_undefined() const { return is_signature_undefined_; }
+ bool accept_any_receiver() const { return accept_any_receiver_; }
+ bool has_call_code() const { return has_call_code_; }
+
+ void SerializeCallCode(JSHeapBroker* broker);
+ ObjectData* call_code() const { return call_code_; }
+ ZoneVector<Address> c_functions() const { return c_functions_; }
+ ZoneVector<const CFunctionInfo*> c_signatures() const {
+ return c_signatures_;
+ }
+ KnownReceiversMap& known_receivers() { return known_receivers_; }
+
+ private:
+ bool is_signature_undefined_ = false;
+ bool accept_any_receiver_ = false;
+ bool has_call_code_ = false;
+
+ ObjectData* call_code_ = nullptr;
+ ZoneVector<Address> c_functions_;
+ ZoneVector<const CFunctionInfo*> c_signatures_;
+ KnownReceiversMap known_receivers_;
+};
+
+class CallHandlerInfoData : public HeapObjectData {
+ public:
+ CallHandlerInfoData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<CallHandlerInfo> object);
+
+ Address callback() const { return callback_; }
+
+ void Serialize(JSHeapBroker* broker);
+ ObjectData* data() const { return data_; }
+
+ private:
+ Address const callback_;
+
+ ObjectData* data_ = nullptr;
+};
+
+namespace {
+
+ZoneVector<Address> GetCFunctions(FixedArray function_overloads, Zone* zone) {
+ const int len = function_overloads.length() /
+ FunctionTemplateInfo::kFunctionOverloadEntrySize;
+ ZoneVector<Address> c_functions = ZoneVector<Address>(len, zone);
+ for (int i = 0; i < len; i++) {
+ c_functions[i] = v8::ToCData<Address>(function_overloads.get(
+ FunctionTemplateInfo::kFunctionOverloadEntrySize * i));
+ }
+ return c_functions;
+}
+
+ZoneVector<const CFunctionInfo*> GetCSignatures(FixedArray function_overloads,
+ Zone* zone) {
+ const int len = function_overloads.length() /
+ FunctionTemplateInfo::kFunctionOverloadEntrySize;
+ ZoneVector<const CFunctionInfo*> c_signatures =
+ ZoneVector<const CFunctionInfo*>(len, zone);
+ for (int i = 0; i < len; i++) {
+ c_signatures[i] = v8::ToCData<const CFunctionInfo*>(function_overloads.get(
+ FunctionTemplateInfo::kFunctionOverloadEntrySize * i + 1));
+ }
+ return c_signatures;
+}
+
+} // namespace
+
+FunctionTemplateInfoData::FunctionTemplateInfoData(
+ JSHeapBroker* broker, ObjectData** storage,
+ Handle<FunctionTemplateInfo> object)
+ : HeapObjectData(broker, storage, object),
+ c_functions_(broker->zone()),
+ c_signatures_(broker->zone()),
+ known_receivers_(broker->zone()) {
+ DCHECK(!broker->is_concurrent_inlining());
+
+ auto function_template_info = Handle<FunctionTemplateInfo>::cast(object);
+
+ FixedArray function_overloads_array =
+ FixedArray::cast(function_template_info->GetCFunctionOverloads());
+ c_functions_ = GetCFunctions(function_overloads_array, broker->zone());
+ c_signatures_ = GetCSignatures(function_overloads_array, broker->zone());
+
+ is_signature_undefined_ =
+ function_template_info->signature().IsUndefined(broker->isolate());
+ accept_any_receiver_ = function_template_info->accept_any_receiver();
+
+ CallOptimization call_optimization(broker->local_isolate_or_isolate(),
+ object);
+ has_call_code_ = call_optimization.is_simple_api_call();
+}
+
+CallHandlerInfoData::CallHandlerInfoData(JSHeapBroker* broker,
+ ObjectData** storage,
+ Handle<CallHandlerInfo> object)
+ : HeapObjectData(broker, storage, object),
+ callback_(v8::ToCData<Address>(object->callback())) {
+ DCHECK(!broker->is_concurrent_inlining());
+}
+
+PropertyCellData::PropertyCellData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<PropertyCell> object,
+ ObjectDataKind kind)
+ : HeapObjectData(broker, storage, object, kind) {}
+
+bool PropertyCellData::Serialize(JSHeapBroker* broker) {
+ if (serialized()) return true;
+
+ TraceScope tracer(broker, this, "PropertyCellData::Serialize");
+ auto cell = Handle<PropertyCell>::cast(object());
+
+ // While this code runs on a background thread, the property cell might
+ // undergo state transitions via calls to PropertyCell::Transition. These
+ // transitions follow a certain protocol on which we rely here to ensure that
+ // we only report success when we can guarantee consistent data. A key
+ // property is that after transitioning from cell type A to B (A != B), there
+ // will never be a transition back to A, unless A is kConstant and the new
+ // value is the hole (i.e. the property cell was invalidated, which is a final
+ // state).
+
+ PropertyDetails property_details = cell->property_details(kAcquireLoad);
+
+ Handle<Object> value =
+ broker->CanonicalPersistentHandle(cell->value(kAcquireLoad));
+ if (broker->ObjectMayBeUninitialized(value)) {
+ DCHECK(!broker->IsMainThread());
+ return false;
+ }
+
+ {
+ PropertyDetails property_details_again =
+ cell->property_details(kAcquireLoad);
+ if (property_details != property_details_again) {
+ DCHECK(!broker->IsMainThread());
+ return false;
+ }
+ }
+
+ if (property_details.cell_type() == PropertyCellType::kConstant) {
+ Handle<Object> value_again =
+ broker->CanonicalPersistentHandle(cell->value(kAcquireLoad));
+ if (*value != *value_again) {
+ DCHECK(!broker->IsMainThread());
+ return false;
+ }
+ }
+
+ ObjectData* value_data = broker->TryGetOrCreateData(value);
+ if (value_data == nullptr) {
+ DCHECK(!broker->IsMainThread());
+ return false;
+ }
+
+ PropertyCell::CheckDataIsCompatible(property_details, *value);
+
+ DCHECK(!serialized());
+ property_details_ = property_details;
+ value_ = value_data;
+ DCHECK(serialized());
+ return true;
+}
+
+void FunctionTemplateInfoData::SerializeCallCode(JSHeapBroker* broker) {
+ if (call_code_ != nullptr) return;
+
+ TraceScope tracer(broker, this,
+ "FunctionTemplateInfoData::SerializeCallCode");
+ auto function_template_info = Handle<FunctionTemplateInfo>::cast(object());
+ call_code_ =
+ broker->GetOrCreateData(function_template_info->call_code(kAcquireLoad));
+ if (call_code_->should_access_heap()) {
+ // TODO(mvstanton): When ObjectRef is in the never serialized list, this
+ // code can be removed.
+ broker->GetOrCreateData(
+ Handle<CallHandlerInfo>::cast(call_code_->object())->data());
+ } else {
+ call_code_->AsCallHandlerInfo()->Serialize(broker);
+ }
+}
+
+void CallHandlerInfoData::Serialize(JSHeapBroker* broker) {
+ if (data_ != nullptr) return;
+
+ TraceScope tracer(broker, this, "CallHandlerInfoData::Serialize");
+ auto call_handler_info = Handle<CallHandlerInfo>::cast(object());
+ data_ = broker->GetOrCreateData(call_handler_info->data());
+}
+
+class JSReceiverData : public HeapObjectData {
+ public:
+ JSReceiverData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<JSReceiver> object, ObjectDataKind kind)
+ : HeapObjectData(broker, storage, object, kind) {}
+};
+
+class JSObjectData : public JSReceiverData {
+ public:
+ JSObjectData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<JSObject> object);
+
+ // Recursive serialization of all reachable JSObjects.
+ void SerializeAsBoilerplate(JSHeapBroker* broker);
+ ObjectData* GetInobjectField(int property_index) const;
+
+ // Shallow serialization of {elements}.
+ void SerializeElements(JSHeapBroker* broker);
+ bool serialized_elements() const { return serialized_elements_; }
+ ObjectData* elements() const;
+
+ void SerializeObjectCreateMap(JSHeapBroker* broker);
+
+ ObjectData* object_create_map(
+ JSHeapBroker* broker) const { // Can be nullptr.
+ if (!serialized_object_create_map_) {
+ DCHECK_NULL(object_create_map_);
+ TRACE_MISSING(broker, "object_create_map on " << this);
+ }
+ return object_create_map_;
+ }
+
+ ObjectData* GetOwnConstantElement(
+ JSHeapBroker* broker, uint32_t index,
+ SerializationPolicy policy = SerializationPolicy::kAssumeSerialized);
+ ObjectData* GetOwnFastDataProperty(
+ JSHeapBroker* broker, Representation representation,
+ FieldIndex field_index,
+ SerializationPolicy policy = SerializationPolicy::kAssumeSerialized);
+ ObjectData* GetOwnDictionaryProperty(JSHeapBroker* broker,
+ InternalIndex dict_index,
+ SerializationPolicy policy);
+
+ // This method is only used to assert our invariants.
+ bool cow_or_empty_elements_tenured() const;
+
+ private:
+ void SerializeRecursiveAsBoilerplate(JSHeapBroker* broker, int max_depths);
+
+ ObjectData* elements_ = nullptr;
+ bool cow_or_empty_elements_tenured_ = false;
+ // The {serialized_as_boilerplate} flag is set when all recursively
+ // reachable JSObjects are serialized.
+ bool serialized_as_boilerplate_ = false;
+ bool serialized_elements_ = false;
+
+ ZoneVector<ObjectData*> inobject_fields_;
+
+ bool serialized_object_create_map_ = false;
+ ObjectData* object_create_map_ = nullptr;
+
+ // Elements (indexed properties) that either
+ // (1) are known to exist directly on the object as non-writable and
+ // non-configurable, or (2) are known not to (possibly they don't exist at
+ // all). In case (2), the second pair component is nullptr.
+ ZoneVector<std::pair<uint32_t, ObjectData*>> own_constant_elements_;
+ // Properties that either:
+ // (1) are known to exist directly on the object, or
+ // (2) are known not to (possibly they don't exist at all).
+ // In case (2), the second pair component is nullptr.
+ // For simplicity, this may in theory overlap with inobject_fields_.
+ // For fast mode objects, the keys of the map are the property_index() values
+ // of the respective property FieldIndex'es. For slow mode objects, the keys
+  // are the dictionary indices.
+ ZoneUnorderedMap<int, ObjectData*> own_properties_;
+};
+
+void JSObjectData::SerializeObjectCreateMap(JSHeapBroker* broker) {
+ if (serialized_object_create_map_) return;
+ serialized_object_create_map_ = true;
+
+ TraceScope tracer(broker, this, "JSObjectData::SerializeObjectCreateMap");
+ Handle<JSObject> jsobject = Handle<JSObject>::cast(object());
+
+ if (jsobject->map().is_prototype_map()) {
+ Handle<Object> maybe_proto_info(jsobject->map().prototype_info(),
+ broker->isolate());
+ if (maybe_proto_info->IsPrototypeInfo()) {
+ auto proto_info = Handle<PrototypeInfo>::cast(maybe_proto_info);
+ if (proto_info->HasObjectCreateMap()) {
+ DCHECK_NULL(object_create_map_);
+ object_create_map_ =
+ broker->GetOrCreateData(proto_info->ObjectCreateMap());
+ }
+ }
+ }
+}
+
+namespace {
+
+base::Optional<ObjectRef> GetOwnElementFromHeap(JSHeapBroker* broker,
+ Handle<Object> receiver,
+ uint32_t index,
+ bool constant_only) {
+ LookupIterator it(broker->isolate(), receiver, index, LookupIterator::OWN);
+ if (it.state() == LookupIterator::DATA &&
+ (!constant_only || (it.IsReadOnly() && !it.IsConfigurable()))) {
+ return MakeRef(broker, it.GetDataValue());
+ }
+ return base::nullopt;
+}
+
+ObjectRef GetOwnFastDataPropertyFromHeap(JSHeapBroker* broker,
+ Handle<JSObject> receiver,
+ Representation representation,
+ FieldIndex field_index) {
+ Handle<Object> constant =
+ JSObject::FastPropertyAt(receiver, representation, field_index);
+ return ObjectRef(broker, constant);
+}
+
+ObjectRef GetOwnDictionaryPropertyFromHeap(JSHeapBroker* broker,
+ Handle<JSObject> receiver,
+ InternalIndex dict_index) {
+ Handle<Object> constant =
+ JSObject::DictionaryPropertyAt(receiver, dict_index);
+ return ObjectRef(broker, constant);
+}
+
+} // namespace
+
+ObjectData* JSObjectData::GetOwnConstantElement(JSHeapBroker* broker,
+ uint32_t index,
+ SerializationPolicy policy) {
+ for (auto const& p : own_constant_elements_) {
+ if (p.first == index) return p.second;
+ }
+
+ if (policy == SerializationPolicy::kAssumeSerialized) {
+ TRACE_MISSING(broker, "knowledge about index " << index << " on " << this);
+ return nullptr;
+ }
+
+ base::Optional<ObjectRef> element =
+ GetOwnElementFromHeap(broker, object(), index, true);
+ ObjectData* result = element.has_value() ? element->data() : nullptr;
+ own_constant_elements_.push_back({index, result});
+ return result;
+}
+
+ObjectData* JSObjectData::GetOwnFastDataProperty(JSHeapBroker* broker,
+ Representation representation,
+ FieldIndex field_index,
+ SerializationPolicy policy) {
+ auto p = own_properties_.find(field_index.property_index());
+ if (p != own_properties_.end()) return p->second;
+
+ if (policy == SerializationPolicy::kAssumeSerialized) {
+ TRACE_MISSING(broker, "knowledge about fast property with index "
+ << field_index.property_index() << " on "
+ << this);
+ return nullptr;
+ }
+
+ ObjectRef property = GetOwnFastDataPropertyFromHeap(
+ broker, Handle<JSObject>::cast(object()), representation, field_index);
+ ObjectData* result(property.data());
+ own_properties_.insert(std::make_pair(field_index.property_index(), result));
+ return result;
+}
+
+ObjectData* JSObjectData::GetOwnDictionaryProperty(JSHeapBroker* broker,
+ InternalIndex dict_index,
+ SerializationPolicy policy) {
+ auto p = own_properties_.find(dict_index.as_int());
+ if (p != own_properties_.end()) return p->second;
+
+ if (policy == SerializationPolicy::kAssumeSerialized) {
+ TRACE_MISSING(broker, "knowledge about dictionary property with index "
+ << dict_index.as_int() << " on " << this);
+ return nullptr;
+ }
+
+ ObjectRef property = GetOwnDictionaryPropertyFromHeap(
+ broker, Handle<JSObject>::cast(object()), dict_index);
+ ObjectData* result(property.data());
+ own_properties_.insert(std::make_pair(dict_index.as_int(), result));
+ return result;
+}
+
+class JSTypedArrayData : public JSObjectData {
+ public:
+ JSTypedArrayData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<JSTypedArray> object)
+ : JSObjectData(broker, storage, object) {}
+
+ // TODO(v8:7790): Once JSObject is no longer serialized, also make
+ // JSTypedArrayRef never-serialized.
+ STATIC_ASSERT(IsSerializedRef<JSObject>());
+
+ void Serialize(JSHeapBroker* broker);
+ bool serialized() const { return serialized_; }
+
+ bool is_on_heap() const { return is_on_heap_; }
+ size_t length() const { return length_; }
+ void* data_ptr() const { return data_ptr_; }
+
+ ObjectData* buffer() const { return buffer_; }
+
+ private:
+ bool serialized_ = false;
+ bool is_on_heap_ = false;
+ size_t length_ = 0;
+ void* data_ptr_ = nullptr;
+ ObjectData* buffer_ = nullptr;
+};
+
+void JSTypedArrayData::Serialize(JSHeapBroker* broker) {
+ if (serialized_) return;
+ serialized_ = true;
+
+ TraceScope tracer(broker, this, "JSTypedArrayData::Serialize");
+ Handle<JSTypedArray> typed_array = Handle<JSTypedArray>::cast(object());
+
+ is_on_heap_ = typed_array->is_on_heap();
+ length_ = typed_array->length();
+ data_ptr_ = typed_array->DataPtr();
+
+ if (!is_on_heap()) {
+ DCHECK_NULL(buffer_);
+ buffer_ = broker->GetOrCreateData(typed_array->buffer());
+ }
+}
+
+class ArrayBoilerplateDescriptionData : public HeapObjectData {
+ public:
+ ArrayBoilerplateDescriptionData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<ArrayBoilerplateDescription> object)
+ : HeapObjectData(broker, storage, object),
+ constants_elements_length_(object->constant_elements().length()) {
+ DCHECK(!broker->is_concurrent_inlining());
+ }
+
+ int constants_elements_length() const { return constants_elements_length_; }
+
+ private:
+ int const constants_elements_length_;
+};
+
+class JSDataViewData : public JSObjectData {
+ public:
+ JSDataViewData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<JSDataView> object);
+
+ size_t byte_length() const { return byte_length_; }
+
+ private:
+ size_t const byte_length_;
+};
+
+class JSBoundFunctionData : public JSObjectData {
+ public:
+ JSBoundFunctionData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<JSBoundFunction> object);
+
+ bool Serialize(JSHeapBroker* broker);
+ bool serialized() const { return serialized_; }
+
+ ObjectData* bound_target_function() const {
+ DCHECK(!broker()->is_concurrent_inlining());
+ return bound_target_function_;
+ }
+ ObjectData* bound_this() const {
+ DCHECK(!broker()->is_concurrent_inlining());
+ return bound_this_;
+ }
+ ObjectData* bound_arguments() const {
+ DCHECK(!broker()->is_concurrent_inlining());
+ return bound_arguments_;
+ }
+
+ private:
+ bool serialized_ = false;
+
+ ObjectData* bound_target_function_ = nullptr;
+ ObjectData* bound_this_ = nullptr;
+ ObjectData* bound_arguments_ = nullptr;
+};
+
+class JSFunctionData : public JSObjectData {
+ public:
+ JSFunctionData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<JSFunction> object);
+
+ bool has_feedback_vector() const { return has_feedback_vector_; }
+ bool has_initial_map() const { return has_initial_map_; }
+ bool has_prototype() const { return has_prototype_; }
+ bool PrototypeRequiresRuntimeLookup() const {
+ return PrototypeRequiresRuntimeLookup_;
+ }
+
+ void Serialize(JSHeapBroker* broker);
+ bool serialized() const { return serialized_; }
+
+ void SerializeCodeAndFeedback(JSHeapBroker* broker);
+ bool serialized_code_and_feedback() const {
+ return serialized_code_and_feedback_;
+ }
+
+ ObjectData* context() const { return context_; }
+ ObjectData* native_context() const { return native_context_; }
+ ObjectData* initial_map() const { return initial_map_; }
+ ObjectData* prototype() const { return prototype_; }
+ ObjectData* shared() const { return shared_; }
+ ObjectData* raw_feedback_cell() const {
+ DCHECK(serialized_code_and_feedback());
+ return feedback_cell_;
+ }
+ ObjectData* feedback_vector() const {
+ DCHECK(serialized_code_and_feedback());
+ return feedback_vector_;
+ }
+ ObjectData* code() const {
+ DCHECK(serialized_code_and_feedback());
+ DCHECK(!broker()->is_concurrent_inlining());
+ return code_;
+ }
+ int initial_map_instance_size_with_min_slack() const {
+ CHECK(serialized_);
+ return initial_map_instance_size_with_min_slack_;
+ }
+
+ private:
+ bool has_feedback_vector_;
+ bool has_initial_map_;
+ bool has_prototype_;
+ bool PrototypeRequiresRuntimeLookup_;
+
+ bool serialized_ = false;
+ bool serialized_code_and_feedback_ = false;
+
+ ObjectData* context_ = nullptr;
+ ObjectData* native_context_ = nullptr;
+ ObjectData* initial_map_ = nullptr;
+ ObjectData* prototype_ = nullptr;
+ ObjectData* shared_ = nullptr;
+ ObjectData* feedback_vector_ = nullptr;
+ ObjectData* feedback_cell_ = nullptr;
+ ObjectData* code_ = nullptr;
+ int initial_map_instance_size_with_min_slack_;
+};
+
+class RegExpBoilerplateDescriptionData : public HeapObjectData {
+ public:
+ RegExpBoilerplateDescriptionData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<RegExpBoilerplateDescription> object)
+ : HeapObjectData(broker, storage, object) {}
+
+ void Serialize(JSHeapBroker* broker);
+ ObjectData* data() const {
+ CHECK(serialized_);
+ return data_;
+ }
+ ObjectData* source() const {
+ CHECK(serialized_);
+ return source_;
+ }
+ int flags() const {
+ CHECK(serialized_);
+ return flags_;
+ }
+
+ private:
+ bool serialized_ = false;
+ ObjectData* data_ = nullptr;
+ ObjectData* source_ = nullptr;
+ int flags_;
+};
+
+class HeapNumberData : public HeapObjectData {
+ public:
+ HeapNumberData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<HeapNumber> object,
+ ObjectDataKind kind = ObjectDataKind::kSerializedHeapObject)
+ : HeapObjectData(broker, storage, object, kind),
+ value_(object->value()) {}
+
+ double value() const { return value_; }
+
+ private:
+ double const value_;
+};
+
+class ContextData : public HeapObjectData {
+ public:
+ ContextData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<Context> object);
+
+ ObjectData* previous(
+ JSHeapBroker* broker,
+ SerializationPolicy policy = SerializationPolicy::kAssumeSerialized);
+
+ // Returns nullptr if the slot index isn't valid or wasn't serialized,
+ // unless {policy} is {kSerializeIfNeeded}.
+ ObjectData* GetSlot(
+ JSHeapBroker* broker, int index,
+ SerializationPolicy policy = SerializationPolicy::kAssumeSerialized);
+
+ private:
+ ZoneMap<int, ObjectData*> slots_;
+ ObjectData* previous_ = nullptr;
+};
+
+ContextData::ContextData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<Context> object)
+ : HeapObjectData(broker, storage, object), slots_(broker->zone()) {}
+
+ObjectData* ContextData::previous(JSHeapBroker* broker,
+ SerializationPolicy policy) {
+ if (policy == SerializationPolicy::kSerializeIfNeeded &&
+ previous_ == nullptr) {
+ TraceScope tracer(broker, this, "ContextData::previous");
+ Handle<Context> context = Handle<Context>::cast(object());
+ previous_ = broker->GetOrCreateData(context->unchecked_previous());
+ }
+ return previous_;
+}
+
+ObjectData* ContextData::GetSlot(JSHeapBroker* broker, int index,
+ SerializationPolicy policy) {
+ DCHECK_GE(index, 0);
+ auto search = slots_.find(index);
+ if (search != slots_.end()) {
+ return search->second;
+ }
+
+ if (policy == SerializationPolicy::kSerializeIfNeeded) {
+ Handle<Context> context = Handle<Context>::cast(object());
+ if (index < context->length()) {
+ TraceScope tracer(broker, this, "ContextData::GetSlot");
+ TRACE(broker, "Serializing context slot " << index);
+ ObjectData* odata = broker->GetOrCreateData(context->get(index));
+ slots_.insert(std::make_pair(index, odata));
+ return odata;
+ }
+ }
+
+ return nullptr;
+}
+
+class NativeContextData : public ContextData {
+ public:
+#define DECL_ACCESSOR(type, name) \
+ ObjectData* name() const { return name##_; }
+ BROKER_NATIVE_CONTEXT_FIELDS(DECL_ACCESSOR)
+#undef DECL_ACCESSOR
+
+ const ZoneVector<ObjectData*>& function_maps() const {
+ CHECK_NE(state_, State::kUnserialized);
+ return function_maps_;
+ }
+
+ ObjectData* scope_info() const {
+ CHECK_NE(state_, State::kUnserialized);
+ return scope_info_;
+ }
+
+ NativeContextData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<NativeContext> object);
+ void Serialize(JSHeapBroker* broker);
+ void SerializeOnBackground(JSHeapBroker* broker);
+
+ private:
+  // After Serialize is called, the class is partially serialized and in the
+ // kSerializedOnMainThread state. It then becomes kFullySerialized once
+ // SerializeOnBackground is called.
+ enum class State { kUnserialized, kSerializedOnMainThread, kFullySerialized };
+ State state_;
+
+#define DECL_MEMBER(type, name) ObjectData* name##_ = nullptr;
+ BROKER_NATIVE_CONTEXT_FIELDS(DECL_MEMBER)
+#undef DECL_MEMBER
+ ZoneVector<ObjectData*> function_maps_;
+ ObjectData* scope_info_ = nullptr;
+};
+
+class NameData : public HeapObjectData {
+ public:
+ NameData(JSHeapBroker* broker, ObjectData** storage, Handle<Name> object)
+ : HeapObjectData(broker, storage, object) {
+ DCHECK(!broker->is_concurrent_inlining());
+ }
+};
+
+class StringData : public NameData {
+ public:
+ StringData(JSHeapBroker* broker, ObjectData** storage, Handle<String> object);
+
+ int length() const { return length_; }
+ uint16_t first_char() const { return first_char_; }
+ base::Optional<double> to_number() const { return to_number_; }
+ bool is_external_string() const { return is_external_string_; }
+ bool is_seq_string() const { return is_seq_string_; }
+
+ ObjectData* GetCharAsStringOrUndefined(
+ JSHeapBroker* broker, uint32_t index,
+ SerializationPolicy policy = SerializationPolicy::kAssumeSerialized);
+
+ private:
+ int const length_;
+ uint16_t const first_char_;
+ base::Optional<double> to_number_;
+ bool const is_external_string_;
+ bool const is_seq_string_;
+
+ // Known individual characters as strings, corresponding to the semantics of
+ // element access (s[i]). The first pair component is always less than
+ // {length_}. The second component is never nullptr.
+ ZoneVector<std::pair<uint32_t, ObjectData*>> chars_as_strings_;
+};
+
+class SymbolData : public NameData {
+ public:
+ SymbolData(JSHeapBroker* broker, ObjectData** storage, Handle<Symbol> object)
+ : NameData(broker, storage, object) {
+ DCHECK(!broker->is_concurrent_inlining());
+ }
+};
+
+StringData::StringData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<String> object)
+ : NameData(broker, storage, object),
+ length_(object->length()),
+ first_char_(length_ > 0 ? object->Get(0) : 0),
+ to_number_(TryStringToDouble(broker->local_isolate(), object)),
+ is_external_string_(object->IsExternalString()),
+ is_seq_string_(object->IsSeqString()),
+ chars_as_strings_(broker->zone()) {
+ DCHECK(!broker->is_concurrent_inlining());
+}
+
+class InternalizedStringData : public StringData {
+ public:
+ InternalizedStringData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<InternalizedString> object)
+ : StringData(broker, storage, object) {
+ DCHECK(!broker->is_concurrent_inlining());
+ }
+};
+
+ObjectData* StringData::GetCharAsStringOrUndefined(JSHeapBroker* broker,
+ uint32_t index,
+ SerializationPolicy policy) {
+ if (index >= static_cast<uint32_t>(length())) return nullptr;
+
+ for (auto const& p : chars_as_strings_) {
+ if (p.first == index) return p.second;
+ }
+
+ if (policy == SerializationPolicy::kAssumeSerialized) {
+ TRACE_MISSING(broker, "knowledge about index " << index << " on " << this);
+ return nullptr;
+ }
+
+ base::Optional<ObjectRef> element =
+ GetOwnElementFromHeap(broker, object(), index, true);
+ ObjectData* result = element.has_value() ? element->data() : nullptr;
+ chars_as_strings_.push_back({index, result});
+ return result;
+}
+
+namespace {
+
+bool IsFastLiteralHelper(Handle<JSObject> boilerplate, int max_depth,
+ int* max_properties) {
+ DCHECK_GE(max_depth, 0);
+ DCHECK_GE(*max_properties, 0);
+
+ // Check for too deep nesting.
+ if (max_depth == 0) return false;
+
+ Isolate* const isolate = boilerplate->GetIsolate();
+
+  // If the boilerplate map has been deprecated, bail out of the fast literal
+  // optimization. The map could be deprecated at some point after the line
+  // below, but it's not a correctness issue -- it only means the literal isn't
+  // created with the most up-to-date map(s).
+ if (boilerplate->map().is_deprecated()) return false;
+
+ // Check the elements.
+ Handle<FixedArrayBase> elements(boilerplate->elements(), isolate);
+ if (elements->length() > 0 &&
+ elements->map() != ReadOnlyRoots(isolate).fixed_cow_array_map()) {
+ if (boilerplate->HasSmiOrObjectElements()) {
+ Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
+ int length = elements->length();
+ for (int i = 0; i < length; i++) {
+ if ((*max_properties)-- == 0) return false;
+ Handle<Object> value(fast_elements->get(i), isolate);
+ if (value->IsJSObject()) {
+ Handle<JSObject> value_object = Handle<JSObject>::cast(value);
+ if (!IsFastLiteralHelper(value_object, max_depth - 1,
+ max_properties)) {
+ return false;
+ }
+ }
+ }
+ } else if (boilerplate->HasDoubleElements()) {
+ if (elements->Size() > kMaxRegularHeapObjectSize) return false;
+ } else {
+ return false;
+ }
+ }
+
+ // TODO(turbofan): Do we want to support out-of-object properties?
+ if (!(boilerplate->HasFastProperties() &&
+ boilerplate->property_array().length() == 0)) {
+ return false;
+ }
+
+ // Check the in-object properties.
+ Handle<DescriptorArray> descriptors(
+ boilerplate->map().instance_descriptors(isolate, kRelaxedLoad), isolate);
+ for (InternalIndex i : boilerplate->map().IterateOwnDescriptors()) {
+ PropertyDetails details = descriptors->GetDetails(i);
+ if (details.location() != kField) continue;
+ DCHECK_EQ(kData, details.kind());
+ if ((*max_properties)-- == 0) return false;
+ FieldIndex field_index = FieldIndex::ForDescriptor(boilerplate->map(), i);
+ Handle<Object> value(boilerplate->RawFastPropertyAt(field_index), isolate);
+ if (value->IsJSObject()) {
+ Handle<JSObject> value_object = Handle<JSObject>::cast(value);
+ if (!IsFastLiteralHelper(value_object, max_depth - 1, max_properties)) {
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+// Maximum depth and total number of elements and properties for literal
+// graphs to be considered for fast deep-copying. The limit is chosen to
+// match the maximum number of inobject properties, to ensure that the
+// performance of using object literals is not worse than using constructor
+// functions; see crbug.com/v8/6211 for details.
+const int kMaxFastLiteralDepth = 3;
+const int kMaxFastLiteralProperties = JSObject::kMaxInObjectProperties;
+
+// Determines whether the given array or object literal boilerplate satisfies
+// all limits to be considered for fast deep-copying and computes the total
+// size of all objects that are part of the graph.
+bool IsInlinableFastLiteral(Handle<JSObject> boilerplate) {
+ int max_properties = kMaxFastLiteralProperties;
+ return IsFastLiteralHelper(boilerplate, kMaxFastLiteralDepth,
+ &max_properties);
+}
+
+} // namespace
+
+class AccessorInfoData : public HeapObjectData {
+ public:
+ AccessorInfoData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<AccessorInfo> object);
+};
+
+class AllocationSiteData : public HeapObjectData {
+ public:
+ AllocationSiteData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<AllocationSite> object);
+ void SerializeBoilerplate(JSHeapBroker* broker);
+
+ bool PointsToLiteral() const { return PointsToLiteral_; }
+ AllocationType GetAllocationType() const { return GetAllocationType_; }
+ ObjectData* nested_site() const { return nested_site_; }
+ bool IsFastLiteral() const { return IsFastLiteral_; }
+ ObjectData* boilerplate() const { return boilerplate_; }
+
+ // These are only valid if PointsToLiteral is false.
+ ElementsKind GetElementsKind() const { return GetElementsKind_; }
+ bool CanInlineCall() const { return CanInlineCall_; }
+
+ private:
+ bool const PointsToLiteral_;
+ AllocationType const GetAllocationType_;
+ ObjectData* nested_site_ = nullptr;
+ bool IsFastLiteral_ = false;
+ ObjectData* boilerplate_ = nullptr;
+ ElementsKind GetElementsKind_ = NO_ELEMENTS;
+ bool CanInlineCall_ = false;
+ bool serialized_boilerplate_ = false;
+};
+
+class BigIntData : public HeapObjectData {
+ public:
+ BigIntData(JSHeapBroker* broker, ObjectData** storage, Handle<BigInt> object,
+ ObjectDataKind kind)
+ : HeapObjectData(broker, storage, object, kind),
+ as_uint64_(object->AsUint64(nullptr)) {}
+
+ uint64_t AsUint64() const { return as_uint64_; }
+
+ private:
+ const uint64_t as_uint64_;
+};
+
+struct PropertyDescriptor {
+ ObjectData* key = nullptr;
+ ObjectData* value = nullptr;
+ PropertyDetails details = PropertyDetails::Empty();
+ FieldIndex field_index;
+ ObjectData* field_owner = nullptr;
+ ObjectData* field_type = nullptr;
+};
+
+class MapData : public HeapObjectData {
+ public:
+ MapData(JSHeapBroker* broker, ObjectData** storage, Handle<Map> object,
+ ObjectDataKind kind = ObjectDataKind::kSerializedHeapObject);
+
+ InstanceType instance_type() const { return instance_type_; }
+ int instance_size() const { return instance_size_; }
+ byte bit_field() const { return bit_field_; }
+ byte bit_field2() const { return bit_field2_; }
+ uint32_t bit_field3() const { return bit_field3_; }
+ bool can_be_deprecated() const { return can_be_deprecated_; }
+ bool can_transition() const { return can_transition_; }
+ int in_object_properties_start_in_words() const {
+ CHECK(InstanceTypeChecker::IsJSObject(instance_type()));
+ return in_object_properties_start_in_words_;
+ }
+ int in_object_properties() const {
+ CHECK(InstanceTypeChecker::IsJSObject(instance_type()));
+ return in_object_properties_;
+ }
+ int constructor_function_index() const { return constructor_function_index_; }
+ int NextFreePropertyIndex() const { return next_free_property_index_; }
+ int UnusedPropertyFields() const { return unused_property_fields_; }
+ bool supports_fast_array_iteration() const {
+ return supports_fast_array_iteration_;
+ }
+ bool supports_fast_array_resize() const {
+ return supports_fast_array_resize_;
+ }
+ bool is_abandoned_prototype_map() const {
+ return is_abandoned_prototype_map_;
+ }
+
+ // Extra information.
+
+ void SerializeElementsKindGeneralizations(JSHeapBroker* broker);
+ const ZoneVector<ObjectData*>& elements_kind_generalizations() const {
+ CHECK(serialized_elements_kind_generalizations_);
+ return elements_kind_generalizations_;
+ }
+
+ // Serialize a single (or all) own slot(s) of the descriptor array and recurse
+ // on field owner(s).
+ bool TrySerializeOwnDescriptor(JSHeapBroker* broker,
+ InternalIndex descriptor_index);
+ void SerializeOwnDescriptor(JSHeapBroker* broker,
+ InternalIndex descriptor_index) {
+ CHECK(TrySerializeOwnDescriptor(broker, descriptor_index));
+ }
+ void SerializeOwnDescriptors(JSHeapBroker* broker);
+ ObjectData* GetStrongValue(InternalIndex descriptor_index) const;
+ ObjectData* instance_descriptors() const { return instance_descriptors_; }
+
+ void SerializeRootMap(JSHeapBroker* broker);
+ ObjectData* FindRootMap() const;
+
+ void SerializeConstructor(JSHeapBroker* broker);
+ ObjectData* GetConstructor() const {
+ CHECK(serialized_constructor_);
+ return constructor_;
+ }
+
+ void SerializeBackPointer(JSHeapBroker* broker);
+ ObjectData* GetBackPointer() const {
+ CHECK(serialized_backpointer_);
+ return backpointer_;
+ }
+
+ bool TrySerializePrototype(JSHeapBroker* broker);
+ void SerializePrototype(JSHeapBroker* broker) {
+ CHECK(TrySerializePrototype(broker));
+ }
+ ObjectData* prototype() const {
+ DCHECK_EQ(serialized_prototype_, prototype_ != nullptr);
+ return prototype_;
+ }
+
+ void SerializeForElementLoad(JSHeapBroker* broker);
+
+ void SerializeForElementStore(JSHeapBroker* broker);
+
+ bool has_extra_serialized_data() const {
+ return serialized_elements_kind_generalizations_ ||
+ serialized_own_descriptors_ || serialized_constructor_ ||
+ serialized_backpointer_ || serialized_prototype_ ||
+ serialized_root_map_ || serialized_for_element_load_ ||
+ serialized_for_element_store_;
+ }
+
+ private:
+ // The following fields should be const in principle, but construction
+ // requires locking the MapUpdater lock. For this reason, it's easier to
+ // initialize these inside the constructor body, not in the initializer list.
+
+ // This block of fields will always be serialized.
+ InstanceType instance_type_;
+ int instance_size_;
+ uint32_t bit_field3_;
+ int unused_property_fields_;
+ bool is_abandoned_prototype_map_;
+ int in_object_properties_;
+
+  // These fields will only be serialized when concurrent inlining is off.
+ byte bit_field_;
+ byte bit_field2_;
+ bool can_be_deprecated_;
+ bool can_transition_;
+ int in_object_properties_start_in_words_;
+ int constructor_function_index_;
+ int next_free_property_index_;
+ bool supports_fast_array_iteration_;
+ bool supports_fast_array_resize_;
+
+  // These extra fields still have to be serialized (e.g. prototype_) even with
+  // concurrent inlining, since those objects have fields themselves that are
+  // not read directly. This means that, for example, even though we can get
+  // the prototype itself with direct reads, some of its fields require
+  // serialization.
+ bool serialized_elements_kind_generalizations_ = false;
+ ZoneVector<ObjectData*> elements_kind_generalizations_;
+
+ bool serialized_own_descriptors_ = false;
+ ObjectData* instance_descriptors_ = nullptr;
+
+ bool serialized_constructor_ = false;
+ ObjectData* constructor_ = nullptr;
+
+ bool serialized_backpointer_ = false;
+ ObjectData* backpointer_ = nullptr;
+
+ bool serialized_prototype_ = false;
+ ObjectData* prototype_ = nullptr;
+
+ bool serialized_root_map_ = false;
+ ObjectData* root_map_ = nullptr;
+
+ bool serialized_for_element_load_ = false;
+
+ bool serialized_for_element_store_ = false;
+};
+
+AccessorInfoData::AccessorInfoData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<AccessorInfo> object)
+ : HeapObjectData(broker, storage, object) {
+ DCHECK(!broker->is_concurrent_inlining());
+}
+
+AllocationSiteData::AllocationSiteData(JSHeapBroker* broker,
+ ObjectData** storage,
+ Handle<AllocationSite> object)
+ : HeapObjectData(broker, storage, object),
+ PointsToLiteral_(object->PointsToLiteral()),
+ GetAllocationType_(object->GetAllocationType()) {
+ if (PointsToLiteral_) {
+ IsFastLiteral_ = IsInlinableFastLiteral(
+ handle(object->boilerplate(kAcquireLoad), broker->isolate()));
+ } else {
+ GetElementsKind_ = object->GetElementsKind();
+ CanInlineCall_ = object->CanInlineCall();
+ }
+}
+
+void AllocationSiteData::SerializeBoilerplate(JSHeapBroker* broker) {
+ if (serialized_boilerplate_) return;
+ serialized_boilerplate_ = true;
+
+ TraceScope tracer(broker, this, "AllocationSiteData::SerializeBoilerplate");
+ Handle<AllocationSite> site = Handle<AllocationSite>::cast(object());
+
+ CHECK(IsFastLiteral_);
+ DCHECK_NULL(boilerplate_);
+ boilerplate_ = broker->GetOrCreateData(site->boilerplate(kAcquireLoad));
+ if (!boilerplate_->should_access_heap()) {
+ boilerplate_->AsJSObject()->SerializeAsBoilerplate(broker);
+ }
+
+ DCHECK_NULL(nested_site_);
+ nested_site_ = broker->GetOrCreateData(site->nested_site());
+ if (nested_site_->IsAllocationSite() && !nested_site_->should_access_heap()) {
+ nested_site_->AsAllocationSite()->SerializeBoilerplate(broker);
+ }
+}
+
+HeapObjectData::HeapObjectData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<HeapObject> object, ObjectDataKind kind)
+ : ObjectData(broker, storage, object, kind),
+ // We have to use a raw cast below instead of AsMap() because of
+ // recursion. AsMap() would call IsMap(), which accesses the
+ // instance_type_ member. In the case of constructing the MapData for the
+ // meta map (whose map is itself), this member has not yet been
+ // initialized.
+ map_(broker->GetOrCreateData(object->map(kAcquireLoad))) {
+ CHECK_IMPLIES(kind == kSerializedHeapObject,
+ broker->mode() == JSHeapBroker::kSerializing);
+ CHECK_IMPLIES(broker->mode() == JSHeapBroker::kSerialized,
+ kind == kBackgroundSerializedHeapObject);
+}
+
+base::Optional<bool> HeapObjectData::TryGetBooleanValue(
+ JSHeapBroker* broker) const {
+ // Keep in sync with Object::BooleanValue.
+ auto result = TryGetBooleanValueImpl(broker);
+ DCHECK_IMPLIES(broker->IsMainThread() && result.has_value(),
+ result.value() == object()->BooleanValue(broker->isolate()));
+ return result;
+}
+
+base::Optional<bool> HeapObjectData::TryGetBooleanValueImpl(
+ JSHeapBroker* broker) const {
+ DisallowGarbageCollection no_gc;
+ Object o = *object();
+ Isolate* isolate = broker->isolate();
+ const InstanceType t = GetMapInstanceType();
+ if (o.IsTrue(isolate)) {
+ return true;
+ } else if (o.IsFalse(isolate)) {
+ return false;
+ } else if (o.IsNullOrUndefined(isolate)) {
+ return false;
+ } else if (MapRef{broker, map()}.is_undetectable()) {
+ return false; // Undetectable object is false.
+ } else if (InstanceTypeChecker::IsString(t)) {
+ // TODO(jgruber): Implement in possible cases.
+ return {};
+ } else if (InstanceTypeChecker::IsHeapNumber(t)) {
+ return {};
+ } else if (InstanceTypeChecker::IsBigInt(t)) {
+ return {};
+ }
+ return true;
+}
+
+InstanceType HeapObjectData::GetMapInstanceType() const {
+ ObjectData* map_data = map();
+ if (map_data->should_access_heap()) {
+ return Handle<Map>::cast(map_data->object())->instance_type();
+ }
+ return map_data->AsMap()->instance_type();
+}
+
+namespace {
+
+bool IsReadOnlyLengthDescriptor(Isolate* isolate, Handle<Map> jsarray_map) {
+ DCHECK(!jsarray_map->is_dictionary_map());
+ DescriptorArray descriptors =
+ jsarray_map->instance_descriptors(isolate, kRelaxedLoad);
+ static_assert(
+ JSArray::kLengthOffset == JSObject::kHeaderSize,
+ "The length should be the first property on the descriptor array");
+ InternalIndex offset(0);
+ return descriptors.GetDetails(offset).IsReadOnly();
+}
+
+// Important: this predicate does not check Protectors::IsNoElementsIntact. The
+// compiler checks protectors through the compilation dependency mechanism; it
+// doesn't make sense to do that here as part of every MapData construction.
+// Callers *must* take care to take the correct dependency themselves.
+bool SupportsFastArrayIteration(JSHeapBroker* broker, Handle<Map> map) {
+ return map->instance_type() == JS_ARRAY_TYPE &&
+ IsFastElementsKind(map->elements_kind()) &&
+ map->prototype().IsJSArray() &&
+ broker->IsArrayOrObjectPrototype(broker->CanonicalPersistentHandle(
+ JSArray::cast(map->prototype())));
+}
+
+bool SupportsFastArrayResize(JSHeapBroker* broker, Handle<Map> map) {
+ return SupportsFastArrayIteration(broker, map) && map->is_extensible() &&
+ !map->is_dictionary_map() &&
+ !IsReadOnlyLengthDescriptor(broker->isolate(), map);
+}
+
+} // namespace
+
+MapData::MapData(JSHeapBroker* broker, ObjectData** storage, Handle<Map> object,
+ ObjectDataKind kind)
+ : HeapObjectData(broker, storage, object, kind),
+ elements_kind_generalizations_(broker->zone()) {
+  // This lock ensures that MapData can always be background-serialized, i.e.
+ // while the lock is held the Map object may not be modified (except in
+ // benign ways).
+ // TODO(jgruber): Consider removing this lock by being smrt.
+ JSHeapBroker::MapUpdaterGuardIfNeeded mumd_scope(
+ broker, broker->isolate()->map_updater_access());
+
+  // When background-serializing the map, we can perform a lite serialization,
+  // since some of the Map's fields can be read directly through the MapRef.
+
+ // Even though MapRefs can read {instance_type} directly, other classes depend
+ // on {instance_type} being serialized.
+ instance_type_ = object->instance_type();
+ instance_size_ = object->instance_size();
+
+  // Both bit_field3 and (below) bit_field are special fields: Even though most
+  // of the individual bits inside the bitfield could be read / written
+  // non-atomically, the bitfield itself has to use atomic relaxed accessors,
+  // since some fields can be modified in live objects.
+  // TODO(solanes, v8:7790): Assess if adding the exclusive lock in more places
+  // (e.g. for set_has_non_instance_prototype) makes sense. Pros: these fields
+  // can use the non-atomic accessors. Cons: We would be acquiring an exclusive
+  // lock in more places.
+ bit_field3_ = object->relaxed_bit_field3();
+ unused_property_fields_ = object->UnusedPropertyFields();
+ is_abandoned_prototype_map_ = object->is_abandoned_prototype_map();
+ in_object_properties_ =
+ object->IsJSObjectMap() ? object->GetInObjectProperties() : 0;
+
+  // These fields only need to be serialized when concurrent inlining is
+  // disabled and direct reads are therefore unavailable.
+ if (!broker->is_concurrent_inlining()) {
+ bit_field_ = object->relaxed_bit_field();
+ bit_field2_ = object->bit_field2();
+ can_be_deprecated_ = object->NumberOfOwnDescriptors() > 0
+ ? object->CanBeDeprecated()
+ : false;
+ can_transition_ = object->CanTransition();
+ in_object_properties_start_in_words_ =
+ object->IsJSObjectMap() ? object->GetInObjectPropertiesStartInWords()
+ : 0;
+ next_free_property_index_ = object->NextFreePropertyIndex();
+ constructor_function_index_ = object->IsPrimitiveMap()
+ ? object->GetConstructorFunctionIndex()
+ : Map::kNoConstructorFunctionIndex;
+ supports_fast_array_iteration_ = SupportsFastArrayIteration(broker, object);
+ supports_fast_array_resize_ = SupportsFastArrayResize(broker, object);
+ }
+}
+
+JSFunctionData::JSFunctionData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<JSFunction> object)
+ : JSObjectData(broker, storage, object),
+ has_feedback_vector_(object->has_feedback_vector()),
+ has_initial_map_(object->has_prototype_slot() &&
+ object->has_initial_map()),
+ has_prototype_(object->has_prototype_slot() && object->has_prototype()),
+ PrototypeRequiresRuntimeLookup_(
+ object->PrototypeRequiresRuntimeLookup()) {}
+
+void JSFunctionData::Serialize(JSHeapBroker* broker) {
+ if (serialized_) return;
+ serialized_ = true;
+
+ TraceScope tracer(broker, this, "JSFunctionData::Serialize");
+ Handle<JSFunction> function = Handle<JSFunction>::cast(object());
+
+ DCHECK_NULL(context_);
+ DCHECK_NULL(native_context_);
+ DCHECK_NULL(initial_map_);
+ DCHECK_NULL(prototype_);
+ DCHECK_NULL(shared_);
+
+ context_ = broker->GetOrCreateData(function->context());
+ native_context_ = broker->GetOrCreateData(function->native_context());
+ shared_ = broker->GetOrCreateData(function->shared());
+
+ initial_map_ = has_initial_map()
+ ? broker->GetOrCreateData(function->initial_map())
+ : nullptr;
+ prototype_ = has_prototype() ? broker->GetOrCreateData(function->prototype())
+ : nullptr;
+
+ if (initial_map_ != nullptr) {
+ initial_map_instance_size_with_min_slack_ =
+ function->ComputeInstanceSizeWithMinSlack(broker->isolate());
+ }
+ if (initial_map_ != nullptr && !initial_map_->should_access_heap()) {
+ if (initial_map_->AsMap()->instance_type() == JS_ARRAY_TYPE) {
+ initial_map_->AsMap()->SerializeElementsKindGeneralizations(broker);
+ }
+ initial_map_->AsMap()->SerializeConstructor(broker);
+ // TODO(neis): This is currently only needed for native_context's
+ // object_function, as used by GetObjectCreateMap. If no further use sites
+ // show up, we should move this into NativeContextData::Serialize.
+ initial_map_->AsMap()->SerializePrototype(broker);
+ }
+}
+
+void JSFunctionData::SerializeCodeAndFeedback(JSHeapBroker* broker) {
+ DCHECK(serialized_);
+ if (serialized_code_and_feedback_) return;
+ serialized_code_and_feedback_ = true;
+
+ TraceScope tracer(broker, this, "JSFunctionData::SerializeCodeAndFeedback");
+ Handle<JSFunction> function = Handle<JSFunction>::cast(object());
+
+ DCHECK_NULL(feedback_cell_);
+ DCHECK_NULL(feedback_vector_);
+ DCHECK_NULL(code_);
+ if (!broker->is_concurrent_inlining()) {
+ // This is conditional because, with concurrent inlining, Code objects are
+ // never serialized: we only need to represent the code object in
+ // serialized data when we're unable to perform direct heap accesses.
+ code_ = broker->GetOrCreateData(function->code(kAcquireLoad));
+ }
+ feedback_cell_ = broker->GetOrCreateData(function->raw_feedback_cell());
+ feedback_vector_ = has_feedback_vector()
+ ? broker->GetOrCreateData(function->feedback_vector())
+ : nullptr;
+}
+
+void MapData::SerializeElementsKindGeneralizations(JSHeapBroker* broker) {
+ if (serialized_elements_kind_generalizations_) return;
+ serialized_elements_kind_generalizations_ = true;
+
+ TraceScope tracer(broker, this,
+ "MapData::SerializeElementsKindGeneralizations");
+ DCHECK_EQ(instance_type(), JS_ARRAY_TYPE);
+ MapRef self(broker, this);
+ ElementsKind from_kind = self.elements_kind();
+ DCHECK(elements_kind_generalizations_.empty());
+ for (int i = FIRST_FAST_ELEMENTS_KIND; i <= LAST_FAST_ELEMENTS_KIND; i++) {
+ ElementsKind to_kind = static_cast<ElementsKind>(i);
+ if (IsMoreGeneralElementsKindTransition(from_kind, to_kind)) {
+ Handle<Map> target =
+ Map::AsElementsKind(broker->isolate(), self.object(), to_kind);
+ elements_kind_generalizations_.push_back(broker->GetOrCreateData(target));
+ }
+ }
+}
+
+class DescriptorArrayData : public HeapObjectData {
+ public:
+ DescriptorArrayData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<DescriptorArray> object)
+ : HeapObjectData(broker, storage, object), contents_(broker->zone()) {
+ DCHECK(!broker->is_concurrent_inlining());
+ }
+
+ ObjectData* FindFieldOwner(InternalIndex descriptor_index) const {
+ return contents_.at(descriptor_index.as_int()).field_owner;
+ }
+
+ PropertyDetails GetPropertyDetails(InternalIndex descriptor_index) const {
+ return contents_.at(descriptor_index.as_int()).details;
+ }
+
+ ObjectData* GetPropertyKey(InternalIndex descriptor_index) const {
+ return contents_.at(descriptor_index.as_int()).key;
+ }
+
+ FieldIndex GetFieldIndexFor(InternalIndex descriptor_index) const {
+ return contents_.at(descriptor_index.as_int()).field_index;
+ }
+
+ ObjectData* GetFieldType(InternalIndex descriptor_index) const {
+ return contents_.at(descriptor_index.as_int()).field_type;
+ }
+
+ ObjectData* GetStrongValue(InternalIndex descriptor_index) const {
+ return contents_.at(descriptor_index.as_int()).value;
+ }
+
+ bool serialized_descriptor(InternalIndex descriptor_index) const {
+ return contents_.find(descriptor_index.as_int()) != contents_.end();
+ }
+
+ void SerializeDescriptor(JSHeapBroker* broker, Handle<Map> map,
+ InternalIndex descriptor_index);
+
+ private:
+ ZoneMap<int, PropertyDescriptor> contents_;
+};
+
+void DescriptorArrayData::SerializeDescriptor(JSHeapBroker* broker,
+ Handle<Map> map,
+ InternalIndex descriptor_index) {
+ CHECK_LT(descriptor_index.as_int(), map->NumberOfOwnDescriptors());
+ if (contents_.find(descriptor_index.as_int()) != contents_.end()) return;
+
+ Isolate* const isolate = broker->isolate();
+ auto descriptors = Handle<DescriptorArray>::cast(object());
+ CHECK_EQ(*descriptors, map->instance_descriptors(isolate));
+
+ PropertyDescriptor d;
+ d.key = broker->GetOrCreateData(descriptors->GetKey(descriptor_index));
+ MaybeObject value = descriptors->GetValue(descriptor_index);
+ HeapObject obj;
+ if (value.GetHeapObjectIfStrong(&obj)) {
+ d.value = broker->GetOrCreateData(obj);
+ }
+ d.details = descriptors->GetDetails(descriptor_index);
+ if (d.details.location() == kField) {
+ d.field_index = FieldIndex::ForDescriptor(*map, descriptor_index);
+ d.field_owner =
+ broker->GetOrCreateData(map->FindFieldOwner(isolate, descriptor_index));
+ d.field_type =
+ broker->GetOrCreateData(descriptors->GetFieldType(descriptor_index));
+ }
+ contents_[descriptor_index.as_int()] = d;
+
+ if (d.details.location() == kField && !d.field_owner->should_access_heap()) {
+ // Recurse on the owner map.
+ d.field_owner->AsMap()->SerializeOwnDescriptor(broker, descriptor_index);
+ }
+
+ TRACE(broker, "Copied descriptor " << descriptor_index.as_int() << " into "
+ << this << " (" << contents_.size()
+ << " total)");
+}
+
+class FeedbackCellData : public HeapObjectData {
+ public:
+ FeedbackCellData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<FeedbackCell> object);
+
+ ObjectData* value() const { return value_; }
+
+ private:
+ ObjectData* const value_;
+};
+
+FeedbackCellData::FeedbackCellData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<FeedbackCell> object)
+ : HeapObjectData(broker, storage, object),
+ value_(object->value().IsFeedbackVector()
+ ? broker->GetOrCreateData(object->value())
+ : nullptr) {
+ DCHECK(!broker->is_concurrent_inlining());
+}
+
+class FeedbackVectorData : public HeapObjectData {
+ public:
+ FeedbackVectorData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<FeedbackVector> object);
+
+ double invocation_count() const { return invocation_count_; }
+
+ ObjectData* shared_function_info() {
+ CHECK(serialized_);
+ return shared_function_info_;
+ }
+
+ void Serialize(JSHeapBroker* broker);
+ bool serialized() const { return serialized_; }
+ ObjectData* GetClosureFeedbackCell(JSHeapBroker* broker, int index) const;
+
+ private:
+ double const invocation_count_;
+
+ bool serialized_ = false;
+ ObjectData* shared_function_info_;
+ ZoneVector<ObjectData*> closure_feedback_cell_array_;
+};
+
+FeedbackVectorData::FeedbackVectorData(JSHeapBroker* broker,
+ ObjectData** storage,
+ Handle<FeedbackVector> object)
+ : HeapObjectData(broker, storage, object),
+ invocation_count_(object->invocation_count()),
+ closure_feedback_cell_array_(broker->zone()) {
+ DCHECK(!broker->is_concurrent_inlining());
+}
+
+ObjectData* FeedbackVectorData::GetClosureFeedbackCell(JSHeapBroker* broker,
+ int index) const {
+ CHECK_GE(index, 0);
+
+ size_t cell_array_size = closure_feedback_cell_array_.size();
+ if (!serialized_) {
+ DCHECK_EQ(cell_array_size, 0);
+ TRACE_BROKER_MISSING(broker,
+ " closure feedback cell array for vector " << this);
+ return nullptr;
+ }
+ CHECK_LT(index, cell_array_size);
+ return closure_feedback_cell_array_[index];
+}
+
+void FeedbackVectorData::Serialize(JSHeapBroker* broker) {
+ if (serialized_) return;
+ serialized_ = true;
+
+ TraceScope tracer(broker, this, "FeedbackVectorData::Serialize");
+ Handle<FeedbackVector> vector = Handle<FeedbackVector>::cast(object());
+ Handle<SharedFunctionInfo> sfi(vector->shared_function_info(),
+ broker->isolate());
+ shared_function_info_ = broker->GetOrCreateData(sfi);
+ DCHECK(closure_feedback_cell_array_.empty());
+ int length = vector->closure_feedback_cell_array().length();
+ closure_feedback_cell_array_.reserve(length);
+ for (int i = 0; i < length; ++i) {
+ Handle<FeedbackCell> cell = vector->GetClosureFeedbackCell(i);
+ ObjectData* cell_data = broker->GetOrCreateData(cell);
+ closure_feedback_cell_array_.push_back(cell_data);
+ }
+ TRACE(broker, "Copied " << length << " feedback cells");
+}
+
+class FixedArrayBaseData : public HeapObjectData {
+ public:
+ FixedArrayBaseData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<FixedArrayBase> object, ObjectDataKind kind)
+ : HeapObjectData(broker, storage, object, kind),
+ length_(object->length()) {}
+
+ int length() const { return length_; }
+
+ private:
+ int const length_;
+};
+
+class FixedArrayData : public FixedArrayBaseData {
+ public:
+ FixedArrayData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<FixedArray> object, ObjectDataKind kind)
+ : FixedArrayBaseData(broker, storage, object, kind) {}
+};
+
+class ObjectBoilerplateDescriptionData : public FixedArrayData {
+ public:
+ ObjectBoilerplateDescriptionData(
+ JSHeapBroker* broker, ObjectData** storage,
+ Handle<ObjectBoilerplateDescription> object,
+ ObjectDataKind kind = ObjectDataKind::kSerializedHeapObject)
+ : FixedArrayData(broker, storage, object, kind), size_(object->size()) {
+ DCHECK(!broker->is_concurrent_inlining());
+ }
+
+ int size() const { return size_; }
+
+ private:
+ int const size_;
+};
+
+// Only used in JSNativeContextSpecialization.
+class ScriptContextTableData : public FixedArrayData {
+ public:
+ ScriptContextTableData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<ScriptContextTable> object, ObjectDataKind kind)
+ : FixedArrayData(broker, storage, object, kind) {}
+};
+
+JSDataViewData::JSDataViewData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<JSDataView> object)
+ : JSObjectData(broker, storage, object),
+ byte_length_(object->byte_length()) {}
+
+JSBoundFunctionData::JSBoundFunctionData(JSHeapBroker* broker,
+ ObjectData** storage,
+ Handle<JSBoundFunction> object)
+ : JSObjectData(broker, storage, object) {}
+
+bool JSBoundFunctionData::Serialize(JSHeapBroker* broker) {
+ if (serialized_) return true;
+ if (broker->StackHasOverflowed()) return false;
+
+ TraceScope tracer(broker, this, "JSBoundFunctionData::Serialize");
+ Handle<JSBoundFunction> function = Handle<JSBoundFunction>::cast(object());
+
+ // We don't immediately set {serialized_} in order to correctly handle the
+ // case where a recursive call to this method reaches the stack limit.
+
+ DCHECK_NULL(bound_target_function_);
+ bound_target_function_ =
+ broker->GetOrCreateData(function->bound_target_function());
+ bool serialized_nested = true;
+ if (!bound_target_function_->should_access_heap()) {
+ if (bound_target_function_->IsJSBoundFunction()) {
+ serialized_nested =
+ bound_target_function_->AsJSBoundFunction()->Serialize(broker);
+ } else if (bound_target_function_->IsJSFunction()) {
+ bound_target_function_->AsJSFunction()->Serialize(broker);
+ }
+ }
+ if (!serialized_nested) {
+ // We couldn't serialize all nested bound functions due to stack
+ // overflow. Give up.
+ DCHECK(!serialized_);
+ bound_target_function_ = nullptr; // Reset to sync with serialized_.
+ return false;
+ }
+
+ serialized_ = true;
+
+ DCHECK_NULL(bound_arguments_);
+ bound_arguments_ = broker->GetOrCreateData(function->bound_arguments());
+
+ DCHECK_NULL(bound_this_);
+ bound_this_ = broker->GetOrCreateData(function->bound_this());
+
+ return true;
+}
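+
+// A minimal standalone sketch (illustrative names, not V8 API) of the
+// deferred-flag recursion pattern used above: the "done" flag is only set
+// after every nested call succeeds, so a bail-out (here a depth budget
+// standing in for the stack-overflow check) leaves the node unmarked and
+// safely retryable.
+#include <vector>
+
+struct NodeSketch {
+  std::vector<NodeSketch*> nested;
+  bool serialized = false;
+};
+
+inline bool SerializeNodeSketch(NodeSketch* node, int depth_budget) {
+  if (node->serialized) return true;
+  if (depth_budget == 0) return false;  // give up; the caller may retry later
+  for (NodeSketch* child : node->nested) {
+    if (!SerializeNodeSketch(child, depth_budget - 1)) {
+      return false;  // do NOT set serialized; state stays consistent
+    }
+  }
+  node->serialized = true;  // only mark once everything below succeeded
+  return true;
+}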
+
+JSObjectData::JSObjectData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<JSObject> object)
+ : JSReceiverData(broker, storage, object,
+ ObjectDataKind::kSerializedHeapObject),
+ inobject_fields_(broker->zone()),
+ own_constant_elements_(broker->zone()),
+ own_properties_(broker->zone()) {}
+
+class FixedDoubleArrayData : public FixedArrayBaseData {
+ public:
+ FixedDoubleArrayData(
+ JSHeapBroker* broker, ObjectData** storage,
+ Handle<FixedDoubleArray> object,
+ ObjectDataKind kind = ObjectDataKind::kNeverSerializedHeapObject)
+ : FixedArrayBaseData(broker, storage, object, kind) {
+ DCHECK(!broker->is_concurrent_inlining());
+ }
+};
+
+class BytecodeArrayData : public FixedArrayBaseData {
+ public:
+ int register_count() const { return register_count_; }
+ int parameter_count() const { return parameter_count_; }
+ interpreter::Register incoming_new_target_or_generator_register() const {
+ return incoming_new_target_or_generator_register_;
+ }
+
+ BytecodeArrayData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<BytecodeArray> object)
+ : FixedArrayBaseData(broker, storage, object,
+ ObjectDataKind::kNeverSerializedHeapObject),
+ register_count_(object->register_count()),
+ parameter_count_(object->parameter_count()),
+ incoming_new_target_or_generator_register_(
+ object->incoming_new_target_or_generator_register()) {}
+
+ private:
+ int const register_count_;
+ int const parameter_count_;
+ interpreter::Register const incoming_new_target_or_generator_register_;
+};
+
+class JSArrayData : public JSObjectData {
+ public:
+ JSArrayData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<JSArray> object);
+
+ void Serialize(JSHeapBroker* broker);
+ ObjectData* length() const {
+ CHECK(serialized_);
+ return length_;
+ }
+
+ ObjectData* GetOwnElement(
+ JSHeapBroker* broker, uint32_t index,
+ SerializationPolicy policy = SerializationPolicy::kAssumeSerialized);
+
+ private:
+ bool serialized_ = false;
+ ObjectData* length_ = nullptr;
+
+ // Elements (indexed properties) that either
+ // (1) are known to exist directly on the object, or
+ // (2) are known not to (possibly they don't exist at all).
+ // In case (2), the second pair component is nullptr.
+ ZoneVector<std::pair<uint32_t, ObjectData*>> own_elements_;
+};
+
+JSArrayData::JSArrayData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<JSArray> object)
+ : JSObjectData(broker, storage, object), own_elements_(broker->zone()) {}
+
+void JSArrayData::Serialize(JSHeapBroker* broker) {
+ CHECK(!broker->is_concurrent_inlining());
+
+ if (serialized_) return;
+ serialized_ = true;
+
+ TraceScope tracer(broker, this, "JSArrayData::Serialize");
+ Handle<JSArray> jsarray = Handle<JSArray>::cast(object());
+
+ DCHECK_NULL(length_);
+ length_ = broker->GetOrCreateData(jsarray->length());
+}
+
+ObjectData* JSArrayData::GetOwnElement(JSHeapBroker* broker, uint32_t index,
+ SerializationPolicy policy) {
+ for (auto const& p : own_elements_) {
+ if (p.first == index) return p.second;
+ }
+
+ if (policy == SerializationPolicy::kAssumeSerialized) {
+ TRACE_MISSING(broker, "knowledge about index " << index << " on " << this);
+ return nullptr;
+ }
+
+ base::Optional<ObjectRef> element =
+ GetOwnElementFromHeap(broker, object(), index, false);
+ ObjectData* result = element.has_value() ? element->data() : nullptr;
+ own_elements_.push_back({index, result});
+ return result;
+}
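+
+// A minimal standalone sketch of the negative-caching lookup above: both hits
+// and confirmed misses are recorded, with nullptr meaning "known absent", so
+// the slow path runs at most once per index. Names are illustrative.
+#include <cstdint>
+#include <utility>
+#include <vector>
+
+struct ElementDataSketch {};  // stand-in for ObjectData
+
+class OwnElementCacheSketch {
+ public:
+  ElementDataSketch* Get(uint32_t index) {
+    // A cached pair is returned even when its value is nullptr, i.e. when the
+    // element is known not to exist.
+    for (auto const& p : cache_) {
+      if (p.first == index) return p.second;
+    }
+    ElementDataSketch* result = LookupSlow(index);  // may legitimately be null
+    cache_.push_back({index, result});
+    return result;
+  }
+
+ private:
+  ElementDataSketch* LookupSlow(uint32_t) { return nullptr; }  // placeholder
+  std::vector<std::pair<uint32_t, ElementDataSketch*>> cache_;
+};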
+
+class ScopeInfoData : public HeapObjectData {
+ public:
+ ScopeInfoData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<ScopeInfo> object);
+
+ int ContextLength() const { return context_length_; }
+ bool HasContextExtensionSlot() const { return has_context_extension_slot_; }
+ bool HasOuterScopeInfo() const { return has_outer_scope_info_; }
+
+ ObjectData* OuterScopeInfo() const { return outer_scope_info_; }
+ void SerializeScopeInfoChain(JSHeapBroker* broker);
+
+ private:
+ int const context_length_;
+ bool const has_context_extension_slot_;
+ bool const has_outer_scope_info_;
+
+ // Only serialized via SerializeScopeInfoChain.
+ ObjectData* outer_scope_info_;
+};
+
+ScopeInfoData::ScopeInfoData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<ScopeInfo> object)
+ : HeapObjectData(broker, storage, object),
+ context_length_(object->ContextLength()),
+ has_context_extension_slot_(object->HasContextExtensionSlot()),
+ has_outer_scope_info_(object->HasOuterScopeInfo()),
+ outer_scope_info_(nullptr) {
+ DCHECK(!broker->is_concurrent_inlining());
+}
+
+void ScopeInfoData::SerializeScopeInfoChain(JSHeapBroker* broker) {
+ if (outer_scope_info_) return;
+ if (!has_outer_scope_info_) return;
+ outer_scope_info_ = broker->GetOrCreateData(
+ Handle<ScopeInfo>::cast(object())->OuterScopeInfo());
+ if (!outer_scope_info_->should_access_heap()) {
+ outer_scope_info_->AsScopeInfo()->SerializeScopeInfoChain(broker);
+ }
+}
+
+class SharedFunctionInfoData : public HeapObjectData {
+ public:
+ SharedFunctionInfoData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<SharedFunctionInfo> object);
+
+ int builtin_id() const { return builtin_id_; }
+ int context_header_size() const { return context_header_size_; }
+ ObjectData* GetBytecodeArray() const { return GetBytecodeArray_; }
+ SharedFunctionInfo::Inlineability GetInlineability() const {
+ return inlineability_;
+ }
+ void SerializeFunctionTemplateInfo(JSHeapBroker* broker);
+ ObjectData* scope_info() const { return scope_info_; }
+ void SerializeScopeInfoChain(JSHeapBroker* broker);
+ ObjectData* function_template_info() const { return function_template_info_; }
+ ObjectData* GetTemplateObject(FeedbackSlot slot) const {
+ auto lookup_it = template_objects_.find(slot.ToInt());
+ if (lookup_it != template_objects_.cend()) {
+ return lookup_it->second;
+ }
+ return nullptr;
+ }
+ void SetTemplateObject(FeedbackSlot slot, ObjectData* object) {
+ CHECK(
+ template_objects_.insert(std::make_pair(slot.ToInt(), object)).second);
+ }
+
+#define DECL_ACCESSOR(type, name) \
+ type name() const { return name##_; }
+ BROKER_SFI_FIELDS(DECL_ACCESSOR)
+#undef DECL_ACCESSOR
+
+ private:
+ int const builtin_id_;
+ int const context_header_size_;
+ ObjectData* const GetBytecodeArray_;
+#define DECL_MEMBER(type, name) type const name##_;
+ BROKER_SFI_FIELDS(DECL_MEMBER)
+#undef DECL_MEMBER
+ SharedFunctionInfo::Inlineability const inlineability_;
+ ObjectData* function_template_info_;
+ ZoneMap<int, ObjectData*> template_objects_;
+ ObjectData* scope_info_;
+};
+
+SharedFunctionInfoData::SharedFunctionInfoData(
+ JSHeapBroker* broker, ObjectData** storage,
+ Handle<SharedFunctionInfo> object)
+ : HeapObjectData(broker, storage, object),
+ builtin_id_(object->HasBuiltinId() ? object->builtin_id()
+ : Builtins::kNoBuiltinId),
+ context_header_size_(object->scope_info().ContextHeaderLength()),
+ GetBytecodeArray_(object->HasBytecodeArray()
+ ? broker->GetOrCreateData(
+ object->GetBytecodeArray(broker->isolate()))
+ : nullptr)
+#define INIT_MEMBER(type, name) , name##_(object->name())
+ BROKER_SFI_FIELDS(INIT_MEMBER)
+#undef INIT_MEMBER
+ ,
+ inlineability_(object->GetInlineability(broker->isolate())),
+ function_template_info_(nullptr),
+ template_objects_(broker->zone()),
+ scope_info_(nullptr) {
+ DCHECK_EQ(HasBuiltinId_, builtin_id_ != Builtins::kNoBuiltinId);
+ DCHECK_EQ(HasBytecodeArray_, GetBytecodeArray_ != nullptr);
+}
+
+void SharedFunctionInfoData::SerializeFunctionTemplateInfo(
+ JSHeapBroker* broker) {
+ if (function_template_info_) return;
+ function_template_info_ = broker->GetOrCreateData(
+ Handle<SharedFunctionInfo>::cast(object())->function_data(kAcquireLoad));
+}
+
+void SharedFunctionInfoData::SerializeScopeInfoChain(JSHeapBroker* broker) {
+ if (scope_info_) return;
+ scope_info_ = broker->GetOrCreateData(
+ Handle<SharedFunctionInfo>::cast(object())->scope_info());
+ if (!scope_info_->should_access_heap()) {
+ scope_info_->AsScopeInfo()->SerializeScopeInfoChain(broker);
+ }
+}
+
+class SourceTextModuleData : public HeapObjectData {
+ public:
+ SourceTextModuleData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<SourceTextModule> object);
+ void Serialize(JSHeapBroker* broker);
+
+ ObjectData* GetCell(JSHeapBroker* broker, int cell_index) const;
+ ObjectData* GetImportMeta(JSHeapBroker* broker) const;
+
+ private:
+ bool serialized_ = false;
+ ZoneVector<ObjectData*> imports_;
+ ZoneVector<ObjectData*> exports_;
+ ObjectData* import_meta_;
+};
+
+SourceTextModuleData::SourceTextModuleData(JSHeapBroker* broker,
+ ObjectData** storage,
+ Handle<SourceTextModule> object)
+ : HeapObjectData(broker, storage, object),
+ imports_(broker->zone()),
+ exports_(broker->zone()),
+ import_meta_(nullptr) {}
+
+ObjectData* SourceTextModuleData::GetCell(JSHeapBroker* broker,
+ int cell_index) const {
+ if (!serialized_) {
+ DCHECK(imports_.empty());
+ TRACE_BROKER_MISSING(broker,
+ "module cell " << cell_index << " on " << this);
+ return nullptr;
+ }
+ ObjectData* cell;
+ switch (SourceTextModuleDescriptor::GetCellIndexKind(cell_index)) {
+ case SourceTextModuleDescriptor::kImport:
+ cell = imports_.at(SourceTextModule::ImportIndex(cell_index));
+ break;
+ case SourceTextModuleDescriptor::kExport:
+ cell = exports_.at(SourceTextModule::ExportIndex(cell_index));
+ break;
+ case SourceTextModuleDescriptor::kInvalid:
+ UNREACHABLE();
+ }
+ CHECK_NOT_NULL(cell);
+ return cell;
+}
+
+ObjectData* SourceTextModuleData::GetImportMeta(JSHeapBroker* broker) const {
+ CHECK(serialized_);
+ return import_meta_;
+}
+
+void SourceTextModuleData::Serialize(JSHeapBroker* broker) {
+ if (serialized_) return;
+ serialized_ = true;
+
+ TraceScope tracer(broker, this, "SourceTextModuleData::Serialize");
+ Handle<SourceTextModule> module = Handle<SourceTextModule>::cast(object());
+
+ // TODO(neis): We could be smarter and only serialize the cells we care about.
+ // TODO(neis): Define a helper for serializing a FixedArray into a ZoneVector.
+
+ DCHECK(imports_.empty());
+ Handle<FixedArray> imports(module->regular_imports(), broker->isolate());
+ int const imports_length = imports->length();
+ imports_.reserve(imports_length);
+ for (int i = 0; i < imports_length; ++i) {
+ imports_.push_back(broker->GetOrCreateData(imports->get(i)));
+ }
+ TRACE(broker, "Copied " << imports_.size() << " imports");
+
+ DCHECK(exports_.empty());
+ Handle<FixedArray> exports(module->regular_exports(), broker->isolate());
+ int const exports_length = exports->length();
+ exports_.reserve(exports_length);
+ for (int i = 0; i < exports_length; ++i) {
+ exports_.push_back(broker->GetOrCreateData(exports->get(i)));
+ }
+ TRACE(broker, "Copied " << exports_.size() << " exports");
+
+ DCHECK_NULL(import_meta_);
+ import_meta_ = broker->GetOrCreateData(module->import_meta(kAcquireLoad));
+ TRACE(broker, "Copied import_meta");
+}
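+
+// The TODO above asks for a helper that copies an indexed array into a zone
+// vector; a minimal standalone sketch of such a helper (std::vector and a
+// caller-supplied getter, purely illustrative) could look like this.
+#include <vector>
+
+template <typename T, typename Source, typename Getter>
+std::vector<T> CopyIndexedToVector(Source& source, int length, Getter get) {
+  std::vector<T> out;
+  out.reserve(length);
+  for (int i = 0; i < length; ++i) {
+    out.push_back(get(source, i));  // mirrors the imports/exports loops above
+  }
+  return out;
+}
+
+// Usage (illustrative):
+//   auto copied = CopyIndexedToVector<int>(array, n,
+//                                          [](auto& a, int i) { return a[i]; });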
+
+class CellData : public HeapObjectData {
+ public:
+ CellData(JSHeapBroker* broker, ObjectData** storage, Handle<Cell> object)
+ : HeapObjectData(broker, storage, object) {
+ DCHECK(!broker->is_concurrent_inlining());
+ }
+};
+
+class JSGlobalObjectData : public JSObjectData {
+ public:
+ JSGlobalObjectData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<JSGlobalObject> object);
+ bool IsDetached() const { return is_detached_; }
+
+ ObjectData* GetPropertyCell(
+ JSHeapBroker* broker, ObjectData* name,
+ SerializationPolicy policy = SerializationPolicy::kAssumeSerialized);
+
+ private:
+ bool const is_detached_;
+
+ // Properties that either
+ // (1) are known to exist as property cells on the global object, or
+ // (2) are known not to (possibly they don't exist at all).
+ // In case (2), the second pair component is nullptr.
+ ZoneVector<std::pair<ObjectData*, ObjectData*>> properties_;
+};
+
+JSGlobalObjectData::JSGlobalObjectData(JSHeapBroker* broker,
+ ObjectData** storage,
+ Handle<JSGlobalObject> object)
+ : JSObjectData(broker, storage, object),
+ is_detached_(object->IsDetached()),
+ properties_(broker->zone()) {}
+
+class JSGlobalProxyData : public JSObjectData {
+ public:
+ JSGlobalProxyData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<JSGlobalProxy> object);
+};
+
+JSGlobalProxyData::JSGlobalProxyData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<JSGlobalProxy> object)
+ : JSObjectData(broker, storage, object) {}
+
+namespace {
+
+base::Optional<PropertyCellRef> GetPropertyCellFromHeap(JSHeapBroker* broker,
+ Handle<Name> name) {
+ LookupIterator it(
+ broker->isolate(),
+ handle(broker->target_native_context().object()->global_object(),
+ broker->isolate()),
+ name, LookupIterator::OWN);
+ it.TryLookupCachedProperty();
+ if (it.state() == LookupIterator::DATA &&
+ it.GetHolder<JSObject>()->IsJSGlobalObject()) {
+ return TryMakeRef(broker, it.GetPropertyCell());
+ }
+ return base::nullopt;
+}
+
+} // namespace
+
+ObjectData* JSGlobalObjectData::GetPropertyCell(JSHeapBroker* broker,
+ ObjectData* name,
+ SerializationPolicy policy) {
+ CHECK_NOT_NULL(name);
+ for (auto const& p : properties_) {
+ if (p.first == name) return p.second;
+ }
+
+ if (policy == SerializationPolicy::kAssumeSerialized) {
+ TRACE_MISSING(broker, "knowledge about global property " << name);
+ return nullptr;
+ }
+
+ ObjectData* result = nullptr;
+ base::Optional<PropertyCellRef> cell =
+ GetPropertyCellFromHeap(broker, Handle<Name>::cast(name->object()));
+ if (cell.has_value()) {
+ result = cell->data();
+ if (!result->should_access_heap()) {
+ result->AsPropertyCell()->Serialize(broker);
+ }
+ }
+ properties_.push_back({name, result});
+ return result;
+}
+
+class TemplateObjectDescriptionData : public HeapObjectData {
+ public:
+ TemplateObjectDescriptionData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<TemplateObjectDescription> object)
+ : HeapObjectData(broker, storage, object) {
+ DCHECK(!broker->is_concurrent_inlining());
+ }
+};
+
+class CodeData : public HeapObjectData {
+ public:
+ CodeData(JSHeapBroker* broker, ObjectData** storage, Handle<Code> object)
+ : HeapObjectData(broker, storage, object),
+ inlined_bytecode_size_(object->inlined_bytecode_size() > 0 &&
+ !object->marked_for_deoptimization()
+ ? object->inlined_bytecode_size()
+ : 0) {
+ DCHECK(!broker->is_concurrent_inlining());
+ }
+
+ unsigned inlined_bytecode_size() const { return inlined_bytecode_size_; }
+
+ private:
+ unsigned const inlined_bytecode_size_;
+};
+
+#define DEFINE_IS(Name, ...) \
+ bool ObjectData::Is##Name() const { \
+ if (should_access_heap()) { \
+ return object()->Is##Name(); \
+ } \
+ if (is_smi()) return false; \
+ InstanceType instance_type = \
+ static_cast<const HeapObjectData*>(this)->GetMapInstanceType(); \
+ return InstanceTypeChecker::Is##Name(instance_type); \
+ }
+HEAP_BROKER_OBJECT_LIST(DEFINE_IS)
+#undef DEFINE_IS
+
+#define DEFINE_AS(Name, Kind) \
+ Name##Data* ObjectData::As##Name() { \
+ CHECK(Is##Name()); \
+ CHECK(kind_ == kSerializedHeapObject || \
+ kind_ == kBackgroundSerializedHeapObject); \
+ return static_cast<Name##Data*>(this); \
+ }
+HEAP_BROKER_OBJECT_LIST(DEFINE_AS)
+#undef DEFINE_AS
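+
+// A minimal standalone sketch of the list-macro technique used by DEFINE_IS /
+// DEFINE_AS above: one list macro enumerates the entries, and each DEFINE_*
+// macro is applied to the whole list to stamp out one function per entry.
+// The names below are illustrative.
+#include <cstring>
+
+#define SHAPE_LIST(V) \
+  V(Circle)           \
+  V(Square)           \
+  V(Triangle)
+
+#define DEFINE_PREDICATE(Name)            \
+  inline bool Is##Name(const char* tag) { \
+    return std::strcmp(tag, #Name) == 0;  \
+  }
+SHAPE_LIST(DEFINE_PREDICATE)
+#undef DEFINE_PREDICATE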
+
+ObjectData* JSObjectData::GetInobjectField(int property_index) const {
+ CHECK_LT(static_cast<size_t>(property_index), inobject_fields_.size());
+ return inobject_fields_[property_index];
+}
+
+bool JSObjectData::cow_or_empty_elements_tenured() const {
+ return cow_or_empty_elements_tenured_;
+}
+
+ObjectData* JSObjectData::elements() const {
+ CHECK(serialized_elements_);
+ return elements_;
+}
+
+void JSObjectData::SerializeAsBoilerplate(JSHeapBroker* broker) {
+ SerializeRecursiveAsBoilerplate(broker, kMaxFastLiteralDepth);
+}
+
+void JSObjectData::SerializeElements(JSHeapBroker* broker) {
+ if (serialized_elements_) return;
+ serialized_elements_ = true;
+
+ TraceScope tracer(broker, this, "JSObjectData::SerializeElements");
+ Handle<JSObject> boilerplate = Handle<JSObject>::cast(object());
+ Handle<FixedArrayBase> elements_object(boilerplate->elements(),
+ broker->isolate());
+ DCHECK_NULL(elements_);
+ elements_ = broker->GetOrCreateData(elements_object);
+ DCHECK(elements_->IsFixedArrayBase());
+}
+
+void MapData::SerializeConstructor(JSHeapBroker* broker) {
+ if (serialized_constructor_) return;
+ serialized_constructor_ = true;
+
+ TraceScope tracer(broker, this, "MapData::SerializeConstructor");
+ Handle<Map> map = Handle<Map>::cast(object());
+ DCHECK(!map->IsContextMap());
+ DCHECK_NULL(constructor_);
+ constructor_ = broker->GetOrCreateData(map->GetConstructor());
+}
+
+void MapData::SerializeBackPointer(JSHeapBroker* broker) {
+ if (serialized_backpointer_) return;
+ serialized_backpointer_ = true;
+
+ TraceScope tracer(broker, this, "MapData::SerializeBackPointer");
+ Handle<Map> map = Handle<Map>::cast(object());
+ DCHECK_NULL(backpointer_);
+ DCHECK(!map->IsContextMap());
+ backpointer_ = broker->GetOrCreateData(map->GetBackPointer());
+}
+
+bool MapData::TrySerializePrototype(JSHeapBroker* broker) {
+ if (serialized_prototype_) return true;
+
+ TraceScope tracer(broker, this, "MapData::SerializePrototype");
+ Handle<Map> map = Handle<Map>::cast(object());
+ DCHECK_NULL(prototype_);
+ prototype_ = broker->TryGetOrCreateData(map->prototype());
+ if (prototype_ == nullptr) return false;
+ serialized_prototype_ = true;
+ return true;
+}
+
+void MapData::SerializeOwnDescriptors(JSHeapBroker* broker) {
+ if (serialized_own_descriptors_) return;
+ serialized_own_descriptors_ = true;
+
+ TraceScope tracer(broker, this, "MapData::SerializeOwnDescriptors");
+ Handle<Map> map = Handle<Map>::cast(object());
+
+ for (InternalIndex i : map->IterateOwnDescriptors()) {
+ SerializeOwnDescriptor(broker, i);
+ }
+}
+
+bool MapData::TrySerializeOwnDescriptor(JSHeapBroker* broker,
+ InternalIndex descriptor_index) {
+ TraceScope tracer(broker, this, "MapData::SerializeOwnDescriptor");
+ Handle<Map> map = Handle<Map>::cast(object());
+ Isolate* isolate = broker->isolate();
+
+ if (instance_descriptors_ == nullptr) {
+ instance_descriptors_ =
+ broker->TryGetOrCreateData(map->instance_descriptors(kAcquireLoad));
+ if (instance_descriptors_ == nullptr) return false;
+ }
+
+ if (instance_descriptors()->should_access_heap()) {
+ // When accessing the fields concurrently, we still have to recurse on the
+ // owner map if it is different from the current map. This is because
+ // {instance_descriptors_} gets set on SerializeOwnDescriptor and otherwise
+ // we risk the field owner having a null {instance_descriptors_}.
+ Handle<DescriptorArray> descriptors = broker->CanonicalPersistentHandle(
+ map->instance_descriptors(kAcquireLoad));
+ if (descriptors->GetDetails(descriptor_index).location() == kField) {
+ Handle<Map> owner = broker->CanonicalPersistentHandle(
+ map->FindFieldOwner(isolate, descriptor_index));
+ if (!owner.equals(map)) {
+ ObjectData* data = broker->TryGetOrCreateData(owner);
+ if (data == nullptr) return false;
+ data->AsMap()->SerializeOwnDescriptor(broker, descriptor_index);
+ }
+ }
+ } else {
+ DescriptorArrayData* descriptors =
+ instance_descriptors()->AsDescriptorArray();
+ descriptors->SerializeDescriptor(broker, map, descriptor_index);
+ }
+
+ return true;
+}
+
+void MapData::SerializeRootMap(JSHeapBroker* broker) {
+ if (serialized_root_map_) return;
+ serialized_root_map_ = true;
+
+ TraceScope tracer(broker, this, "MapData::SerializeRootMap");
+ Handle<Map> map = Handle<Map>::cast(object());
+ DCHECK_NULL(root_map_);
+ root_map_ = broker->GetOrCreateData(map->FindRootMap(broker->isolate()));
+}
+
+ObjectData* MapData::FindRootMap() const { return root_map_; }
+
+void JSObjectData::SerializeRecursiveAsBoilerplate(JSHeapBroker* broker,
+ int depth) {
+ if (serialized_as_boilerplate_) return;
+ serialized_as_boilerplate_ = true;
+
+ TraceScope tracer(broker, this,
+ "JSObjectData::SerializeRecursiveAsBoilerplate");
+ Handle<JSObject> boilerplate = Handle<JSObject>::cast(object());
+
+ // We only serialize boilerplates that pass the IsInlinableFastLiteral
+ // check, so we only do a check on the depth here.
+ CHECK_GT(depth, 0);
+
+ // Serialize the elements.
+ Isolate* const isolate = broker->isolate();
+ Handle<FixedArrayBase> elements_object(boilerplate->elements(), isolate);
+
+ // Boilerplates need special serialization - we need to make sure COW arrays
+ // are tenured. Boilerplate objects should only be reachable from their
+ // allocation site, so it is safe to assume that the elements have not been
+ // serialized yet.
+
+ bool const empty_or_cow =
+ elements_object->length() == 0 ||
+ elements_object->map() == ReadOnlyRoots(isolate).fixed_cow_array_map();
+ if (empty_or_cow) {
+ // We need to make sure copy-on-write elements are tenured.
+ if (ObjectInYoungGeneration(*elements_object)) {
+ elements_object = isolate->factory()->CopyAndTenureFixedCOWArray(
+ Handle<FixedArray>::cast(elements_object));
+ boilerplate->set_elements(*elements_object);
+ }
+ cow_or_empty_elements_tenured_ = true;
+ }
+
+ DCHECK_NULL(elements_);
+ DCHECK(!serialized_elements_);
+ serialized_elements_ = true;
+ elements_ = broker->GetOrCreateData(elements_object);
+ DCHECK(elements_->IsFixedArrayBase());
+
+ if (empty_or_cow || elements_->should_access_heap()) {
+ // No need to do anything here. Empty or copy-on-write elements
+ // do not need to be serialized because we only need to store a
+ // reference to the elements in the allocated object.
+ } else if (boilerplate->HasSmiOrObjectElements()) {
+ Handle<FixedArray> fast_elements =
+ Handle<FixedArray>::cast(elements_object);
+ int length = elements_object->length();
+ for (int i = 0; i < length; i++) {
+ Handle<Object> value(fast_elements->get(i), isolate);
+ if (value->IsJSObject()) {
+ ObjectData* value_data = broker->GetOrCreateData(value);
+ if (!value_data->should_access_heap()) {
+ value_data->AsJSObject()->SerializeRecursiveAsBoilerplate(broker,
+ depth - 1);
+ }
+ }
+ }
+ } else {
+ CHECK(boilerplate->HasDoubleElements());
+ CHECK_LE(elements_object->Size(), kMaxRegularHeapObjectSize);
+ }
+
+ // TODO(turbofan): Do we want to support out-of-object properties?
+ CHECK(boilerplate->HasFastProperties() &&
+ boilerplate->property_array().length() == 0);
+ CHECK_EQ(inobject_fields_.size(), 0u);
+
+ // Check the in-object properties.
+ Handle<DescriptorArray> descriptors(
+ boilerplate->map().instance_descriptors(isolate), isolate);
+ for (InternalIndex i : boilerplate->map().IterateOwnDescriptors()) {
+ PropertyDetails details = descriptors->GetDetails(i);
+ if (details.location() != kField) continue;
+ DCHECK_EQ(kData, details.kind());
+
+ FieldIndex field_index = FieldIndex::ForDescriptor(boilerplate->map(), i);
+ // Make sure {field_index} agrees with {inobject_properties} on the index of
+ // this field.
+ DCHECK_EQ(field_index.property_index(),
+ static_cast<int>(inobject_fields_.size()));
+ Handle<Object> value(boilerplate->RawFastPropertyAt(field_index), isolate);
+ // In case of double fields we use a sentinel NaN value to mark
+ // uninitialized fields. A boilerplate value with such a field may migrate
+ // from its double to a tagged representation. The sentinel value carries
+ // no special meaning when it occurs in a heap number, so we would like to
+ // recover the uninitialized value. We check for the sentinel here,
+ // specifically, since migrations might have been triggered as part of
+ // boilerplate serialization.
+ if (!details.representation().IsDouble() && value->IsHeapNumber() &&
+ HeapNumber::cast(*value).value_as_bits() == kHoleNanInt64) {
+ value = isolate->factory()->uninitialized_value();
+ }
+ ObjectData* value_data = broker->GetOrCreateData(value);
+ if (value_data->IsJSObject() && !value_data->should_access_heap()) {
+ value_data->AsJSObject()->SerializeRecursiveAsBoilerplate(broker,
+ depth - 1);
+ }
+ inobject_fields_.push_back(value_data);
+ }
+ TRACE(broker, "Copied " << inobject_fields_.size() << " in-object fields");
+
+ if (!map()->should_access_heap()) {
+ map()->AsMap()->SerializeOwnDescriptors(broker);
+ }
+
+ if (IsJSArray() && !broker->is_concurrent_inlining()) {
+ AsJSArray()->Serialize(broker);
+ }
+}
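+
+// A minimal standalone sketch of the sentinel-NaN check described above:
+// uninitialized double fields are marked with one specific NaN bit pattern,
+// so the test must compare raw bits (any NaN compares unequal to itself, and
+// many distinct bit patterns are NaNs). kSentinelNanBits is illustrative, not
+// V8's actual hole-NaN constant.
+#include <cstdint>
+#include <cstring>
+
+constexpr uint64_t kSentinelNanBits = 0x7FF7DEADDEADDEADull;
+
+inline uint64_t DoubleToBits(double value) {
+  uint64_t bits;
+  std::memcpy(&bits, &value, sizeof(bits));  // bit-exact, no aliasing UB
+  return bits;
+}
+
+// True only for the exact sentinel pattern; an ordinary NaN or any other
+// double does not match, which is why the comparison is done on raw bits.
+inline bool IsUninitializedSentinel(double value) {
+  return DoubleToBits(value) == kSentinelNanBits;
+}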
+
+void RegExpBoilerplateDescriptionData::Serialize(JSHeapBroker* broker) {
+ if (serialized_) return; // Only serialize once.
+ serialized_ = true;
+
+ TraceScope tracer(broker, this,
+ "RegExpBoilerplateDescriptionData::Serialize");
+ auto boilerplate = Handle<RegExpBoilerplateDescription>::cast(object());
+
+ data_ = broker->GetOrCreateData(boilerplate->data());
+ source_ = broker->GetOrCreateData(boilerplate->source());
+ flags_ = boilerplate->flags();
+}
+
+#ifdef DEBUG
+bool ObjectRef::IsNeverSerializedHeapObject() const {
+ return data_->kind() == ObjectDataKind::kNeverSerializedHeapObject;
+}
+#endif // DEBUG
+
+bool ObjectRef::equals(const ObjectRef& other) const {
+#ifdef DEBUG
+ if (broker()->mode() == JSHeapBroker::kSerialized &&
+ data_->used_status == ObjectData::Usage::kUnused) {
+ data_->used_status = ObjectData::Usage::kOnlyIdentityUsed;
+ }
+#endif // DEBUG
+ // TODO(jgruber): Consider going back to reference-equality on data_ once
+ // ObjectData objects are guaranteed to be canonicalized (see also:
+ // ClearReconstructibleData).
+ return data_->object().is_identical_to(other.data_->object());
+}
+
+Isolate* ObjectRef::isolate() const { return broker()->isolate(); }
+
+ContextRef ContextRef::previous(size_t* depth,
+ SerializationPolicy policy) const {
+ DCHECK_NOT_NULL(depth);
+
+ if (data_->should_access_heap()) {
+ Context current = *object();
+ while (*depth != 0 && current.unchecked_previous().IsContext()) {
+ current = Context::cast(current.unchecked_previous());
+ (*depth)--;
+ }
+ return MakeRef(broker(), current);
+ }
+
+ if (*depth == 0) return *this;
+
+ ObjectData* previous_data = data()->AsContext()->previous(broker(), policy);
+ if (previous_data == nullptr || !previous_data->IsContext()) return *this;
+
+ *depth = *depth - 1;
+ return ContextRef(broker(), previous_data).previous(depth, policy);
+}
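+
+// A minimal standalone sketch of the chain walk above: follow `previous`
+// links while the caller-provided depth budget allows, and report how far we
+// got by decrementing through the pointer. Names are illustrative.
+#include <cstddef>
+
+struct CtxSketch {
+  CtxSketch* previous = nullptr;
+};
+
+// On return, *depth holds how many requested links could not be taken
+// (0 means the full depth was walked).
+inline CtxSketch* WalkPrevious(CtxSketch* start, size_t* depth) {
+  CtxSketch* current = start;
+  while (*depth != 0 && current->previous != nullptr) {
+    current = current->previous;
+    --*depth;
+  }
+  return current;
+}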
+
+base::Optional<ObjectRef> ContextRef::get(int index,
+ SerializationPolicy policy) const {
+ CHECK_LE(0, index);
+ if (data_->should_access_heap()) {
+ if (index >= object()->length()) return {};
+ return TryMakeRef(broker(), object()->get(index));
+ }
+ ObjectData* optional_slot =
+ data()->AsContext()->GetSlot(broker(), index, policy);
+ if (optional_slot == nullptr) return {};
+ return ObjectRef(broker(), optional_slot);
+}
+
+SourceTextModuleRef ContextRef::GetModule(SerializationPolicy policy) const {
+ ContextRef current = *this;
+ while (current.map().instance_type() != MODULE_CONTEXT_TYPE) {
+ size_t depth = 1;
+ current = current.previous(&depth, policy);
+ CHECK_EQ(depth, 0);
+ }
+ return current.get(Context::EXTENSION_INDEX, policy)
+ .value()
+ .AsSourceTextModule();
+}
+
+#ifdef DEBUG
+void JSHeapBroker::PrintRefsAnalysis() const {
+ // Usage counts
+ size_t used_total = 0, unused_total = 0, identity_used_total = 0;
+ for (RefsMap::Entry* ref = refs_->Start(); ref != nullptr;
+ ref = refs_->Next(ref)) {
+ switch (ref->value->used_status) {
+ case ObjectData::Usage::kUnused:
+ ++unused_total;
+ break;
+ case ObjectData::Usage::kOnlyIdentityUsed:
+ ++identity_used_total;
+ break;
+ case ObjectData::Usage::kDataUsed:
+ ++used_total;
+ break;
+ }
+ }
+
+ // Ref types analysis
+ TRACE_BROKER_MEMORY(
+ this, "Refs: " << refs_->occupancy() << "; data used: " << used_total
+ << "; only identity used: " << identity_used_total
+ << "; unused: " << unused_total);
+ size_t used_smis = 0, unused_smis = 0, identity_used_smis = 0;
+ size_t used[LAST_TYPE + 1] = {0};
+ size_t unused[LAST_TYPE + 1] = {0};
+ size_t identity_used[LAST_TYPE + 1] = {0};
+ for (RefsMap::Entry* ref = refs_->Start(); ref != nullptr;
+ ref = refs_->Next(ref)) {
+ if (ref->value->is_smi()) {
+ switch (ref->value->used_status) {
+ case ObjectData::Usage::kUnused:
+ ++unused_smis;
+ break;
+ case ObjectData::Usage::kOnlyIdentityUsed:
+ ++identity_used_smis;
+ break;
+ case ObjectData::Usage::kDataUsed:
+ ++used_smis;
+ break;
+ }
+ } else {
+ InstanceType instance_type;
+ if (ref->value->should_access_heap()) {
+ instance_type = Handle<HeapObject>::cast(ref->value->object())
+ ->map()
+ .instance_type();
+ } else {
+ instance_type = ref->value->AsHeapObject()->GetMapInstanceType();
+ }
+ CHECK_LE(FIRST_TYPE, instance_type);
+ CHECK_LE(instance_type, LAST_TYPE);
+ switch (ref->value->used_status) {
+ case ObjectData::Usage::kUnused:
+ ++unused[instance_type];
+ break;
+ case ObjectData::Usage::kOnlyIdentityUsed:
+ ++identity_used[instance_type];
+ break;
+ case ObjectData::Usage::kDataUsed:
+ ++used[instance_type];
+ break;
+ }
+ }
+ }
+
+ TRACE_BROKER_MEMORY(
+ this, "Smis: " << used_smis + identity_used_smis + unused_smis
+ << "; data used: " << used_smis << "; only identity used: "
+ << identity_used_smis << "; unused: " << unused_smis);
+ for (uint16_t i = FIRST_TYPE; i <= LAST_TYPE; ++i) {
+ size_t total = used[i] + identity_used[i] + unused[i];
+ if (total == 0) continue;
+ TRACE_BROKER_MEMORY(
+ this, InstanceType(i) << ": " << total << "; data used: " << used[i]
+ << "; only identity used: " << identity_used[i]
+ << "; unused: " << unused[i]);
+ }
+}
+#endif // DEBUG
+
+void JSHeapBroker::InitializeAndStartSerializing() {
+ TraceScope tracer(this, "JSHeapBroker::InitializeAndStartSerializing");
+
+ CHECK_EQ(mode_, kDisabled);
+ mode_ = kSerializing;
+
+ // Throw away the dummy data that we created while disabled.
+ refs_->Clear();
+ refs_ =
+ zone()->New<RefsMap>(kInitialRefsBucketCount, AddressMatcher(), zone());
+
+ CollectArrayAndObjectPrototypes();
+
+ SetTargetNativeContextRef(target_native_context().object());
+ target_native_context().Serialize();
+ if (!is_concurrent_inlining()) {
+ // Perform full native context serialization now if we can't do it later on
+ // the background thread.
+ target_native_context().SerializeOnBackground();
+ }
+
+ Factory* const f = isolate()->factory();
+ if (!is_concurrent_inlining()) {
+ ObjectData* data;
+ data = GetOrCreateData(f->array_buffer_detaching_protector());
+ if (!data->should_access_heap()) data->AsPropertyCell()->Serialize(this);
+ data = GetOrCreateData(f->array_constructor_protector());
+ if (!data->should_access_heap()) data->AsPropertyCell()->Serialize(this);
+ data = GetOrCreateData(f->array_iterator_protector());
+ if (!data->should_access_heap()) data->AsPropertyCell()->Serialize(this);
+ data = GetOrCreateData(f->array_species_protector());
+ if (!data->should_access_heap()) data->AsPropertyCell()->Serialize(this);
+ data = GetOrCreateData(f->no_elements_protector());
+ if (!data->should_access_heap()) data->AsPropertyCell()->Serialize(this);
+ data = GetOrCreateData(f->promise_hook_protector());
+ if (!data->should_access_heap()) data->AsPropertyCell()->Serialize(this);
+ data = GetOrCreateData(f->promise_species_protector());
+ if (!data->should_access_heap()) data->AsPropertyCell()->Serialize(this);
+ data = GetOrCreateData(f->promise_then_protector());
+ if (!data->should_access_heap()) data->AsPropertyCell()->Serialize(this);
+ data = GetOrCreateData(f->string_length_protector());
+ if (!data->should_access_heap()) data->AsPropertyCell()->Serialize(this);
+ }
+ GetOrCreateData(f->many_closures_cell());
+ GetOrCreateData(CodeFactory::CEntry(isolate(), 1, SaveFPRegsMode::kIgnore,
+ ArgvMode::kStack, true));
+
+ TRACE(this, "Finished serializing standard objects");
+}
+
+namespace {
+
+template <RefSerializationKind Kind, class DataT, class ObjectT>
+struct CreateDataFunctor {
+ bool operator()(JSHeapBroker* broker, RefsMap* refs,
+ Handle<Object> object, RefsMap::Entry** entry_out,
+ ObjectData** object_data_out) {
+ USE(broker, refs, object, entry_out, object_data_out);
+ UNREACHABLE();
+ }
+};
+
+template <class DataT, class ObjectT>
+struct CreateDataFunctor<RefSerializationKind::kSerialized, DataT, ObjectT> {
+ bool operator()(JSHeapBroker* broker, RefsMap* refs,
+ Handle<Object> object, RefsMap::Entry** entry_out,
+ ObjectData** object_data_out) {
+ if (broker->mode() == JSHeapBroker::kSerializing) {
+ RefsMap::Entry* entry = refs->LookupOrInsert(object.address());
+ *object_data_out = broker->zone()->New<DataT>(
+ broker, &entry->value, Handle<ObjectT>::cast(object));
+ *entry_out = entry;
+ return true;
+ }
+ return false;
+ }
+};
+
+template <class DataT, class ObjectT>
+struct CreateDataFunctor<RefSerializationKind::kBackgroundSerialized, DataT,
+ ObjectT> {
+ bool operator()(JSHeapBroker* broker, RefsMap* refs,
+ Handle<Object> object, RefsMap::Entry** entry_out,
+ ObjectData** object_data_out) {
+ if (broker->is_concurrent_inlining()) {
+ RefsMap::Entry* entry = refs->LookupOrInsert(object.address());
+ *object_data_out = broker->zone()->New<DataT>(
+ broker, &entry->value, Handle<ObjectT>::cast(object),
+ kBackgroundSerializedHeapObject);
+ *entry_out = entry;
+ return true;
+ } else if (broker->mode() == JSHeapBroker::kSerializing) {
+ RefsMap::Entry* entry = refs->LookupOrInsert(object.address());
+ *object_data_out = broker->zone()->New<DataT>(
+ broker, &entry->value, Handle<ObjectT>::cast(object),
+ ObjectDataKind::kSerializedHeapObject);
+ *entry_out = entry;
+ return true;
+ }
+ return false;
+ }
+};
+
+template <class DataT, class ObjectT>
+struct CreateDataFunctor<RefSerializationKind::kNeverSerialized, DataT,
+ ObjectT> {
+ bool operator()(JSHeapBroker* broker, RefsMap* refs, Handle<Object> object,
+ RefsMap::Entry** entry_out, ObjectData** object_data_out) {
+ // TODO(solanes, v8:10866): Remove the `(mode() == kSerializing)` case
+ // below when all classes skip serialization. Same for similar spots if we
+ // end up keeping them.
+ if (broker->is_concurrent_inlining()) {
+ RefsMap::Entry* entry = refs->LookupOrInsert(object.address());
+ *object_data_out = broker->zone()->New<ObjectData>(
+ broker, &entry->value, object, kNeverSerializedHeapObject);
+ *entry_out = entry;
+ return true;
+ } else if (broker->mode() == JSHeapBroker::kSerializing) {
+ RefsMap::Entry* entry = refs->LookupOrInsert(object.address());
+ *object_data_out = broker->zone()->New<DataT>(
+ broker, &entry->value, Handle<ObjectT>::cast(object));
+ *entry_out = entry;
+ return true;
+ }
+ return false;
+ }
+};
+
+} // namespace
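+
+// A minimal standalone sketch of the dispatch technique used by
+// CreateDataFunctor above: a primary template acts as the "no match" case and
+// one explicit specialization is provided per enum value, selected at compile
+// time. Names are illustrative.
+#include <cstdio>
+
+enum class KindSketch { kSerialized, kBackgroundSerialized, kNeverSerialized };
+
+// Primary template: reaching it means no specialization was provided,
+// mirroring the UNREACHABLE() default above.
+template <KindSketch K>
+struct CreateFunctorSketch {
+  bool operator()() const { return false; }
+};
+
+template <>
+struct CreateFunctorSketch<KindSketch::kSerialized> {
+  bool operator()() const { return std::puts("eagerly serialized") >= 0; }
+};
+
+template <>
+struct CreateFunctorSketch<KindSketch::kNeverSerialized> {
+  bool operator()() const { return std::puts("thin, never serialized") >= 0; }
+};
+
+// Usage (illustrative): CreateFunctorSketch<KindSketch::kSerialized>{}();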
+
+void JSHeapBroker::ClearReconstructibleData() {
+ RefsMap::Entry* p = refs_->Start();
+ while (p != nullptr) {
+ Address key = p->key;
+ ObjectData* value = p->value;
+ p = refs_->Next(p);
+ const auto kind = RefSerializationKindOf(value);
+ if (kind == RefSerializationKind::kNeverSerialized ||
+ kind == RefSerializationKind::kBackgroundSerialized) {
+ if (value->IsMap() &&
+ value->kind() == ObjectDataKind::kBackgroundSerializedHeapObject &&
+ value->AsMap()->has_extra_serialized_data()) {
+ continue;
+ }
+ // Can be reconstructed from the background thread.
+ CHECK_NOT_NULL(refs_->Remove(key));
+ }
+ }
+}
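+
+// A minimal standalone sketch of the erase-while-iterating idiom used above:
+// the iterator is advanced past the current entry before that entry is
+// removed, so removal never invalidates the position we continue from.
+// std::unordered_map stands in for RefsMap.
+#include <string>
+#include <unordered_map>
+
+inline void RemoveFlagged(std::unordered_map<std::string, bool>& refs) {
+  for (auto it = refs.begin(); it != refs.end();) {
+    auto current = it++;    // advance first (the analogue of p = Next(p))...
+    if (current->second) {  // ...then it is safe to erase `current`
+      refs.erase(current);
+    }
+  }
+}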
+
+ObjectData* JSHeapBroker::TryGetOrCreateData(Handle<Object> object,
+ GetOrCreateDataFlags flags) {
+ RefsMap::Entry* entry = refs_->Lookup(object.address());
+ if (entry != nullptr) return entry->value;
+
+ if (mode() == JSHeapBroker::kDisabled) {
+ entry = refs_->LookupOrInsert(object.address());
+ ObjectData** storage = &entry->value;
+ if (*storage == nullptr) {
+ entry->value = zone()->New<ObjectData>(
+ this, storage, object,
+ object->IsSmi() ? kSmi : kUnserializedHeapObject);
+ }
+ return *storage;
+ }
+
+ CHECK(mode() == JSHeapBroker::kSerializing ||
+ mode() == JSHeapBroker::kSerialized);
+
+ ObjectData* object_data;
+ if (object->IsSmi()) {
+ entry = refs_->LookupOrInsert(object.address());
+ return zone()->New<ObjectData>(this, &entry->value, object, kSmi);
+ }
+
+ DCHECK(!object->IsSmi());
+
+ const bool crash_on_error = (flags & kCrashOnError) != 0;
+
+ // TODO(jgruber): Remove this flag check (and the flag) once TSAN failures
+ // are fixed.
+ // See also: crbug.com/v8/11779
+ if (FLAG_turbo_concurrent_inlining_check_ispendingallocation) {
+ if ((flags & kAssumeMemoryFence) == 0 &&
+ ObjectMayBeUninitialized(HeapObject::cast(*object))) {
+ TRACE_BROKER_MISSING(this, "Object may be uninitialized " << *object);
+ CHECK_WITH_MSG(!crash_on_error, "Ref construction failed");
+ return nullptr;
+ }
+ }
+
+ if (IsReadOnlyHeapObjectForCompiler(HeapObject::cast(*object))) {
+ entry = refs_->LookupOrInsert(object.address());
+ return zone()->New<ObjectData>(this, &entry->value, object,
+ kUnserializedReadOnlyHeapObject);
+ }
+
+#define CREATE_DATA(Name, Kind) \
+ if (object->Is##Name()) { \
+ CreateDataFunctor<Kind, Name##Data, Name> f; \
+ if (!f(this, refs_, object, &entry, &object_data)) { \
+ CHECK_WITH_MSG(!crash_on_error, "Ref construction failed"); \
+ return nullptr; \
+ } \
+ /* NOLINTNEXTLINE(readability/braces) */ \
+ } else
+ HEAP_BROKER_OBJECT_LIST(CREATE_DATA)
+#undef CREATE_DATA
+ {
+ UNREACHABLE();
+ }
+ // At this point the entry pointer is not guaranteed to be valid as
+ // the refs_ hash table could be resized by one of the constructors above.
+ DCHECK_EQ(object_data, refs_->Lookup(object.address())->value);
+ return object_data;
+}
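+
+// A minimal standalone sketch of the pitfall the comment above guards
+// against: a pointer into a growable table may be invalidated by a later
+// insertion, so the safe pattern is to re-derive it from the container
+// (as the DCHECK above does via a fresh lookup). std::vector stands in for
+// the resizable hash table.
+#include <cassert>
+#include <vector>
+
+inline void ReValidateAfterGrowth() {
+  std::vector<int> table;
+  table.push_back(42);
+  int* entry = &table.front();  // valid only until the next reallocation
+  for (int i = 0; i < 1000; ++i) table.push_back(i);  // may reallocate
+  entry = &table.front();       // safe: re-derived after the mutation
+  assert(*entry == 42);
+}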
+
+#define DEFINE_IS_AND_AS(Name, ...) \
+ bool ObjectRef::Is##Name() const { return data()->Is##Name(); } \
+ Name##Ref ObjectRef::As##Name() const { \
+ DCHECK(Is##Name()); \
+ return Name##Ref(broker(), data()); \
+ }
+HEAP_BROKER_OBJECT_LIST(DEFINE_IS_AND_AS)
+#undef DEFINE_IS_AND_AS
+
+bool ObjectRef::IsSmi() const { return data()->is_smi(); }
+
+int ObjectRef::AsSmi() const {
+ DCHECK(IsSmi());
+ // Handle-dereference is always allowed for Handle<Smi>.
+ return Handle<Smi>::cast(object())->value();
+}
+
+base::Optional<MapRef> JSObjectRef::GetObjectCreateMap() const {
+ if (data_->should_access_heap()) {
+ Handle<Map> instance_map;
+ if (Map::TryGetObjectCreateMap(broker()->isolate(), object())
+ .ToHandle(&instance_map)) {
+ return MakeRef(broker(), instance_map);
+ } else {
+ return base::Optional<MapRef>();
+ }
+ }
+ ObjectData* map_data = data()->AsJSObject()->object_create_map(broker());
+ if (map_data == nullptr) return base::Optional<MapRef>();
+ if (map_data->should_access_heap()) {
+ return MakeRef(broker(), Handle<Map>::cast(map_data->object()));
+ }
+ return MapRef(broker(), map_data->AsMap());
+}
+
+#define DEF_TESTER(Type, ...) \
+ bool MapRef::Is##Type##Map() const { \
+ return InstanceTypeChecker::Is##Type(instance_type()); \
+ }
+INSTANCE_TYPE_CHECKERS(DEF_TESTER)
+#undef DEF_TESTER
+
+base::Optional<MapRef> MapRef::AsElementsKind(ElementsKind kind) const {
+ if (data_->should_access_heap()) {
+ return MakeRef(broker(),
+ Map::AsElementsKind(broker()->isolate(), object(), kind));
+ }
+ if (kind == elements_kind()) return *this;
+ const ZoneVector<ObjectData*>& elements_kind_generalizations =
+ data()->AsMap()->elements_kind_generalizations();
+ for (auto data : elements_kind_generalizations) {
+ MapRef map(broker(), data);
+ if (map.elements_kind() == kind) return map;
+ }
+ return base::Optional<MapRef>();
+}
+
+void MapRef::SerializeForElementLoad() {
+ if (data()->should_access_heap()) return;
+ CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
+ data()->AsMap()->SerializeForElementLoad(broker());
+}
+
+void MapRef::SerializeForElementStore() {
+ if (data()->should_access_heap()) return;
+ CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
+ data()->AsMap()->SerializeForElementStore(broker());
+}
+
+void MapData::SerializeForElementLoad(JSHeapBroker* broker) {
+ if (serialized_for_element_load_) return;
+ serialized_for_element_load_ = true;
+
+ TraceScope tracer(broker, this, "MapData::SerializeForElementLoad");
+ SerializePrototype(broker);
+}
+
+void MapData::SerializeForElementStore(JSHeapBroker* broker) {
+ if (serialized_for_element_store_) return;
+ serialized_for_element_store_ = true;
+
+ TraceScope tracer(broker, this, "MapData::SerializeForElementStore");
+ // TODO(solanes, v8:7790): This should use MapData methods rather than
+ // constructing MapRefs, but it involves non-trivial refactoring and this
+ // method should go away anyway once the compiler is fully concurrent.
+ MapRef map(broker, this);
+ do {
+ map.SerializePrototype();
+ map = map.prototype().value().map();
+ } while (map.IsJSObjectMap() && map.is_stable() &&
+ IsFastElementsKind(map.elements_kind()));
+}
+
+bool MapRef::HasOnlyStablePrototypesWithFastElements(
+ ZoneVector<MapRef>* prototype_maps) {
+ DCHECK_NOT_NULL(prototype_maps);
+ MapRef prototype_map = prototype().value().map();
+ while (prototype_map.oddball_type() != OddballType::kNull) {
+ if (!prototype_map.IsJSObjectMap() || !prototype_map.is_stable() ||
+ !IsFastElementsKind(prototype_map.elements_kind())) {
+ return false;
+ }
+ prototype_maps->push_back(prototype_map);
+ prototype_map = prototype_map.prototype().value().map();
+ }
+ return true;
+}
+
+bool MapRef::supports_fast_array_iteration() const {
+ if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
+ return SupportsFastArrayIteration(broker(), object());
+ }
+ return data()->AsMap()->supports_fast_array_iteration();
+}
+
+bool MapRef::supports_fast_array_resize() const {
+ if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
+ return SupportsFastArrayResize(broker(), object());
+ }
+ return data()->AsMap()->supports_fast_array_resize();
+}
+
+int JSFunctionRef::InitialMapInstanceSizeWithMinSlack() const {
+ if (data_->should_access_heap()) {
+ return object()->ComputeInstanceSizeWithMinSlack(broker()->isolate());
+ }
+ return data()->AsJSFunction()->initial_map_instance_size_with_min_slack();
+}
+
+OddballType MapRef::oddball_type() const {
+ if (instance_type() != ODDBALL_TYPE) {
+ return OddballType::kNone;
+ }
+ Factory* f = broker()->isolate()->factory();
+ if (equals(MakeRef(broker(), f->undefined_map()))) {
+ return OddballType::kUndefined;
+ }
+ if (equals(MakeRef(broker(), f->null_map()))) {
+ return OddballType::kNull;
+ }
+ if (equals(MakeRef(broker(), f->boolean_map()))) {
+ return OddballType::kBoolean;
+ }
+ if (equals(MakeRef(broker(), f->the_hole_map()))) {
+ return OddballType::kHole;
+ }
+ if (equals(MakeRef(broker(), f->uninitialized_map()))) {
+ return OddballType::kUninitialized;
+ }
+ DCHECK(equals(MakeRef(broker(), f->termination_exception_map())) ||
+ equals(MakeRef(broker(), f->arguments_marker_map())) ||
+ equals(MakeRef(broker(), f->optimized_out_map())) ||
+ equals(MakeRef(broker(), f->stale_register_map())));
+ return OddballType::kOther;
+}
+
+FeedbackCellRef FeedbackVectorRef::GetClosureFeedbackCell(int index) const {
+ if (data_->should_access_heap()) {
+ // These should all be available because we request the cell for each
+ // CreateClosure bytecode.
+ return MakeRef(broker(), object()->closure_feedback_cell(index));
+ }
+
+ return FeedbackCellRef(
+ broker(),
+ data()->AsFeedbackVector()->GetClosureFeedbackCell(broker(), index));
+}
+
+ObjectRef JSObjectRef::RawFastPropertyAt(FieldIndex index) const {
+ CHECK(index.is_inobject());
+ if (data_->should_access_heap()) {
+ return MakeRef(broker(), object()->RawFastPropertyAt(index));
+ }
+ JSObjectData* object_data = data()->AsJSObject();
+ return ObjectRef(broker(),
+ object_data->GetInobjectField(index.property_index()));
+}
+
+bool AllocationSiteRef::IsFastLiteral() const {
+ if (data_->should_access_heap()) {
+ CHECK_NE(data_->kind(), ObjectDataKind::kNeverSerializedHeapObject);
+ return IsInlinableFastLiteral(
+ handle(object()->boilerplate(kAcquireLoad), broker()->isolate()));
+ }
+ return data()->AsAllocationSite()->IsFastLiteral();
+}
+
+void AllocationSiteRef::SerializeBoilerplate() {
+ if (data_->should_access_heap()) return;
+ CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
+ data()->AsAllocationSite()->SerializeBoilerplate(broker());
+}
+
+void JSObjectRef::SerializeElements() {
+ if (data_->should_access_heap()) return;
+ CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
+ data()->AsJSObject()->SerializeElements(broker());
+}
+
+void JSObjectRef::EnsureElementsTenured() {
+ if (data_->should_access_heap()) {
+ Handle<FixedArrayBase> object_elements = elements().value().object();
+ if (ObjectInYoungGeneration(*object_elements)) {
+ // If we would like to pretenure a fixed cow array, we must ensure that
+ // the array is already in old space, otherwise we'll create too many
+ // old-to-new-space pointers (overflowing the store buffer).
+ object_elements =
+ broker()->isolate()->factory()->CopyAndTenureFixedCOWArray(
+ Handle<FixedArray>::cast(object_elements));
+ object()->set_elements(*object_elements);
+ }
+ return;
+ }
+ CHECK(data()->AsJSObject()->cow_or_empty_elements_tenured());
+}
+
+FieldIndex MapRef::GetFieldIndexFor(InternalIndex descriptor_index) const {
+ CHECK_LT(descriptor_index.as_int(), NumberOfOwnDescriptors());
+ if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
+ FieldIndex result = FieldIndex::ForDescriptor(*object(), descriptor_index);
+ DCHECK(result.is_inobject());
+ return result;
+ }
+ DescriptorArrayData* descriptors =
+ data()->AsMap()->instance_descriptors()->AsDescriptorArray();
+ FieldIndex result = descriptors->GetFieldIndexFor(descriptor_index);
+ DCHECK(result.is_inobject());
+ return result;
+}
+
+int MapRef::GetInObjectPropertyOffset(int i) const {
+ if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
+ return object()->GetInObjectPropertyOffset(i);
+ }
+ return (GetInObjectPropertiesStartInWords() + i) * kTaggedSize;
+}
+
+PropertyDetails MapRef::GetPropertyDetails(
+ InternalIndex descriptor_index) const {
+ CHECK_LT(descriptor_index.as_int(), NumberOfOwnDescriptors());
+ return instance_descriptors().GetPropertyDetails(descriptor_index);
+}
+
+NameRef MapRef::GetPropertyKey(InternalIndex descriptor_index) const {
+ CHECK_LT(descriptor_index.as_int(), NumberOfOwnDescriptors());
+ return instance_descriptors().GetPropertyKey(descriptor_index);
+}
+
+bool MapRef::IsFixedCowArrayMap() const {
+ Handle<Map> fixed_cow_array_map =
+ ReadOnlyRoots(broker()->isolate()).fixed_cow_array_map_handle();
+ return equals(MakeRef(broker(), fixed_cow_array_map));
+}
+
+bool MapRef::IsPrimitiveMap() const {
+ return instance_type() <= LAST_PRIMITIVE_HEAP_OBJECT_TYPE;
+}
+
+MapRef MapRef::FindFieldOwner(InternalIndex descriptor_index) const {
+ CHECK_LT(descriptor_index.as_int(), NumberOfOwnDescriptors());
+ if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
+ // TODO(solanes, v8:7790): Consider caching the result of the field owner on
+    // the descriptor array. It would be useful for the same map as well as
+    // any other map sharing that descriptor array.
+ return MapRef(broker(), broker()->GetOrCreateData(object()->FindFieldOwner(
+ broker()->isolate(), descriptor_index)));
+ }
+ DescriptorArrayData* descriptors =
+ data()->AsMap()->instance_descriptors()->AsDescriptorArray();
+ return MapRef(broker(), descriptors->FindFieldOwner(descriptor_index));
+}
+
+ObjectRef MapRef::GetFieldType(InternalIndex descriptor_index) const {
+ CHECK_LT(descriptor_index.as_int(), NumberOfOwnDescriptors());
+ return instance_descriptors().GetFieldType(descriptor_index);
+}
+
+base::Optional<ObjectRef> StringRef::GetCharAsStringOrUndefined(
+ uint32_t index, SerializationPolicy policy) const {
+ if (data_->should_access_heap()) {
+ // TODO(solanes, neis, v8:7790, v8:11012): Re-enable this optimization for
+ // concurrent inlining when we have the infrastructure to safely do so.
+ if (broker()->is_concurrent_inlining()) return base::nullopt;
+ CHECK_EQ(data_->kind(), ObjectDataKind::kUnserializedHeapObject);
+ return GetOwnElementFromHeap(broker(), object(), index, true);
+ }
+ ObjectData* element =
+ data()->AsString()->GetCharAsStringOrUndefined(broker(), index, policy);
+ if (element == nullptr) return base::nullopt;
+ return ObjectRef(broker(), element);
+}
+
+base::Optional<int> StringRef::length() const {
+ if (data_->should_access_heap()) {
+ if (data_->kind() == kNeverSerializedHeapObject &&
+ !this->IsInternalizedString()) {
+ TRACE_BROKER_MISSING(
+ broker(),
+ "length for kNeverSerialized non-internalized string " << *this);
+ return base::nullopt;
+ } else {
+ return object()->length(kAcquireLoad);
+ }
+ }
+ return data()->AsString()->length();
+}
+
+base::Optional<uint16_t> StringRef::GetFirstChar() {
+ if (data_->should_access_heap()) {
+ if (data_->kind() == kNeverSerializedHeapObject &&
+ !this->IsInternalizedString()) {
+ TRACE_BROKER_MISSING(
+ broker(),
+ "first char for kNeverSerialized non-internalized string " << *this);
+ return base::nullopt;
+ }
+
+ if (!broker()->IsMainThread()) {
+ return object()->Get(0, broker()->local_isolate());
+ } else {
+ // TODO(solanes, v8:7790): Remove this case once the inlining phase is
+ // done concurrently all the time.
+ return object()->Get(0);
+ }
+ }
+ return data()->AsString()->first_char();
+}
+
+base::Optional<double> StringRef::ToNumber() {
+ if (data_->should_access_heap()) {
+ if (data_->kind() == kNeverSerializedHeapObject &&
+ !this->IsInternalizedString()) {
+ TRACE_BROKER_MISSING(
+ broker(),
+ "number for kNeverSerialized non-internalized string " << *this);
+ return base::nullopt;
+ }
+
+ return TryStringToDouble(broker()->local_isolate(), object());
+ }
+ return data()->AsString()->to_number();
+}
+
+int ArrayBoilerplateDescriptionRef::constants_elements_length() const {
+ if (data_->should_access_heap()) {
+ return object()->constant_elements().length();
+ }
+ return data()->AsArrayBoilerplateDescription()->constants_elements_length();
+}
+
+ObjectRef FixedArrayRef::get(int i) const { return TryGet(i).value(); }
+
+base::Optional<ObjectRef> FixedArrayRef::TryGet(int i) const {
+ return TryMakeRef(broker(), object()->get(i, kRelaxedLoad));
+}
+
+Float64 FixedDoubleArrayRef::GetFromImmutableFixedDoubleArray(int i) const {
+ STATIC_ASSERT(ref_traits<FixedDoubleArray>::ref_serialization_kind ==
+ RefSerializationKind::kNeverSerialized);
+ return Float64::FromBits(object()->get_representation(i));
+}
+
+Handle<ByteArray> BytecodeArrayRef::SourcePositionTable() const {
+ return broker()->CanonicalPersistentHandle(object()->SourcePositionTable());
+}
+
+Address BytecodeArrayRef::handler_table_address() const {
+ return reinterpret_cast<Address>(
+ object()->handler_table().GetDataStartAddress());
+}
+
+int BytecodeArrayRef::handler_table_size() const {
+ return object()->handler_table().length();
+}
+
+#define IF_ACCESS_FROM_HEAP_C(name) \
+ if (data_->should_access_heap()) { \
+ return object()->name(); \
+ }
+
+#define IF_ACCESS_FROM_HEAP(result, name) \
+ if (data_->should_access_heap()) { \
+ return MakeRef(broker(), result::cast(object()->name())); \
+ }
+
+// Macros for defining a const getter that, depending on the data kind,
+// either looks into the heap or into the serialized data.
+#define BIMODAL_ACCESSOR(holder, result, name) \
+ result##Ref holder##Ref::name() const { \
+ IF_ACCESS_FROM_HEAP(result, name); \
+ return result##Ref(broker(), ObjectRef::data()->As##holder()->name()); \
+ }
+
+// Like above except that the result type is not an XYZRef.
+#define BIMODAL_ACCESSOR_C(holder, result, name) \
+ result holder##Ref::name() const { \
+ IF_ACCESS_FROM_HEAP_C(name); \
+ return ObjectRef::data()->As##holder()->name(); \
+ }
+
+// Like above but for BitFields.
+#define BIMODAL_ACCESSOR_B(holder, field, name, BitField) \
+ typename BitField::FieldType holder##Ref::name() const { \
+ IF_ACCESS_FROM_HEAP_C(name); \
+ return BitField::decode(ObjectRef::data()->As##holder()->field()); \
+ }
+
+// Like IF_ACCESS_FROM_HEAP[_C] but we also allow direct heap access for
+// kSerialized only for methods that we identified to be safe.
+#define IF_ACCESS_FROM_HEAP_WITH_FLAG(result, name) \
+ if (data_->should_access_heap() || broker()->is_concurrent_inlining()) { \
+ return MakeRef(broker(), result::cast(object()->name())); \
+ }
+#define IF_ACCESS_FROM_HEAP_WITH_FLAG_C(name) \
+ if (data_->should_access_heap() || broker()->is_concurrent_inlining()) { \
+ return object()->name(); \
+ }
+
+// Like BIMODAL_ACCESSOR[_C] except that we force a direct heap access if
+// broker()->is_concurrent_inlining() is true (even for kSerialized). This is
+// because we identified the method to be safe to use direct heap access, but
+// the holder##Data class still needs to be serialized.
+#define BIMODAL_ACCESSOR_WITH_FLAG(holder, result, name) \
+ result##Ref holder##Ref::name() const { \
+ IF_ACCESS_FROM_HEAP_WITH_FLAG(result, name); \
+ return result##Ref(broker(), ObjectRef::data()->As##holder()->name()); \
+ }
+#define BIMODAL_ACCESSOR_WITH_FLAG_C(holder, result, name) \
+ result holder##Ref::name() const { \
+ IF_ACCESS_FROM_HEAP_WITH_FLAG_C(name); \
+ return ObjectRef::data()->As##holder()->name(); \
+ }
+#define BIMODAL_ACCESSOR_WITH_FLAG_B(holder, field, name, BitField) \
+ typename BitField::FieldType holder##Ref::name() const { \
+ IF_ACCESS_FROM_HEAP_WITH_FLAG_C(name); \
+ return BitField::decode(ObjectRef::data()->As##holder()->field()); \
+ }
+
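As a reader's aid (not part of the patch), here is roughly what one of the
instantiations below, BIMODAL_ACCESSOR_C(HeapNumber, double, value), expands to
given the macros above; the expansion is mechanical, only the layout is ours:

double HeapNumberRef::value() const {
  if (data_->should_access_heap()) {
    return object()->value();  // read the heap object directly
  }
  return ObjectRef::data()->AsHeapNumber()->value();  // use the serialized copy
}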
+BIMODAL_ACCESSOR(AllocationSite, Object, nested_site)
+BIMODAL_ACCESSOR_C(AllocationSite, bool, CanInlineCall)
+BIMODAL_ACCESSOR_C(AllocationSite, bool, PointsToLiteral)
+BIMODAL_ACCESSOR_C(AllocationSite, ElementsKind, GetElementsKind)
+BIMODAL_ACCESSOR_C(AllocationSite, AllocationType, GetAllocationType)
+
+BIMODAL_ACCESSOR_C(BigInt, uint64_t, AsUint64)
+
+BIMODAL_ACCESSOR_C(BytecodeArray, int, register_count)
+BIMODAL_ACCESSOR_C(BytecodeArray, int, parameter_count)
+BIMODAL_ACCESSOR_C(BytecodeArray, interpreter::Register,
+ incoming_new_target_or_generator_register)
+
+BIMODAL_ACCESSOR_C(FeedbackVector, double, invocation_count)
+
+BIMODAL_ACCESSOR(HeapObject, Map, map)
+
+BIMODAL_ACCESSOR_C(HeapNumber, double, value)
+
+// These JSBoundFunction fields are immutable after initialization. Moreover,
+// as long as JSObjects are still serialized on the main thread, all
+// JSBoundFunctionRefs are created at a time when the underlying objects are
+// guaranteed to be fully initialized.
+BIMODAL_ACCESSOR_WITH_FLAG(JSBoundFunction, JSReceiver, bound_target_function)
+BIMODAL_ACCESSOR_WITH_FLAG(JSBoundFunction, Object, bound_this)
+BIMODAL_ACCESSOR_WITH_FLAG(JSBoundFunction, FixedArray, bound_arguments)
+
+BIMODAL_ACCESSOR_C(JSDataView, size_t, byte_length)
+
+BIMODAL_ACCESSOR_C(JSFunction, bool, has_feedback_vector)
+BIMODAL_ACCESSOR_C(JSFunction, bool, has_initial_map)
+BIMODAL_ACCESSOR_C(JSFunction, bool, has_prototype)
+BIMODAL_ACCESSOR_C(JSFunction, bool, PrototypeRequiresRuntimeLookup)
+BIMODAL_ACCESSOR(JSFunction, Context, context)
+BIMODAL_ACCESSOR(JSFunction, NativeContext, native_context)
+BIMODAL_ACCESSOR(JSFunction, Map, initial_map)
+BIMODAL_ACCESSOR(JSFunction, Object, prototype)
+BIMODAL_ACCESSOR(JSFunction, SharedFunctionInfo, shared)
+BIMODAL_ACCESSOR(JSFunction, FeedbackCell, raw_feedback_cell)
+BIMODAL_ACCESSOR(JSFunction, FeedbackVector, feedback_vector)
+
+BIMODAL_ACCESSOR_C(JSGlobalObject, bool, IsDetached)
+
+BIMODAL_ACCESSOR_WITH_FLAG_B(Map, bit_field2, elements_kind,
+ Map::Bits2::ElementsKindBits)
+BIMODAL_ACCESSOR_WITH_FLAG_B(Map, bit_field3, is_dictionary_map,
+ Map::Bits3::IsDictionaryMapBit)
+BIMODAL_ACCESSOR_WITH_FLAG_B(Map, bit_field3, is_deprecated,
+ Map::Bits3::IsDeprecatedBit)
+BIMODAL_ACCESSOR_WITH_FLAG_B(Map, bit_field3, NumberOfOwnDescriptors,
+ Map::Bits3::NumberOfOwnDescriptorsBits)
+BIMODAL_ACCESSOR_WITH_FLAG_B(Map, bit_field3, is_migration_target,
+ Map::Bits3::IsMigrationTargetBit)
+BIMODAL_ACCESSOR_WITH_FLAG_B(Map, bit_field, has_prototype_slot,
+ Map::Bits1::HasPrototypeSlotBit)
+BIMODAL_ACCESSOR_WITH_FLAG_B(Map, bit_field, is_access_check_needed,
+ Map::Bits1::IsAccessCheckNeededBit)
+BIMODAL_ACCESSOR_WITH_FLAG_B(Map, bit_field, is_callable,
+ Map::Bits1::IsCallableBit)
+BIMODAL_ACCESSOR_WITH_FLAG_B(Map, bit_field, has_indexed_interceptor,
+ Map::Bits1::HasIndexedInterceptorBit)
+BIMODAL_ACCESSOR_WITH_FLAG_B(Map, bit_field, is_constructor,
+ Map::Bits1::IsConstructorBit)
+BIMODAL_ACCESSOR_WITH_FLAG_B(Map, bit_field, is_undetectable,
+ Map::Bits1::IsUndetectableBit)
+BIMODAL_ACCESSOR_C(Map, int, instance_size)
+BIMODAL_ACCESSOR_WITH_FLAG_C(Map, int, NextFreePropertyIndex)
+BIMODAL_ACCESSOR_C(Map, int, UnusedPropertyFields)
+BIMODAL_ACCESSOR_WITH_FLAG_C(Map, InstanceType, instance_type)
+BIMODAL_ACCESSOR_WITH_FLAG(Map, Object, GetConstructor)
+BIMODAL_ACCESSOR_WITH_FLAG(Map, HeapObject, GetBackPointer)
+BIMODAL_ACCESSOR_C(Map, bool, is_abandoned_prototype_map)
+
+#define DEF_NATIVE_CONTEXT_ACCESSOR(type, name) \
+ BIMODAL_ACCESSOR(NativeContext, type, name)
+BROKER_NATIVE_CONTEXT_FIELDS(DEF_NATIVE_CONTEXT_ACCESSOR)
+#undef DEF_NATIVE_CONTEXT_ACCESSOR
+
+BIMODAL_ACCESSOR_C(ObjectBoilerplateDescription, int, size)
+
+BIMODAL_ACCESSOR(PropertyCell, Object, value)
+BIMODAL_ACCESSOR_C(PropertyCell, PropertyDetails, property_details)
+
+BIMODAL_ACCESSOR(RegExpBoilerplateDescription, FixedArray, data)
+BIMODAL_ACCESSOR(RegExpBoilerplateDescription, String, source)
+BIMODAL_ACCESSOR_C(RegExpBoilerplateDescription, int, flags)
+
+base::Optional<CallHandlerInfoRef> FunctionTemplateInfoRef::call_code() const {
+ if (data_->should_access_heap()) {
+ HeapObject call_code = object()->call_code(kAcquireLoad);
+ if (call_code.IsUndefined()) return base::nullopt;
+ return TryMakeRef(broker(), CallHandlerInfo::cast(call_code));
+ }
+ ObjectData* call_code = data()->AsFunctionTemplateInfo()->call_code();
+ if (!call_code) return base::nullopt;
+ return CallHandlerInfoRef(broker(), call_code);
+}
+
+bool FunctionTemplateInfoRef::is_signature_undefined() const {
+ if (data_->should_access_heap()) {
+ return object()->signature().IsUndefined(broker()->isolate());
+ }
+ return data()->AsFunctionTemplateInfo()->is_signature_undefined();
+}
+
+bool FunctionTemplateInfoRef::has_call_code() const {
+ if (data_->should_access_heap()) {
+ HeapObject call_code = object()->call_code(kAcquireLoad);
+ return !call_code.IsUndefined();
+ }
+ return data()->AsFunctionTemplateInfo()->has_call_code();
+}
+
+bool FunctionTemplateInfoRef::accept_any_receiver() const {
+ if (data_->should_access_heap()) {
+ return object()->accept_any_receiver();
+ }
+  return ObjectRef::data()->AsFunctionTemplateInfo()->accept_any_receiver();
+}
+
+HolderLookupResult FunctionTemplateInfoRef::LookupHolderOfExpectedType(
+ MapRef receiver_map, SerializationPolicy policy) {
+ const HolderLookupResult not_found;
+
+ if (data_->should_access_heap()) {
+ // There are currently two ways we can see a FunctionTemplateInfo on the
+ // background thread: 1.) As part of a SharedFunctionInfo and 2.) in an
+ // AccessorPair. In both cases, the FTI is fully constructed on the main
+    // thread beforehand.
+ // TODO(nicohartmann@, v8:7790): Once the above no longer holds, we might
+ // have to use the GC predicate to check whether objects are fully
+ // initialized and safe to read.
+ if (!receiver_map.IsJSReceiverMap() ||
+ (receiver_map.is_access_check_needed() &&
+ !object()->accept_any_receiver())) {
+ return not_found;
+ }
+
+ if (!receiver_map.IsJSObjectMap()) return not_found;
+
+ DCHECK(has_call_code());
+
+ DisallowGarbageCollection no_gc;
+ HeapObject signature = object()->signature();
+ if (signature.IsUndefined()) {
+ return HolderLookupResult(CallOptimization::kHolderIsReceiver);
+ }
+ auto expected_receiver_type = FunctionTemplateInfo::cast(signature);
+ if (expected_receiver_type.IsTemplateFor(*receiver_map.object())) {
+ return HolderLookupResult(CallOptimization::kHolderIsReceiver);
+ }
+
+ if (!receiver_map.IsJSGlobalProxyMap()) return not_found;
+ if (policy == SerializationPolicy::kSerializeIfNeeded) {
+ receiver_map.SerializePrototype();
+ }
+ base::Optional<HeapObjectRef> prototype = receiver_map.prototype();
+ if (!prototype.has_value()) return not_found;
+ if (prototype->IsNull()) return not_found;
+
+ JSObject raw_prototype = JSObject::cast(*prototype->object());
+ if (!expected_receiver_type.IsTemplateFor(raw_prototype.map())) {
+ return not_found;
+ }
+ return HolderLookupResult(CallOptimization::kHolderFound,
+ prototype->AsJSObject());
+ }
+
+ FunctionTemplateInfoData* fti_data = data()->AsFunctionTemplateInfo();
+ KnownReceiversMap::iterator lookup_it =
+ fti_data->known_receivers().find(receiver_map.data());
+ if (lookup_it != fti_data->known_receivers().cend()) {
+ return lookup_it->second;
+ }
+ if (policy == SerializationPolicy::kAssumeSerialized) {
+ TRACE_BROKER_MISSING(broker(),
+ "holder for receiver with map " << receiver_map);
+ return not_found;
+ }
+ if (!receiver_map.IsJSReceiverMap() ||
+ (receiver_map.is_access_check_needed() && !accept_any_receiver())) {
+ fti_data->known_receivers().insert({receiver_map.data(), not_found});
+ return not_found;
+ }
+
+ HolderLookupResult result;
+ CallOptimization call_optimization(broker()->local_isolate_or_isolate(),
+ object());
+ Handle<JSObject> holder = broker()->CanonicalPersistentHandle(
+ call_optimization.LookupHolderOfExpectedType(
+ broker()->local_isolate_or_isolate(), receiver_map.object(),
+ &result.lookup));
+
+ switch (result.lookup) {
+ case CallOptimization::kHolderFound: {
+ result.holder = MakeRef(broker(), holder);
+ fti_data->known_receivers().insert({receiver_map.data(), result});
+ break;
+ }
+ default: {
+ DCHECK_EQ(result.holder, base::nullopt);
+ fti_data->known_receivers().insert({receiver_map.data(), result});
+ }
+ }
+ return result;
+}
+
+BIMODAL_ACCESSOR(CallHandlerInfo, Object, data)
+
+BIMODAL_ACCESSOR_C(ScopeInfo, int, ContextLength)
+BIMODAL_ACCESSOR_C(ScopeInfo, bool, HasContextExtensionSlot)
+BIMODAL_ACCESSOR_C(ScopeInfo, bool, HasOuterScopeInfo)
+BIMODAL_ACCESSOR(ScopeInfo, ScopeInfo, OuterScopeInfo)
+
+BIMODAL_ACCESSOR_C(SharedFunctionInfo, int, builtin_id)
+BytecodeArrayRef SharedFunctionInfoRef::GetBytecodeArray() const {
+ if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
+ BytecodeArray bytecode_array;
+ if (!broker()->IsMainThread()) {
+ bytecode_array = object()->GetBytecodeArray(broker()->local_isolate());
+ } else {
+ bytecode_array = object()->GetBytecodeArray(broker()->isolate());
+ }
+ return MakeRef(broker(), bytecode_array);
+ }
+ return BytecodeArrayRef(
+      broker(), ObjectRef::data()->AsSharedFunctionInfo()->GetBytecodeArray());
+}
+#define DEF_SFI_ACCESSOR(type, name) \
+ BIMODAL_ACCESSOR_WITH_FLAG_C(SharedFunctionInfo, type, name)
+BROKER_SFI_FIELDS(DEF_SFI_ACCESSOR)
+#undef DEF_SFI_ACCESSOR
+SharedFunctionInfo::Inlineability SharedFunctionInfoRef::GetInlineability()
+ const {
+ if (data_->should_access_heap()) {
+ if (!broker()->IsMainThread()) {
+ return object()->GetInlineability(broker()->local_isolate());
+ } else {
+ return object()->GetInlineability(broker()->isolate());
+ }
+ }
+  return ObjectRef::data()->AsSharedFunctionInfo()->GetInlineability();
+}
+
+base::Optional<FeedbackVectorRef> FeedbackCellRef::value() const {
+ if (data_->should_access_heap()) {
+ // Note that we use the synchronized accessor.
+ Object value = object()->value(kAcquireLoad);
+ if (!value.IsFeedbackVector()) return base::nullopt;
+ return TryMakeRef(broker(), FeedbackVector::cast(value));
+ }
+ ObjectData* vector = ObjectRef::data()->AsFeedbackCell()->value();
+ return FeedbackVectorRef(broker(), vector->AsFeedbackVector());
+}
+
+base::Optional<ObjectRef> MapRef::GetStrongValue(
+ InternalIndex descriptor_index) const {
+ CHECK_LT(descriptor_index.as_int(), NumberOfOwnDescriptors());
+ return instance_descriptors().GetStrongValue(descriptor_index);
+}
+
+DescriptorArrayRef MapRef::instance_descriptors() const {
+ if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
+ return MakeRefAssumeMemoryFence(
+ broker(),
+ object()->instance_descriptors(broker()->isolate(), kAcquireLoad));
+ }
+
+ return DescriptorArrayRef(broker(), data()->AsMap()->instance_descriptors());
+}
+
+base::Optional<HeapObjectRef> MapRef::prototype() const {
+ if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
+ return TryMakeRef(broker(), HeapObject::cast(object()->prototype()));
+ }
+ ObjectData* prototype_data = data()->AsMap()->prototype();
+ if (prototype_data == nullptr) {
+ TRACE_BROKER_MISSING(broker(), "prototype for map " << *this);
+ return {};
+ }
+ return HeapObjectRef(broker(), prototype_data);
+}
+
+void MapRef::SerializeRootMap() {
+ if (data_->should_access_heap()) return;
+ CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
+ data()->AsMap()->SerializeRootMap(broker());
+}
+
+// TODO(solanes, v8:7790): Remove base::Optional from the return type when
+// deleting serialization.
+base::Optional<MapRef> MapRef::FindRootMap() const {
+ if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
+ // TODO(solanes): Change TryMakeRef to MakeRef when Map is moved to
+ // kNeverSerialized.
+ // TODO(solanes, v8:7790): Consider caching the result of the root map.
+ return TryMakeRef(broker(), object()->FindRootMap(broker()->isolate()));
+ }
+ ObjectData* map_data = data()->AsMap()->FindRootMap();
+ if (map_data != nullptr) {
+ return MapRef(broker(), map_data);
+ }
+ TRACE_BROKER_MISSING(broker(), "root map for object " << *this);
+ return base::nullopt;
+}
+
+bool JSTypedArrayRef::is_on_heap() const {
+ if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
+ // Safe to read concurrently because:
+ // - host object seen by serializer.
+ // - underlying field written 1. during initialization or 2. with
+ // release-store.
+ return object()->is_on_heap(kAcquireLoad);
+ }
+  return data()->AsJSTypedArray()->is_on_heap();
+}
+
+size_t JSTypedArrayRef::length() const {
+ CHECK(!is_on_heap());
+ if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
+ // Safe to read concurrently because:
+ // - immutable after initialization.
+ // - host object seen by serializer.
+ return object()->length();
+ }
+ return data()->AsJSTypedArray()->length();
+}
+
+HeapObjectRef JSTypedArrayRef::buffer() const {
+ CHECK(!is_on_heap());
+ if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
+ // Safe to read concurrently because:
+ // - immutable after initialization.
+ // - host object seen by serializer.
+ return MakeRef<HeapObject>(broker(), object()->buffer());
+ }
+ return HeapObjectRef{broker(), data()->AsJSTypedArray()->buffer()};
+}
+
+void* JSTypedArrayRef::data_ptr() const {
+ CHECK(!is_on_heap());
+ if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
+ // Safe to read concurrently because:
+ // - host object seen by serializer.
+ // - underlying field written 1. during initialization or 2. protected by
+ // the is_on_heap release/acquire semantics (external_pointer store
+ // happens-before base_pointer store, and this external_pointer load
+ // happens-after base_pointer load).
+ STATIC_ASSERT(JSTypedArray::kOffHeapDataPtrEqualsExternalPointer);
+ return object()->DataPtr();
+ }
+ return data()->AsJSTypedArray()->data_ptr();
+}
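A hedged sketch of the ordering argument in the comment above (not part of the
patch; the writer-side calls are illustrative, not exact V8 signatures): the
writer publishes the external pointer before the release-store that flips the
array to off-heap, so an acquire-load observing the off-heap state also
observes the pointer.

// Writer (main thread), names illustrative:
//   set_external_pointer(ptr);                    // plain store
//   set_base_pointer(Smi::zero(), kReleaseStore); // release: publishes ptr
// Reader (the code above):
//   if (!is_on_heap(kAcquireLoad)) {              // acquire: pairs with release
//     void* p = DataPtr();                        // ptr is guaranteed visible
//   }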
+
+bool MapRef::IsInobjectSlackTrackingInProgress() const {
+ IF_ACCESS_FROM_HEAP_WITH_FLAG_C(IsInobjectSlackTrackingInProgress);
+ return Map::Bits3::ConstructionCounterBits::decode(
+ data()->AsMap()->bit_field3()) != Map::kNoSlackTracking;
+}
+
+int MapRef::constructor_function_index() const {
+ IF_ACCESS_FROM_HEAP_WITH_FLAG_C(GetConstructorFunctionIndex);
+ CHECK(IsPrimitiveMap());
+ return data()->AsMap()->constructor_function_index();
+}
+
+bool MapRef::is_stable() const {
+ IF_ACCESS_FROM_HEAP_C(is_stable);
+ return !Map::Bits3::IsUnstableBit::decode(data()->AsMap()->bit_field3());
+}
+
+bool MapRef::CanBeDeprecated() const {
+ IF_ACCESS_FROM_HEAP_WITH_FLAG_C(CanBeDeprecated);
+ CHECK_GT(NumberOfOwnDescriptors(), 0);
+ return data()->AsMap()->can_be_deprecated();
+}
+
+bool MapRef::CanTransition() const {
+ IF_ACCESS_FROM_HEAP_WITH_FLAG_C(CanTransition);
+ return data()->AsMap()->can_transition();
+}
+
+int MapRef::GetInObjectPropertiesStartInWords() const {
+ IF_ACCESS_FROM_HEAP_WITH_FLAG_C(GetInObjectPropertiesStartInWords);
+ return data()->AsMap()->in_object_properties_start_in_words();
+}
+
+int MapRef::GetInObjectProperties() const {
+ IF_ACCESS_FROM_HEAP_C(GetInObjectProperties);
+ return data()->AsMap()->in_object_properties();
+}
+
+void ScopeInfoRef::SerializeScopeInfoChain() {
+ if (data_->should_access_heap()) return;
+ CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
+ data()->AsScopeInfo()->SerializeScopeInfoChain(broker());
+}
+
+bool StringRef::IsExternalString() const {
+ IF_ACCESS_FROM_HEAP_C(IsExternalString);
+ return data()->AsString()->is_external_string();
+}
+
+Address CallHandlerInfoRef::callback() const {
+ if (data_->should_access_heap()) {
+ return v8::ToCData<Address>(object()->callback());
+ }
+ return HeapObjectRef::data()->AsCallHandlerInfo()->callback();
+}
+
+ZoneVector<Address> FunctionTemplateInfoRef::c_functions() const {
+ if (data_->should_access_heap()) {
+ return GetCFunctions(FixedArray::cast(object()->GetCFunctionOverloads()),
+ broker()->zone());
+ }
+ return HeapObjectRef::data()->AsFunctionTemplateInfo()->c_functions();
+}
+
+ZoneVector<const CFunctionInfo*> FunctionTemplateInfoRef::c_signatures() const {
+ if (data_->should_access_heap()) {
+ return GetCSignatures(FixedArray::cast(object()->GetCFunctionOverloads()),
+ broker()->zone());
+ }
+ return HeapObjectRef::data()->AsFunctionTemplateInfo()->c_signatures();
+}
+
+bool StringRef::IsSeqString() const {
+ IF_ACCESS_FROM_HEAP_C(IsSeqString);
+ return data()->AsString()->is_seq_string();
+}
+
+bool NativeContextRef::is_unserialized_heap_object() const {
+ return data_->kind() == kUnserializedHeapObject;
+}
+
+ScopeInfoRef NativeContextRef::scope_info() const {
+ if (data_->should_access_heap()) {
+ // The scope_info is immutable after initialization.
+ return MakeRefAssumeMemoryFence(broker(), object()->scope_info());
+ }
+ return ScopeInfoRef(broker(), data()->AsNativeContext()->scope_info());
+}
+
+SharedFunctionInfoRef FeedbackVectorRef::shared_function_info() const {
+ if (data_->should_access_heap()) {
+ return MakeRef(broker(), object()->shared_function_info());
+ }
+
+ return SharedFunctionInfoRef(
+ broker(), data()->AsFeedbackVector()->shared_function_info());
+}
+
+MapRef NativeContextRef::GetFunctionMapFromIndex(int index) const {
+ DCHECK_GE(index, Context::FIRST_FUNCTION_MAP_INDEX);
+ DCHECK_LE(index, Context::LAST_FUNCTION_MAP_INDEX);
+ if (data_->should_access_heap()) {
+ CHECK_LT(index, object()->length());
+ return MakeRefAssumeMemoryFence(broker(),
+ object()->get(index, kAcquireLoad))
+ .AsMap();
+ }
+ return MapRef(broker(), data()->AsNativeContext()->function_maps().at(
+ index - Context::FIRST_FUNCTION_MAP_INDEX));
+}
+
+MapRef NativeContextRef::GetInitialJSArrayMap(ElementsKind kind) const {
+ switch (kind) {
+ case PACKED_SMI_ELEMENTS:
+ return js_array_packed_smi_elements_map();
+ case HOLEY_SMI_ELEMENTS:
+ return js_array_holey_smi_elements_map();
+ case PACKED_DOUBLE_ELEMENTS:
+ return js_array_packed_double_elements_map();
+ case HOLEY_DOUBLE_ELEMENTS:
+ return js_array_holey_double_elements_map();
+ case PACKED_ELEMENTS:
+ return js_array_packed_elements_map();
+ case HOLEY_ELEMENTS:
+ return js_array_holey_elements_map();
+ default:
+ UNREACHABLE();
+ }
+}
+
+base::Optional<JSFunctionRef> NativeContextRef::GetConstructorFunction(
+ const MapRef& map) const {
+ CHECK(map.IsPrimitiveMap());
+ switch (map.constructor_function_index()) {
+ case Map::kNoConstructorFunctionIndex:
+ return base::nullopt;
+ case Context::BIGINT_FUNCTION_INDEX:
+ return bigint_function();
+ case Context::BOOLEAN_FUNCTION_INDEX:
+ return boolean_function();
+ case Context::NUMBER_FUNCTION_INDEX:
+ return number_function();
+ case Context::STRING_FUNCTION_INDEX:
+ return string_function();
+ case Context::SYMBOL_FUNCTION_INDEX:
+ return symbol_function();
+ default:
+ UNREACHABLE();
+ }
+}
+
+bool ObjectRef::IsNull() const { return object()->IsNull(); }
+
+bool ObjectRef::IsNullOrUndefined() const {
+ if (IsSmi()) return false;
+ OddballType type = AsHeapObject().map().oddball_type();
+ return type == OddballType::kNull || type == OddballType::kUndefined;
+}
+
+bool ObjectRef::IsTheHole() const {
+ return IsHeapObject() &&
+ AsHeapObject().map().oddball_type() == OddballType::kHole;
+}
+
+base::Optional<bool> ObjectRef::TryGetBooleanValue() const {
+ if (data_->should_access_heap()) {
+ return object()->BooleanValue(broker()->isolate());
+ }
+ if (IsSmi()) return AsSmi() != 0;
+ return data()->AsHeapObject()->TryGetBooleanValue(broker());
+}
+
+Maybe<double> ObjectRef::OddballToNumber() const {
+ OddballType type = AsHeapObject().map().oddball_type();
+
+ switch (type) {
+ case OddballType::kBoolean: {
+ ObjectRef true_ref(broker(),
+ broker()->isolate()->factory()->true_value());
+ return this->equals(true_ref) ? Just(1.0) : Just(0.0);
+ break;
+ }
+ case OddballType::kUndefined: {
+ return Just(std::numeric_limits<double>::quiet_NaN());
+ break;
+ }
+ case OddballType::kNull: {
+ return Just(0.0);
+ break;
+ }
+ default: {
+ return Nothing<double>();
+ break;
+ }
+ }
+}
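For reference, the mapping implemented by the switch above (a summary, not part
of the patch):

// true      -> Just(1.0)           false -> Just(0.0)
// undefined -> Just(NaN)           null  -> Just(0.0)
// any other oddball -> Nothing<double>()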
+
+base::Optional<ObjectRef> JSObjectRef::GetOwnConstantElement(
+ uint32_t index, SerializationPolicy policy) const {
+ if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
+ // `elements` are currently still serialized as members of JSObjectRef.
+ // TODO(jgruber,v8:7790): Once JSObject is no longer serialized, we must
+ // guarantee consistency between `object`, `elements_kind` and `elements`
+ // through other means (store/load order? locks? storing elements_kind in
+ // elements.map?).
+ STATIC_ASSERT(IsSerializedRef<JSObject>());
+
+ base::Optional<FixedArrayBaseRef> maybe_elements_ref = elements();
+ if (!maybe_elements_ref.has_value()) {
+      TRACE_BROKER_MISSING(broker(), "JSObject::elements on " << *this);
+ return {};
+ }
+
+ FixedArrayBaseRef elements_ref = maybe_elements_ref.value();
+ ElementsKind elements_kind = GetElementsKind();
+
+ DCHECK_LE(index, JSObject::kMaxElementIndex);
+
+ // See also ElementsAccessorBase::GetMaxIndex.
+ if (IsJSArray()) {
+ // For JSArrays we additionally need to check against JSArray::length.
+      // length_unsafe() is safe to use in this case since:
+ // - GetOwnConstantElement only detects a constant for JSArray holders if
+ // the array is frozen/sealed.
+ // - Frozen/sealed arrays can't change length.
+ // - We've already seen a map with frozen/sealed elements_kinds (above);
+ // - The release-load of that map ensures we read the newest value
+ // of `length` below.
+ uint32_t array_length;
+ if (!AsJSArray().length_unsafe().object()->ToArrayLength(&array_length)) {
+ return {};
+ }
+ if (index >= array_length) return {};
+ }
+
+ Object maybe_element;
+ auto result = ConcurrentLookupIterator::TryGetOwnConstantElement(
+ &maybe_element, broker()->isolate(), broker()->local_isolate(),
+ *object(), *elements_ref.object(), elements_kind, index);
+
+ if (result == ConcurrentLookupIterator::kGaveUp) {
+ TRACE_BROKER_MISSING(broker(), "JSObject::GetOwnConstantElement on "
+ << *this << " at index " << index);
+ return {};
+ } else if (result == ConcurrentLookupIterator::kNotPresent) {
+ return {};
+ }
+
+ DCHECK_EQ(result, ConcurrentLookupIterator::kPresent);
+ return MakeRef(broker(), maybe_element);
+ } else {
+ ObjectData* element =
+ data()->AsJSObject()->GetOwnConstantElement(broker(), index, policy);
+ if (element == nullptr) return base::nullopt;
+ return ObjectRef(broker(), element);
+ }
+}
+
+base::Optional<ObjectRef> JSObjectRef::GetOwnFastDataProperty(
+ Representation field_representation, FieldIndex index,
+ SerializationPolicy policy) const {
+ if (data_->should_access_heap()) {
+ return GetOwnFastDataPropertyFromHeap(broker(),
+ Handle<JSObject>::cast(object()),
+ field_representation, index);
+ }
+ ObjectData* property = data()->AsJSObject()->GetOwnFastDataProperty(
+ broker(), field_representation, index, policy);
+ if (property == nullptr) return base::nullopt;
+ return ObjectRef(broker(), property);
+}
+
+ObjectRef JSObjectRef::GetOwnDictionaryProperty(
+ InternalIndex index, SerializationPolicy policy) const {
+ CHECK(index.is_found());
+ if (data_->should_access_heap()) {
+ return GetOwnDictionaryPropertyFromHeap(
+ broker(), Handle<JSObject>::cast(object()), index);
+ }
+ ObjectData* property =
+ data()->AsJSObject()->GetOwnDictionaryProperty(broker(), index, policy);
+ CHECK_NE(property, nullptr);
+ return ObjectRef(broker(), property);
+}
+
+ObjectRef JSArrayRef::GetBoilerplateLength() const {
+ // Safe to read concurrently because:
+ // - boilerplates are immutable after initialization.
+ // - boilerplates are published into the feedback vector.
+ return length_unsafe();
+}
+
+ObjectRef JSArrayRef::length_unsafe() const {
+ if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
+ return MakeRef(broker(),
+ object()->length(broker()->isolate(), kRelaxedLoad));
+ } else {
+ return ObjectRef{broker(), data()->AsJSArray()->length()};
+ }
+}
+
+base::Optional<ObjectRef> JSArrayRef::GetOwnCowElement(
+ FixedArrayBaseRef elements_ref, uint32_t index,
+ SerializationPolicy policy) const {
+ if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
+ // `elements` are currently still serialized as members of JSObjectRef.
+ // TODO(jgruber,v8:7790): Remove the elements equality DCHECK below once
+ // JSObject is no longer serialized.
+ static_assert(std::is_base_of<JSObject, JSArray>::value, "");
+ STATIC_ASSERT(IsSerializedRef<JSObject>());
+
+ // The elements_ref is passed in by callers to make explicit that it is
+ // also used outside of this function, and must match the `elements` used
+ // inside this function.
+ DCHECK(elements_ref.equals(elements().value()));
+
+ // Due to concurrency, the kind read here may not be consistent with
+ // `elements_ref`. But consistency is guaranteed at runtime due to the
+ // `elements` equality check in the caller.
+ ElementsKind elements_kind = GetElementsKind();
+
+ // We only inspect fixed COW arrays, which may only occur for fast
+ // smi/objects elements kinds.
+ if (!IsSmiOrObjectElementsKind(elements_kind)) return {};
+ DCHECK(IsFastElementsKind(elements_kind));
+ if (!elements_ref.map().IsFixedCowArrayMap()) return {};
+
+ // As the name says, the `length` read here is unsafe and may not match
+ // `elements`. We rely on the invariant that any `length` change will
+ // also result in an `elements` change to make this safe. The `elements`
+ // equality check in the caller thus also guards the value of `length`.
+ ObjectRef length_ref = length_unsafe();
+
+ // Likewise we only deal with smi lengths.
+ if (!length_ref.IsSmi()) return {};
+
+ base::Optional<Object> result =
+ ConcurrentLookupIterator::TryGetOwnCowElement(
+ broker()->isolate(), *elements_ref.AsFixedArray().object(),
+ elements_kind, length_ref.AsSmi(), index);
+ if (!result.has_value()) return {};
+
+ return MakeRef(broker(), result.value());
+ } else {
+ DCHECK(!data_->should_access_heap());
+ DCHECK(!broker()->is_concurrent_inlining());
+
+ // Just to clarify that `elements_ref` is not used on this path.
+ // GetOwnElement accesses the serialized `elements` field on its own.
+ USE(elements_ref);
+
+ if (!elements().value().map().IsFixedCowArrayMap()) return base::nullopt;
+
+ ObjectData* element =
+ data()->AsJSArray()->GetOwnElement(broker(), index, policy);
+ if (element == nullptr) return base::nullopt;
+ return ObjectRef(broker(), element);
+ }
+}
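A hypothetical caller sketch (not part of the patch; the reducer-style control
flow and default SerializationPolicy are assumptions) showing the contract the
comments above describe: the caller reads `elements` once, passes that same ref
in, and the generated code later re-checks that exact elements object, which
also pins `length`:

base::Optional<FixedArrayBaseRef> elements = array_ref.elements();
if (!elements.has_value()) return base::nullopt;
base::Optional<ObjectRef> constant =
    array_ref.GetOwnCowElement(elements.value(), index);
if (!constant.has_value()) return base::nullopt;
// Compiled code must still verify at runtime that the array holds exactly
// *elements (reference equality); per the comments above, that check also
// guards the value of `length`.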
+
+base::Optional<CellRef> SourceTextModuleRef::GetCell(int cell_index) const {
+ if (data_->should_access_heap()) {
+ return TryMakeRef(broker(), object()->GetCell(cell_index));
+ }
+ ObjectData* cell =
+ data()->AsSourceTextModule()->GetCell(broker(), cell_index);
+ if (cell == nullptr) return base::nullopt;
+ return CellRef(broker(), cell);
+}
+
+base::Optional<ObjectRef> SourceTextModuleRef::import_meta() const {
+ if (data_->should_access_heap()) {
+ return TryMakeRef(broker(), object()->import_meta(kAcquireLoad));
+ }
+ return ObjectRef(broker(),
+ data()->AsSourceTextModule()->GetImportMeta(broker()));
+}
+
+ObjectRef::ObjectRef(JSHeapBroker* broker, Handle<Object> object,
+ bool check_type)
+ : broker_(broker) {
+ CHECK_NE(broker->mode(), JSHeapBroker::kRetired);
+ data_ = broker->GetOrCreateData(object);
+}
+
+namespace {
+
+OddballType GetOddballType(Isolate* isolate, Map map) {
+ if (map.instance_type() != ODDBALL_TYPE) {
+ return OddballType::kNone;
+ }
+ ReadOnlyRoots roots(isolate);
+ if (map == roots.undefined_map()) {
+ return OddballType::kUndefined;
+ }
+ if (map == roots.null_map()) {
+ return OddballType::kNull;
+ }
+ if (map == roots.boolean_map()) {
+ return OddballType::kBoolean;
+ }
+ if (map == roots.the_hole_map()) {
+ return OddballType::kHole;
+ }
+ if (map == roots.uninitialized_map()) {
+ return OddballType::kUninitialized;
+ }
+ DCHECK(map == roots.termination_exception_map() ||
+ map == roots.arguments_marker_map() ||
+ map == roots.optimized_out_map() || map == roots.stale_register_map());
+ return OddballType::kOther;
+}
+
+} // namespace
+
+HeapObjectType HeapObjectRef::GetHeapObjectType() const {
+ if (data_->should_access_heap()) {
+ Map map = Handle<HeapObject>::cast(object())->map();
+ HeapObjectType::Flags flags(0);
+ if (map.is_undetectable()) flags |= HeapObjectType::kUndetectable;
+ if (map.is_callable()) flags |= HeapObjectType::kCallable;
+ return HeapObjectType(map.instance_type(), flags,
+ GetOddballType(broker()->isolate(), map));
+ }
+ HeapObjectType::Flags flags(0);
+ if (map().is_undetectable()) flags |= HeapObjectType::kUndetectable;
+ if (map().is_callable()) flags |= HeapObjectType::kCallable;
+ return HeapObjectType(map().instance_type(), flags, map().oddball_type());
+}
+
+base::Optional<JSObjectRef> AllocationSiteRef::boilerplate() const {
+ if (data_->should_access_heap()) {
+ return TryMakeRef(broker(), object()->boilerplate(kAcquireLoad));
+ }
+ ObjectData* boilerplate = data()->AsAllocationSite()->boilerplate();
+ if (boilerplate) {
+ return JSObjectRef(broker(), boilerplate);
+ } else {
+ return base::nullopt;
+ }
+}
+
+ElementsKind JSObjectRef::GetElementsKind() const {
+ return map().elements_kind();
+}
+
+base::Optional<FixedArrayBaseRef> JSObjectRef::elements() const {
+ if (data_->should_access_heap()) {
+ return TryMakeRef(broker(), object()->elements());
+ }
+ const JSObjectData* d = data()->AsJSObject();
+ if (!d->serialized_elements()) {
+ TRACE(broker(), "'elements' on " << this);
+ return base::nullopt;
+ }
+ return FixedArrayBaseRef(broker(), d->elements());
+}
+
+int FixedArrayBaseRef::length() const {
+ IF_ACCESS_FROM_HEAP_C(length);
+ return data()->AsFixedArrayBase()->length();
+}
+
+PropertyDetails DescriptorArrayRef::GetPropertyDetails(
+ InternalIndex descriptor_index) const {
+ if (data_->should_access_heap()) {
+ return object()->GetDetails(descriptor_index);
+ }
+ return data()->AsDescriptorArray()->GetPropertyDetails(descriptor_index);
+}
+
+NameRef DescriptorArrayRef::GetPropertyKey(
+ InternalIndex descriptor_index) const {
+ if (data_->should_access_heap()) {
+ NameRef result = MakeRef(broker(), object()->GetKey(descriptor_index));
+ CHECK(result.IsUniqueName());
+ return result;
+ }
+ return NameRef(broker(),
+ data()->AsDescriptorArray()->GetPropertyKey(descriptor_index));
+}
+
+ObjectRef DescriptorArrayRef::GetFieldType(
+ InternalIndex descriptor_index) const {
+ if (data_->should_access_heap()) {
+ return MakeRef<Object>(broker(), object()->GetFieldType(descriptor_index));
+ }
+ return ObjectRef(broker(),
+ data()->AsDescriptorArray()->GetFieldType(descriptor_index));
+}
+
+base::Optional<ObjectRef> DescriptorArrayRef::GetStrongValue(
+ InternalIndex descriptor_index) const {
+ if (data_->should_access_heap()) {
+ HeapObject heap_object;
+ if (!object()
+ ->GetValue(descriptor_index)
+ .GetHeapObjectIfStrong(&heap_object)) {
+ return {};
+ }
+ // Since the descriptors in the descriptor array can be changed in-place
+ // via DescriptorArray::Replace, we might get a value that we haven't seen
+ // before.
+ return TryMakeRef(broker(), heap_object);
+ }
+ ObjectData* value =
+ data()->AsDescriptorArray()->GetStrongValue(descriptor_index);
+ if (!value) return base::nullopt;
+ return ObjectRef(broker(), value);
+}
+
+base::Optional<SharedFunctionInfoRef> FeedbackCellRef::shared_function_info()
+ const {
+ if (value()) {
+ FeedbackVectorRef vector = *value();
+ if (vector.serialized()) {
+ return vector.shared_function_info();
+ }
+ }
+ return base::nullopt;
+}
+
+void FeedbackVectorRef::Serialize() {
+ if (data_->should_access_heap()) return;
+ CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
+ data()->AsFeedbackVector()->Serialize(broker());
+}
+
+bool FeedbackVectorRef::serialized() const {
+ if (data_->should_access_heap()) return true;
+ return data()->AsFeedbackVector()->serialized();
+}
+
+bool NameRef::IsUniqueName() const {
+ // Must match Name::IsUniqueName.
+ return IsInternalizedString() || IsSymbol();
+}
+
+void RegExpBoilerplateDescriptionRef::Serialize() {
+ if (data_->should_access_heap()) return;
+ CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
+ HeapObjectRef::data()->AsRegExpBoilerplateDescription()->Serialize(broker());
+}
+
+Handle<Object> ObjectRef::object() const {
+#ifdef DEBUG
+ if (broker()->mode() == JSHeapBroker::kSerialized &&
+ data_->used_status == ObjectData::Usage::kUnused) {
+ data_->used_status = ObjectData::Usage::kOnlyIdentityUsed;
+ }
+#endif // DEBUG
+ return data_->object();
+}
+
+#ifdef DEBUG
+#define DEF_OBJECT_GETTER(T, ...) \
+ Handle<T> T##Ref::object() const { \
+ if (broker()->mode() == JSHeapBroker::kSerialized && \
+ data_->used_status == ObjectData::Usage::kUnused) { \
+ data_->used_status = ObjectData::Usage::kOnlyIdentityUsed; \
+ } \
+ return Handle<T>(reinterpret_cast<Address*>(data_->object().address())); \
+ }
+#else
+#define DEF_OBJECT_GETTER(T, ...) \
+ Handle<T> T##Ref::object() const { \
+ return Handle<T>(reinterpret_cast<Address*>(data_->object().address())); \
+ }
+#endif // DEBUG
+
+HEAP_BROKER_OBJECT_LIST(DEF_OBJECT_GETTER)
+#undef DEF_OBJECT_GETTER
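As a reader's aid (not part of the patch), the non-DEBUG getter that
DEF_OBJECT_GETTER generates for, e.g., Map is simply:

Handle<Map> MapRef::object() const {
  return Handle<Map>(reinterpret_cast<Address*>(data_->object().address()));
}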
+
+JSHeapBroker* ObjectRef::broker() const { return broker_; }
+
+ObjectData* ObjectRef::data() const {
+ switch (broker()->mode()) {
+ case JSHeapBroker::kDisabled:
+ CHECK_NE(data_->kind(), kSerializedHeapObject);
+ return data_;
+ case JSHeapBroker::kSerializing:
+ CHECK_NE(data_->kind(), kUnserializedHeapObject);
+ return data_;
+ case JSHeapBroker::kSerialized:
+#ifdef DEBUG
+ data_->used_status = ObjectData::Usage::kDataUsed;
+#endif // DEBUG
+ CHECK_NE(data_->kind(), kUnserializedHeapObject);
+ return data_;
+ case JSHeapBroker::kRetired:
+ UNREACHABLE();
+ }
+}
+
+Reduction NoChangeBecauseOfMissingData(JSHeapBroker* broker,
+ const char* function, int line) {
+ TRACE_MISSING(broker, "data in function " << function << " at line " << line);
+ return AdvancedReducer::NoChange();
+}
+
+NativeContextData::NativeContextData(JSHeapBroker* broker, ObjectData** storage,
+ Handle<NativeContext> object)
+ : ContextData(broker, storage, object),
+ state_(State::kUnserialized),
+ function_maps_(broker->zone()) {}
+
+void NativeContextData::Serialize(JSHeapBroker* broker) {
+ if (state_ != State::kUnserialized) return;
+ state_ = State::kSerializedOnMainThread;
+
+ TraceScope tracer(broker, this, "NativeContextData::Serialize");
+ Handle<NativeContext> context = Handle<NativeContext>::cast(object());
+
+#define SERIALIZE_MEMBER(type, name) \
+ DCHECK_NULL(name##_); \
+ name##_ = broker->GetOrCreateData(context->name()); \
+ if (!name##_->should_access_heap()) { \
+ if (name##_->IsMap() && \
+ !InstanceTypeChecker::IsContext(name##_->AsMap()->instance_type())) { \
+ name##_->AsMap()->SerializeConstructor(broker); \
+ } \
+ if (name##_->IsJSFunction()) { \
+ name##_->AsJSFunction()->Serialize(broker); \
+ } \
+ }
+ BROKER_COMPULSORY_NATIVE_CONTEXT_FIELDS(SERIALIZE_MEMBER)
+ if (!broker->is_isolate_bootstrapping()) {
+ BROKER_OPTIONAL_NATIVE_CONTEXT_FIELDS(SERIALIZE_MEMBER)
+ }
+#undef SERIALIZE_MEMBER
+
+ if (!bound_function_with_constructor_map_->should_access_heap()) {
+ bound_function_with_constructor_map_->AsMap()->SerializePrototype(broker);
+ }
+ if (!bound_function_without_constructor_map_->should_access_heap()) {
+ bound_function_without_constructor_map_->AsMap()->SerializePrototype(
+ broker);
+ }
+
+ scope_info_ = broker->GetOrCreateData(context->scope_info());
+}
+
+void NativeContextData::SerializeOnBackground(JSHeapBroker* broker) {
+ if (state_ == State::kFullySerialized) return;
+ DCHECK_EQ(state_, State::kSerializedOnMainThread);
+  state_ = State::kFullySerialized;
+
+ UnparkedScopeIfNeeded unparked_scope(broker);
+ TraceScope tracer(broker, this, "NativeContextData::SerializeOnBackground");
+ Handle<NativeContext> context = Handle<NativeContext>::cast(object());
+
+#define SERIALIZE_MEMBER(type, name) \
+ DCHECK_NULL(name##_); \
+ name##_ = broker->GetOrCreateData(context->name(kAcquireLoad), \
+ kAssumeMemoryFence); \
+ if (!name##_->should_access_heap()) { \
+ DCHECK(!name##_->IsJSFunction()); \
+ }
+ BROKER_COMPULSORY_BACKGROUND_NATIVE_CONTEXT_FIELDS(SERIALIZE_MEMBER)
+ if (!broker->is_isolate_bootstrapping()) {
+ BROKER_OPTIONAL_BACKGROUND_NATIVE_CONTEXT_FIELDS(SERIALIZE_MEMBER)
+ }
+#undef SERIALIZE_MEMBER
+
+ DCHECK(function_maps_.empty());
+ int const first = Context::FIRST_FUNCTION_MAP_INDEX;
+ int const last = Context::LAST_FUNCTION_MAP_INDEX;
+ function_maps_.reserve(last + 1 - first);
+ for (int i = first; i <= last; ++i) {
+ function_maps_.push_back(broker->GetOrCreateData(
+ context->get(i, kAcquireLoad), kAssumeMemoryFence));
+ }
+}
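As a reader's aid (not part of the patch), SERIALIZE_MEMBER above expands
mechanically; for a hypothetical background-serialized field foo_map it would
produce:

DCHECK_NULL(foo_map_);
foo_map_ = broker->GetOrCreateData(context->foo_map(kAcquireLoad),
                                   kAssumeMemoryFence);
if (!foo_map_->should_access_heap()) {
  DCHECK(!foo_map_->IsJSFunction());
}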
+
+void JSFunctionRef::Serialize() {
+ if (data_->should_access_heap()) return;
+ CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
+ data()->AsJSFunction()->Serialize(broker());
+}
+
+void JSFunctionRef::SerializeCodeAndFeedback() {
+ if (data_->should_access_heap()) return;
+ CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
+ data()->AsJSFunction()->SerializeCodeAndFeedback(broker());
+}
+
+bool JSBoundFunctionRef::serialized() const {
+ if (data_->should_access_heap()) return true;
+ if (data_->AsJSBoundFunction()->serialized()) return true;
+ TRACE_BROKER_MISSING(broker(), "data for JSBoundFunction " << this);
+ return false;
+}
+
+bool JSFunctionRef::serialized() const {
+ if (data_->should_access_heap()) return true;
+ if (data_->AsJSFunction()->serialized()) return true;
+ TRACE_BROKER_MISSING(broker(), "data for JSFunction " << this);
+ return false;
+}
+
+bool JSFunctionRef::serialized_code_and_feedback() const {
+ if (data_->should_access_heap()) return true;
+ return data()->AsJSFunction()->serialized_code_and_feedback();
+}
+
+CodeRef JSFunctionRef::code() const {
+ if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
+ return MakeRefAssumeMemoryFence(broker(), object()->code(kAcquireLoad));
+ }
+
+ return CodeRef(broker(), ObjectRef::data()->AsJSFunction()->code());
+}
+
+void SharedFunctionInfoRef::SerializeFunctionTemplateInfo() {
+ if (data_->should_access_heap()) return;
+ CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
+ data()->AsSharedFunctionInfo()->SerializeFunctionTemplateInfo(broker());
+}
+
+void SharedFunctionInfoRef::SerializeScopeInfoChain() {
+ if (data_->should_access_heap()) return;
+ CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
+ data()->AsSharedFunctionInfo()->SerializeScopeInfoChain(broker());
+}
+
+base::Optional<FunctionTemplateInfoRef>
+SharedFunctionInfoRef::function_template_info() const {
+ if (data_->should_access_heap()) {
+ if (!object()->IsApiFunction()) return {};
+ return TryMakeRef(broker(), FunctionTemplateInfo::cast(
+ object()->function_data(kAcquireLoad)));
+ }
+ ObjectData* function_template_info =
+ data()->AsSharedFunctionInfo()->function_template_info();
+ if (!function_template_info) return base::nullopt;
+ return FunctionTemplateInfoRef(broker(), function_template_info);
+}
+
+int SharedFunctionInfoRef::context_header_size() const {
+ IF_ACCESS_FROM_HEAP_C(scope_info().ContextHeaderLength);
+ return data()->AsSharedFunctionInfo()->context_header_size();
+}
+
+ScopeInfoRef SharedFunctionInfoRef::scope_info() const {
+ if (data_->should_access_heap()) {
+ return MakeRef(broker(), object()->scope_info());
+ }
+ return ScopeInfoRef(broker(), data()->AsSharedFunctionInfo()->scope_info());
+}
+
+void JSObjectRef::SerializeObjectCreateMap() {
+ if (data_->should_access_heap()) return;
+ CHECK_IMPLIES(!FLAG_turbo_concurrent_get_property_access_info,
+ broker()->mode() == JSHeapBroker::kSerializing);
+ data()->AsJSObject()->SerializeObjectCreateMap(broker());
+}
+
+bool MapRef::TrySerializeOwnDescriptor(InternalIndex descriptor_index) {
+ CHECK_LT(descriptor_index.as_int(), NumberOfOwnDescriptors());
+ if (data_->should_access_heap()) return true;
+ CHECK_IMPLIES(!FLAG_turbo_concurrent_get_property_access_info,
+ broker()->mode() == JSHeapBroker::kSerializing);
+ return data()->AsMap()->TrySerializeOwnDescriptor(broker(), descriptor_index);
+}
+
+void MapRef::SerializeOwnDescriptor(InternalIndex descriptor_index) {
+ CHECK(TrySerializeOwnDescriptor(descriptor_index));
+}
+
+bool MapRef::serialized_own_descriptor(InternalIndex descriptor_index) const {
+ CHECK_LT(descriptor_index.as_int(), NumberOfOwnDescriptors());
+ if (data_->should_access_heap()) return true;
+ ObjectData* maybe_desc_array_data = data()->AsMap()->instance_descriptors();
+ if (!maybe_desc_array_data) return false;
+ if (maybe_desc_array_data->should_access_heap()) return true;
+ DescriptorArrayData* desc_array_data =
+ maybe_desc_array_data->AsDescriptorArray();
+ return desc_array_data->serialized_descriptor(descriptor_index);
+}
+
+void MapRef::SerializeBackPointer() {
+ if (data_->should_access_heap()) return;
+ CHECK_IMPLIES(!FLAG_turbo_concurrent_get_property_access_info,
+ broker()->mode() == JSHeapBroker::kSerializing);
+ data()->AsMap()->SerializeBackPointer(broker());
+}
+
+bool MapRef::TrySerializePrototype() {
+ if (data_->should_access_heap()) return true;
+ CHECK_IMPLIES(!FLAG_turbo_concurrent_get_property_access_info,
+ broker()->mode() == JSHeapBroker::kSerializing);
+ return data()->AsMap()->TrySerializePrototype(broker());
+}
+
+void MapRef::SerializePrototype() { CHECK(TrySerializePrototype()); }
+
+void SourceTextModuleRef::Serialize() {
+ if (data_->should_access_heap()) return;
+ CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
+ data()->AsSourceTextModule()->Serialize(broker());
+}
+
+void NativeContextRef::Serialize() {
+ if (data_->should_access_heap()) return;
+ CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
+ data()->AsNativeContext()->Serialize(broker());
+}
+
+void NativeContextRef::SerializeOnBackground() {
+ if (data_->should_access_heap()) return;
+ CHECK(broker()->mode() == JSHeapBroker::kSerializing ||
+ broker()->mode() == JSHeapBroker::kSerialized);
+ data()->AsNativeContext()->SerializeOnBackground(broker());
+}
+
+void JSTypedArrayRef::Serialize() {
+ if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
+ // Even if the typed array object itself is no longer serialized (besides
+ // the JSObject parts), the `buffer` field still is and thus we need to
+ // make sure to visit it.
+ // TODO(jgruber,v8:7790): Remove once JSObject is no longer serialized.
+ static_assert(
+ std::is_base_of<JSObject, decltype(object()->buffer())>::value, "");
+ STATIC_ASSERT(IsSerializedRef<JSObject>());
+ MakeRef<JSObject>(broker(), object()->buffer());
+ } else {
+ CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
+ data()->AsJSTypedArray()->Serialize(broker());
+ }
+}
+
+bool JSTypedArrayRef::serialized() const {
+ if (data_->should_access_heap()) return true;
+ if (broker()->is_concurrent_inlining()) return true;
+ if (data_->AsJSTypedArray()->serialized()) return true;
+ TRACE_BROKER_MISSING(broker(), "data for JSTypedArray " << this);
+ return false;
+}
+
+bool JSBoundFunctionRef::Serialize() {
+ if (data_->should_access_heap()) return true;
+ CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
+ return data()->AsJSBoundFunction()->Serialize(broker());
+}
+
+bool PropertyCellRef::Serialize() const {
+ if (data_->should_access_heap()) return true;
+ CHECK(broker()->mode() == JSHeapBroker::kSerializing ||
+ broker()->mode() == JSHeapBroker::kSerialized);
+ return data()->AsPropertyCell()->Serialize(broker());
+}
+
+void FunctionTemplateInfoRef::SerializeCallCode() {
+ if (data_->should_access_heap()) {
+ CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
+ // CallHandlerInfo::data may still hold a serialized heap object, so we
+ // have to make the broker aware of it.
+ // TODO(v8:7790): Remove this case once ObjectRef is never serialized.
+ Handle<HeapObject> call_code(object()->call_code(kAcquireLoad),
+ broker()->isolate());
+ if (call_code->IsCallHandlerInfo()) {
+ broker()->GetOrCreateData(
+ Handle<CallHandlerInfo>::cast(call_code)->data());
+ }
+ return;
+ }
+ CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
+ data()->AsFunctionTemplateInfo()->SerializeCallCode(broker());
+}
+
+base::Optional<PropertyCellRef> JSGlobalObjectRef::GetPropertyCell(
+ NameRef const& name, SerializationPolicy policy) const {
+ if (data_->should_access_heap()) {
+ return GetPropertyCellFromHeap(broker(), name.object());
+ }
+ ObjectData* property_cell_data = data()->AsJSGlobalObject()->GetPropertyCell(
+ broker(), name.data(), policy);
+ if (property_cell_data == nullptr) return base::nullopt;
+ return PropertyCellRef(broker(), property_cell_data);
+}
+
+std::ostream& operator<<(std::ostream& os, const ObjectRef& ref) {
+ if (!FLAG_concurrent_recompilation) {
+ // We cannot be in a background thread so it's safe to read the heap.
+ AllowHandleDereference allow_handle_dereference;
+ return os << ref.data() << " {" << ref.object() << "}";
+ } else if (ref.data_->should_access_heap()) {
+ return os << ref.data() << " {" << ref.object() << "}";
+ } else {
+ return os << ref.data();
+ }
+}
+
+unsigned CodeRef::GetInlinedBytecodeSize() const {
+ if (data_->should_access_heap()) {
+ unsigned value = object()->inlined_bytecode_size();
+ if (value > 0) {
+ // Don't report inlined bytecode size if the code object was already
+ // deoptimized.
+ value = object()->marked_for_deoptimization() ? 0 : value;
+ }
+ return value;
+ }
+
+ return ObjectRef::data()->AsCode()->inlined_bytecode_size();
+}
+
+#undef BIMODAL_ACCESSOR
+#undef BIMODAL_ACCESSOR_B
+#undef BIMODAL_ACCESSOR_C
+#undef IF_ACCESS_FROM_HEAP
+#undef IF_ACCESS_FROM_HEAP_C
+#undef TRACE
+#undef TRACE_MISSING
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/chromium/v8/src/compiler/heap-refs.h b/chromium/v8/src/compiler/heap-refs.h
index 9d125537661..d1f40bbedfd 100644
--- a/chromium/v8/src/compiler/heap-refs.h
+++ b/chromium/v8/src/compiler/heap-refs.h
@@ -40,6 +40,13 @@ struct WasmModule;
namespace compiler {
+class CompilationDependencies;
+struct FeedbackSource;
+class JSHeapBroker;
+class ObjectData;
+class PerIsolateCompilerCache;
+class PropertyAccessInfo;
+
// Whether we are loading a property or storing to a property.
// For a store during literal creation, do not walk up the prototype chain.
enum class AccessMode { kLoad, kStore, kStoreInLiteral, kHas };
@@ -60,111 +67,100 @@ enum class OddballType : uint8_t {
kOther // Oddball, but none of the above.
};
-// This list is sorted such that subtypes appear before their supertypes.
-// This list must not contain a type if it doesn't contain all of its subtypes
-// too. For example, it CANNOT contain FixedArrayBase if it doesn't contain
-// FixedDoubleArray, BytecodeArray and FixedArray.
-// DO NOT VIOLATE THESE TWO PROPERTIES!
-// Classes on this list will skip serialization when
-// FLAG_turbo_direct_heap_access is on. Otherwise, they might get serialized.
-#define HEAP_BROKER_NEVER_SERIALIZED_OBJECT_LIST(V) \
- /* Subtypes of FixedArray */ \
- V(ObjectBoilerplateDescription) \
- V(ScopeInfo) \
- /* Subtypes of String */ \
- V(InternalizedString) \
- /* Subtypes of FixedArrayBase */ \
- V(BytecodeArray) \
- /* Subtypes of Name */ \
- V(String) \
- V(Symbol) \
- /* Subtypes of HeapObject */ \
- V(AccessorInfo) \
- V(ArrayBoilerplateDescription) \
- V(CallHandlerInfo) \
- V(Cell) \
- V(Code) \
- V(DescriptorArray) \
- V(FeedbackCell) \
- V(FeedbackVector) \
- V(FunctionTemplateInfo) \
- V(Name) \
- V(RegExpBoilerplateDescription) \
- V(SharedFunctionInfo) \
- V(TemplateObjectDescription)
+enum class RefSerializationKind {
+ // Will skip serialization when --concurrent-inlining is on. Otherwise, they
+ // might get serialized. (The cake is a lie.)
+ kNeverSerialized,
+ // Can be serialized on demand from the background thread.
+ kBackgroundSerialized,
+ kSerialized,
+};
// This list is sorted such that subtypes appear before their supertypes.
// DO NOT VIOLATE THIS PROPERTY!
-// Classes in this list behave like serialized classes, but they allow lazy
-// serialization from background threads where this is safe (e.g. for objects
-// that are immutable and fully initialized once visible). Pass
-// ObjectRef::BackgroundSerialization::kAllowed to the ObjectRef constructor
-// for objects where serialization from the background thread is safe.
-#define HEAP_BROKER_POSSIBLY_BACKGROUND_SERIALIZED_OBJECT_LIST(V) \
- /* Subtypes of HeapObject */ \
- V(BigInt) \
- V(HeapNumber) \
- V(Map)
+#define HEAP_BROKER_OBJECT_LIST(V) \
+ /* Subtypes of JSObject */ \
+ V(JSArray, RefSerializationKind::kSerialized) \
+ V(JSBoundFunction, RefSerializationKind::kSerialized) \
+ V(JSDataView, RefSerializationKind::kSerialized) \
+ V(JSFunction, RefSerializationKind::kSerialized) \
+ V(JSGlobalObject, RefSerializationKind::kSerialized) \
+ V(JSGlobalProxy, RefSerializationKind::kSerialized) \
+ V(JSTypedArray, RefSerializationKind::kSerialized) \
+ /* Subtypes of Context */ \
+ V(NativeContext, RefSerializationKind::kSerialized) \
+ /* Subtypes of FixedArray */ \
+ V(ObjectBoilerplateDescription, RefSerializationKind::kNeverSerialized) \
+ V(ScriptContextTable, RefSerializationKind::kBackgroundSerialized) \
+ /* Subtypes of String */ \
+ V(InternalizedString, RefSerializationKind::kNeverSerialized) \
+ /* Subtypes of FixedArrayBase */ \
+ V(BytecodeArray, RefSerializationKind::kNeverSerialized) \
+ V(FixedArray, RefSerializationKind::kBackgroundSerialized) \
+ V(FixedDoubleArray, RefSerializationKind::kNeverSerialized) \
+ /* Subtypes of Name */ \
+ V(String, RefSerializationKind::kNeverSerialized) \
+ V(Symbol, RefSerializationKind::kNeverSerialized) \
+ /* Subtypes of JSReceiver */ \
+ V(JSObject, RefSerializationKind::kSerialized) \
+ /* Subtypes of HeapObject */ \
+ V(AccessorInfo, RefSerializationKind::kNeverSerialized) \
+ V(AllocationSite, RefSerializationKind::kSerialized) \
+ V(ArrayBoilerplateDescription, RefSerializationKind::kNeverSerialized) \
+ V(BigInt, RefSerializationKind::kBackgroundSerialized) \
+ V(CallHandlerInfo, RefSerializationKind::kNeverSerialized) \
+ V(Cell, RefSerializationKind::kNeverSerialized) \
+ V(Code, RefSerializationKind::kNeverSerialized) \
+ V(Context, RefSerializationKind::kSerialized) \
+ V(DescriptorArray, RefSerializationKind::kNeverSerialized) \
+ V(FeedbackCell, RefSerializationKind::kNeverSerialized) \
+ V(FeedbackVector, RefSerializationKind::kNeverSerialized) \
+ V(FixedArrayBase, RefSerializationKind::kBackgroundSerialized) \
+ V(FunctionTemplateInfo, RefSerializationKind::kNeverSerialized) \
+ V(HeapNumber, RefSerializationKind::kNeverSerialized) \
+ V(JSReceiver, RefSerializationKind::kBackgroundSerialized) \
+ V(Map, RefSerializationKind::kBackgroundSerialized) \
+ V(Name, RefSerializationKind::kNeverSerialized) \
+ V(PropertyCell, RefSerializationKind::kBackgroundSerialized) \
+ V(RegExpBoilerplateDescription, RefSerializationKind::kNeverSerialized) \
+ V(ScopeInfo, RefSerializationKind::kNeverSerialized) \
+ V(SharedFunctionInfo, RefSerializationKind::kNeverSerialized) \
+ V(SourceTextModule, RefSerializationKind::kNeverSerialized) \
+ V(TemplateObjectDescription, RefSerializationKind::kNeverSerialized) \
+ /* Subtypes of Object */ \
+ V(HeapObject, RefSerializationKind::kBackgroundSerialized)
+
+#define FORWARD_DECL(Name, ...) class Name##Ref;
+HEAP_BROKER_OBJECT_LIST(FORWARD_DECL)
+#undef FORWARD_DECL
-// This list is sorted such that subtypes appear before their supertypes.
-// DO NOT VIOLATE THIS PROPERTY!
-// Types in this list can be serialized on demand from the background thread.
-#define HEAP_BROKER_BACKGROUND_SERIALIZED_OBJECT_LIST(V) \
- /* Subtypes of HeapObject */ \
- V(PropertyCell)
+class ObjectRef;
-// This list is sorted such that subtypes appear before their supertypes.
-// DO NOT VIOLATE THIS PROPERTY!
-#define HEAP_BROKER_SERIALIZED_OBJECT_LIST(V) \
- /* Subtypes of JSObject */ \
- V(JSArray) \
- V(JSBoundFunction) \
- V(JSDataView) \
- V(JSFunction) \
- V(JSGlobalObject) \
- V(JSGlobalProxy) \
- V(JSTypedArray) \
- /* Subtypes of Context */ \
- V(NativeContext) \
- /* Subtypes of FixedArray */ \
- V(Context) \
- V(ScriptContextTable) \
- /* Subtypes of FixedArrayBase */ \
- V(FixedArray) \
- V(FixedDoubleArray) \
- /* Subtypes of JSReceiver */ \
- V(JSObject) \
- /* Subtypes of HeapObject */ \
- V(AllocationSite) \
- V(FixedArrayBase) \
- V(JSReceiver) \
- V(SourceTextModule) \
- /* Subtypes of Object */ \
- V(HeapObject)
+template <class T>
+struct ref_traits;
-class CompilationDependencies;
-struct FeedbackSource;
-class JSHeapBroker;
-class ObjectData;
-class PerIsolateCompilerCache;
-class PropertyAccessInfo;
-#define FORWARD_DECL(Name) class Name##Ref;
-HEAP_BROKER_SERIALIZED_OBJECT_LIST(FORWARD_DECL)
-HEAP_BROKER_POSSIBLY_BACKGROUND_SERIALIZED_OBJECT_LIST(FORWARD_DECL)
-HEAP_BROKER_BACKGROUND_SERIALIZED_OBJECT_LIST(FORWARD_DECL)
-HEAP_BROKER_NEVER_SERIALIZED_OBJECT_LIST(FORWARD_DECL)
-#undef FORWARD_DECL
+#define REF_TRAITS(Name, Kind) \
+ template <> \
+ struct ref_traits<Name> { \
+ using ref_type = Name##Ref; \
+ static constexpr RefSerializationKind ref_serialization_kind = Kind; \
+ };
+HEAP_BROKER_OBJECT_LIST(REF_TRAITS)
+#undef REF_TRAITS
+
+template <>
+struct ref_traits<Object> {
+ using ref_type = ObjectRef;
+ // Note: While a bit awkward, this artificial ref serialization kind value is
+ // okay: smis are never-serialized, and we never create raw non-smi
+ // ObjectRefs (they would at least be HeapObjectRefs instead).
+ static constexpr RefSerializationKind ref_serialization_kind =
+ RefSerializationKind::kNeverSerialized;
+};
class V8_EXPORT_PRIVATE ObjectRef {
public:
- enum class BackgroundSerialization {
- kDisallowed,
- kAllowed,
- };
-
ObjectRef(JSHeapBroker* broker, Handle<Object> object,
- BackgroundSerialization background_serialization =
- BackgroundSerialization::kDisallowed,
bool check_type = true);
ObjectRef(JSHeapBroker* broker, ObjectData* data, bool check_type = true)
: data_(data), broker_(broker) {
@@ -178,25 +174,19 @@ class V8_EXPORT_PRIVATE ObjectRef {
bool IsSmi() const;
int AsSmi() const;
-#define HEAP_IS_METHOD_DECL(Name) bool Is##Name() const;
- HEAP_BROKER_SERIALIZED_OBJECT_LIST(HEAP_IS_METHOD_DECL)
- HEAP_BROKER_POSSIBLY_BACKGROUND_SERIALIZED_OBJECT_LIST(HEAP_IS_METHOD_DECL)
- HEAP_BROKER_BACKGROUND_SERIALIZED_OBJECT_LIST(HEAP_IS_METHOD_DECL)
- HEAP_BROKER_NEVER_SERIALIZED_OBJECT_LIST(HEAP_IS_METHOD_DECL)
+#define HEAP_IS_METHOD_DECL(Name, ...) bool Is##Name() const;
+ HEAP_BROKER_OBJECT_LIST(HEAP_IS_METHOD_DECL)
#undef HEAP_IS_METHOD_DECL
-#define HEAP_AS_METHOD_DECL(Name) Name##Ref As##Name() const;
- HEAP_BROKER_SERIALIZED_OBJECT_LIST(HEAP_AS_METHOD_DECL)
- HEAP_BROKER_POSSIBLY_BACKGROUND_SERIALIZED_OBJECT_LIST(HEAP_AS_METHOD_DECL)
- HEAP_BROKER_BACKGROUND_SERIALIZED_OBJECT_LIST(HEAP_AS_METHOD_DECL)
- HEAP_BROKER_NEVER_SERIALIZED_OBJECT_LIST(HEAP_AS_METHOD_DECL)
+#define HEAP_AS_METHOD_DECL(Name, ...) Name##Ref As##Name() const;
+ HEAP_BROKER_OBJECT_LIST(HEAP_AS_METHOD_DECL)
#undef HEAP_AS_METHOD_DECL
bool IsNull() const;
bool IsNullOrUndefined() const;
bool IsTheHole() const;
- bool BooleanValue() const;
+ base::Optional<bool> TryGetBooleanValue() const;
Maybe<double> OddballToNumber() const;
Isolate* isolate() const;
@@ -272,20 +262,11 @@ class HeapObjectType {
// Constructors are carefully defined such that we do a type check on
// the outermost Ref class in the inheritance chain only.
-#define DEFINE_REF_CONSTRUCTOR(name, base) \
- name##Ref(JSHeapBroker* broker, Handle<Object> object, \
- BackgroundSerialization background_serialization = \
- BackgroundSerialization::kDisallowed, \
- bool check_type = true) \
- : base(broker, object, background_serialization, false) { \
+#define DEFINE_REF_CONSTRUCTOR(Name, Base) \
+ Name##Ref(JSHeapBroker* broker, ObjectData* data, bool check_type = true) \
+ : Base(broker, data, false) { \
if (check_type) { \
- CHECK(Is##name()); \
- } \
- } \
- name##Ref(JSHeapBroker* broker, ObjectData* data, bool check_type = true) \
- : base(broker, data, false) { \
- if (check_type) { \
- CHECK(Is##name()); \
+ CHECK(Is##Name()); \
} \
}
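
The rewritten DEFINE_REF_CONSTRUCTOR keeps the "type check on the outermost Ref class only" rule by constructing every base with check_type == false, so the CHECK runs exactly once. A minimal, self-contained sketch of that pattern (all *Sketch types are illustrative stand-ins, not V8 classes):

#include <cassert>

struct DataSketch { int kind; };  // stand-in for ObjectData; kind 2 means "Map"

class ObjectRefSketch {
 public:
  explicit ObjectRefSketch(DataSketch* data) : data_(data) {}
  bool IsHeapObject() const { return data_->kind >= 1; }
  bool IsMap() const { return data_->kind == 2; }
 protected:
  DataSketch* data_;
};

class HeapObjectRefSketch : public ObjectRefSketch {
 public:
  explicit HeapObjectRefSketch(DataSketch* data, bool check_type = true)
      : ObjectRefSketch(data) {
    if (check_type) assert(IsHeapObject());
  }
};

class MapRefSketch : public HeapObjectRefSketch {
 public:
  explicit MapRefSketch(DataSketch* data, bool check_type = true)
      // The base is constructed with check_type == false, so the type check
      // runs only in the outermost constructor of the inheritance chain.
      : HeapObjectRefSketch(data, false) {
    if (check_type) assert(IsMap());
  }
};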
@@ -435,6 +416,11 @@ class RegExpBoilerplateDescriptionRef : public HeapObjectRef {
int flags() const;
};
+// HeapNumberRef is only created for immutable HeapNumbers. Mutable
+// HeapNumbers (those owned by in-object or backing store fields with
+// representation type Double) are not exposed to the compiler through
+// HeapNumberRef. Instead, we read their value, and protect that read
+// with a field-constness Dependency.
class HeapNumberRef : public HeapObjectRef {
public:
DEFINE_REF_CONSTRUCTOR(HeapNumber, HeapObjectRef)
@@ -451,9 +437,9 @@ class ContextRef : public HeapObjectRef {
Handle<Context> object() const;
// {previous} decrements {depth} by 1 for each previous link successfully
- // followed. If {depth} != 0 on function return, then it only got
- // partway to the desired depth. If {serialize} is true, then
- // {previous} will cache its findings.
+ // followed. If {depth} != 0 on function return, then it only got partway to
+ // the desired depth. If {serialize} is true, then {previous} will cache its
+ // findings (unless concurrent inlining is enabled).
ContextRef previous(size_t* depth,
SerializationPolicy policy =
SerializationPolicy::kAssumeSerialized) const;
@@ -464,15 +450,14 @@ class ContextRef : public HeapObjectRef {
SerializationPolicy::kAssumeSerialized) const;
SourceTextModuleRef GetModule(SerializationPolicy policy) const;
-
- // We only serialize the ScopeInfo if certain Promise
- // builtins are called.
- void SerializeScopeInfo();
- base::Optional<ScopeInfoRef> scope_info() const;
};
+// TODO(jgruber): Don't serialize NativeContext fields once all refs can be
+// created concurrently.
+
#define BROKER_COMPULSORY_NATIVE_CONTEXT_FIELDS(V) \
V(JSFunction, array_function) \
+ V(JSFunction, function_prototype_apply) \
V(JSFunction, boolean_function) \
V(JSFunction, bigint_function) \
V(JSFunction, number_function) \
@@ -532,6 +517,8 @@ class NativeContextRef : public ContextRef {
public:
DEFINE_REF_CONSTRUCTOR(NativeContext, ContextRef)
+ bool is_unserialized_heap_object() const;
+
Handle<NativeContext> object() const;
void Serialize();
@@ -556,13 +543,6 @@ class NameRef : public HeapObjectRef {
bool IsUniqueName() const;
};
-class ScriptContextTableRef : public HeapObjectRef {
- public:
- DEFINE_REF_CONSTRUCTOR(ScriptContextTable, HeapObjectRef)
-
- Handle<ScriptContextTable> object() const;
-};
-
class DescriptorArrayRef : public HeapObjectRef {
public:
DEFINE_REF_CONSTRUCTOR(DescriptorArray, HeapObjectRef)
@@ -701,8 +681,11 @@ class V8_EXPORT_PRIVATE MapRef : public HeapObjectRef {
HeapObjectRef GetBackPointer() const;
void SerializePrototype();
- bool serialized_prototype() const;
- HeapObjectRef prototype() const;
+ // TODO(neis): We should be able to remove TrySerializePrototype once
+ // concurrent-inlining is always on. Then we can also change the return type
+ // of prototype() back to HeapObjectRef.
+ bool TrySerializePrototype();
+ base::Optional<HeapObjectRef> prototype() const;
void SerializeForElementLoad();
@@ -711,6 +694,7 @@ class V8_EXPORT_PRIVATE MapRef : public HeapObjectRef {
ZoneVector<MapRef>* prototype_maps);
// Concerning the underlying instance_descriptors:
+ bool TrySerializeOwnDescriptor(InternalIndex descriptor_index);
void SerializeOwnDescriptor(InternalIndex descriptor_index);
bool serialized_own_descriptor(InternalIndex descriptor_index) const;
MapRef FindFieldOwner(InternalIndex descriptor_index) const;
@@ -754,8 +738,8 @@ class FunctionTemplateInfoRef : public HeapObjectRef {
void SerializeCallCode();
base::Optional<CallHandlerInfoRef> call_code() const;
- Address c_function() const;
- const CFunctionInfo* c_signature() const;
+ ZoneVector<Address> c_functions() const;
+ ZoneVector<const CFunctionInfo*> c_signatures() const;
HolderLookupResult LookupHolderOfExpectedType(
MapRef receiver_map,
@@ -779,14 +763,6 @@ class ArrayBoilerplateDescriptionRef : public HeapObjectRef {
int constants_elements_length() const;
};
-class ObjectBoilerplateDescriptionRef : public HeapObjectRef {
- public:
- using HeapObjectRef::HeapObjectRef;
- Handle<ObjectBoilerplateDescription> object() const;
-
- int size() const;
-};
-
class FixedArrayRef : public FixedArrayBaseRef {
public:
DEFINE_REF_CONSTRUCTOR(FixedArray, FixedArrayBaseRef)
@@ -794,6 +770,12 @@ class FixedArrayRef : public FixedArrayBaseRef {
Handle<FixedArray> object() const;
ObjectRef get(int i) const;
+
+ // As above but may fail if Ref construction is not possible (e.g. for
+ // serialized types on the background thread).
+ // TODO(jgruber): Remove once all Ref types are never-serialized or
+ // background-serialized and can thus be created on background threads.
+ base::Optional<ObjectRef> TryGet(int i) const;
};
class FixedDoubleArrayRef : public FixedArrayBaseRef {
@@ -802,7 +784,10 @@ class FixedDoubleArrayRef : public FixedArrayBaseRef {
Handle<FixedDoubleArray> object() const;
- Float64 get(int i) const;
+ // Due to 64-bit unaligned reads, only usable for
+ // immutable-after-initialization FixedDoubleArrays protected by
+ // acquire-release semantics (such as boilerplate elements).
+ Float64 GetFromImmutableFixedDoubleArray(int i) const;
};
class BytecodeArrayRef : public FixedArrayBaseRef {
@@ -826,6 +811,22 @@ class BytecodeArrayRef : public FixedArrayBaseRef {
int handler_table_size() const;
};
+class ScriptContextTableRef : public FixedArrayRef {
+ public:
+ DEFINE_REF_CONSTRUCTOR(ScriptContextTable, FixedArrayRef)
+
+ Handle<ScriptContextTable> object() const;
+};
+
+class ObjectBoilerplateDescriptionRef : public FixedArrayRef {
+ public:
+ DEFINE_REF_CONSTRUCTOR(ObjectBoilerplateDescription, FixedArrayRef)
+
+ Handle<ObjectBoilerplateDescription> object() const;
+
+ int size() const;
+};
+
class JSArrayRef : public JSObjectRef {
public:
DEFINE_REF_CONSTRUCTOR(JSArray, JSObjectRef)
@@ -965,7 +966,7 @@ class SourceTextModuleRef : public HeapObjectRef {
void Serialize();
base::Optional<CellRef> GetCell(int cell_index) const;
- ObjectRef import_meta() const;
+ base::Optional<ObjectRef> import_meta() const;
};
class TemplateObjectDescriptionRef : public HeapObjectRef {
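
The unified HEAP_BROKER_OBJECT_LIST drives both the Ref forward declarations and the ref_traits specializations through one X-macro. A compilable, self-contained sketch of that shape follows; the *_sketch names are stand-ins and only the pattern mirrors heap-refs.h:

#include <type_traits>

enum class KindSketch { kNeverSerialized, kBackgroundSerialized, kSerialized };

class Map {};           // stand-in heap class
class MapRefSketch {};  // stand-in Ref wrapper

template <class T>
struct ref_traits_sketch;

// One list, several expansions: forward declarations can take V(Name, ...) and
// ignore the kind, while the traits expansion consumes it.
#define OBJECT_LIST_SKETCH(V) V(Map, KindSketch::kBackgroundSerialized)

#define REF_TRAITS_SKETCH(Name, Kind)                           \
  template <>                                                   \
  struct ref_traits_sketch<Name> {                              \
    using ref_type = Name##RefSketch;                           \
    static constexpr KindSketch ref_serialization_kind = Kind;  \
  };
OBJECT_LIST_SKETCH(REF_TRAITS_SKETCH)
#undef REF_TRAITS_SKETCH

static_assert(std::is_same<ref_traits_sketch<Map>::ref_type, MapRefSketch>::value,
              "ref_traits maps a heap class to its Ref type");
static_assert(ref_traits_sketch<Map>::ref_serialization_kind ==
                  KindSketch::kBackgroundSerialized,
              "and records how the broker may serialize it");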
diff --git a/chromium/v8/src/compiler/int64-lowering.cc b/chromium/v8/src/compiler/int64-lowering.cc
index 706cd7de509..28eb30969c7 100644
--- a/chromium/v8/src/compiler/int64-lowering.cc
+++ b/chromium/v8/src/compiler/int64-lowering.cc
@@ -392,8 +392,7 @@ void Int64Lowering::LowerNode(Node* node) {
if (call_descriptor->GetReturnType(old_index).representation() ==
MachineRepresentation::kWord64) {
Node* high_node = graph()->NewNode(
- common()->Projection(new_index + 1), node,
- graph()->start());
+ common()->Projection(new_index + 1), node, graph()->start());
ReplaceNode(use_node, use_node, high_node);
++new_index;
}
@@ -684,11 +683,11 @@ void Int64Lowering::LowerNode(Node* node) {
ReplaceNode(node, low_node, high_node);
break;
}
- case IrOpcode::kWord64Rol:
+ case IrOpcode::kWord64RolLowerable:
DCHECK(machine()->Word32Rol().IsSupported());
V8_FALLTHROUGH;
- case IrOpcode::kWord64Ror: {
- DCHECK_EQ(2, node->InputCount());
+ case IrOpcode::kWord64RorLowerable: {
+ DCHECK_EQ(3, node->InputCount());
Node* input = node->InputAt(0);
Node* shift = HasReplacementLow(node->InputAt(1))
? GetReplacementLow(node->InputAt(1))
@@ -721,7 +720,7 @@ void Int64Lowering::LowerNode(Node* node) {
auto* op1 = machine()->Word32Shr();
auto* op2 = machine()->Word32Shl();
- bool is_ror = node->opcode() == IrOpcode::kWord64Ror;
+ bool is_ror = node->opcode() == IrOpcode::kWord64RorLowerable;
if (!is_ror) std::swap(op1, op2);
Node* low_node =
@@ -742,7 +741,7 @@ void Int64Lowering::LowerNode(Node* node) {
graph()->NewNode(common()->Int32Constant(0x1F)));
}
- bool is_ror = node->opcode() == IrOpcode::kWord64Ror;
+ bool is_ror = node->opcode() == IrOpcode::kWord64RorLowerable;
Node* inv_mask =
is_ror ? graph()->NewNode(
machine()->Word32Xor(),
@@ -774,6 +773,7 @@ void Int64Lowering::LowerNode(Node* node) {
graph(), common(),
graph()->NewNode(machine()->Int32LessThan(), masked_shift6,
graph()->NewNode(common()->Int32Constant(32))));
+ lt32.Chain(NodeProperties::GetControlInput(node));
// The low word and the high word can be swapped either at the input or
// at the output. We swap the inputs so that shift does not have to be
@@ -807,13 +807,14 @@ void Int64Lowering::LowerNode(Node* node) {
}
break;
}
- case IrOpcode::kWord64Clz: {
- DCHECK_EQ(1, node->InputCount());
+ case IrOpcode::kWord64ClzLowerable: {
+ DCHECK_EQ(2, node->InputCount());
Node* input = node->InputAt(0);
Diamond d(
graph(), common(),
graph()->NewNode(machine()->Word32Equal(), GetReplacementHigh(input),
graph()->NewNode(common()->Int32Constant(0))));
+ d.Chain(NodeProperties::GetControlInput(node));
Node* low_node = d.Phi(
MachineRepresentation::kWord32,
@@ -825,14 +826,16 @@ void Int64Lowering::LowerNode(Node* node) {
ReplaceNode(node, low_node, graph()->NewNode(common()->Int32Constant(0)));
break;
}
- case IrOpcode::kWord64Ctz: {
- DCHECK_EQ(1, node->InputCount());
+ case IrOpcode::kWord64CtzLowerable: {
+ DCHECK_EQ(2, node->InputCount());
DCHECK(machine()->Word32Ctz().IsSupported());
Node* input = node->InputAt(0);
Diamond d(
graph(), common(),
graph()->NewNode(machine()->Word32Equal(), GetReplacementLow(input),
graph()->NewNode(common()->Int32Constant(0))));
+ d.Chain(NodeProperties::GetControlInput(node));
+
Node* low_node =
d.Phi(MachineRepresentation::kWord32,
graph()->NewNode(machine()->Int32Add(),
@@ -844,6 +847,12 @@ void Int64Lowering::LowerNode(Node* node) {
ReplaceNode(node, low_node, graph()->NewNode(common()->Int32Constant(0)));
break;
}
+ case IrOpcode::kWord64Ror:
+ case IrOpcode::kWord64Rol:
+ case IrOpcode::kWord64Ctz:
+ case IrOpcode::kWord64Clz:
+ FATAL("%s operator should not be used in 32-bit systems",
+ node->op()->mnemonic());
case IrOpcode::kWord64Popcnt: {
DCHECK_EQ(1, node->InputCount());
Node* input = node->InputAt(0);
@@ -1030,7 +1039,7 @@ void Int64Lowering::LowerNode(Node* node) {
default: { DefaultLowering(node); }
}
-} // NOLINT(readability/fn_size)
+}
void Int64Lowering::LowerComparison(Node* node, const Operator* high_word_op,
const Operator* low_word_op) {
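
In the rotate lowerings above, each 32-bit output half is built from cross-shifted input halves, and the Diamond selects between the shift-below-32 case and the case where the halves are first swapped. The arithmetic for the simple case, as a standalone sketch in plain C++ rather than TurboFan node construction:

#include <cstdint>
#include <utility>

// Rotate-right of a 64-bit value given as {low, high} 32-bit halves, for
// shift amounts in 1..31. For shift >= 32 the lowering instead swaps the
// halves and rotates by (shift - 32); shift == 0 is the identity.
std::pair<uint32_t, uint32_t> Ror64Halves(uint32_t low, uint32_t high,
                                          unsigned shift) {
  const unsigned inv = 32 - shift;  // bits that wrap across the half boundary
  const uint32_t new_low = (low >> shift) | (high << inv);
  const uint32_t new_high = (high >> shift) | (low << inv);
  return {new_low, new_high};
}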
diff --git a/chromium/v8/src/compiler/js-call-reducer.cc b/chromium/v8/src/compiler/js-call-reducer.cc
index bb7a11d16dc..3e33d0694e5 100644
--- a/chromium/v8/src/compiler/js-call-reducer.cc
+++ b/chromium/v8/src/compiler/js-call-reducer.cc
@@ -15,6 +15,7 @@
#include "src/codegen/tnode.h"
#include "src/compiler/access-builder.h"
#include "src/compiler/access-info.h"
+#include "src/compiler/allocation-builder-inl.h"
#include "src/compiler/allocation-builder.h"
#include "src/compiler/compilation-dependencies.h"
#include "src/compiler/feedback-source.h"
@@ -42,7 +43,7 @@ namespace internal {
namespace compiler {
// Shorter lambda declarations with less visual clutter.
-#define _ [&]() // NOLINT(whitespace/braces)
+#define _ [&]()
class JSCallReducerAssembler : public JSGraphAssembler {
protected:
@@ -889,8 +890,8 @@ class FastApiCallReducerAssembler : public JSCallReducerAssembler {
Node* holder, const SharedFunctionInfoRef shared, Node* target,
const int arity, Node* effect)
: JSCallReducerAssembler(reducer, node),
- c_function_(function_template_info.c_function()),
- c_signature_(function_template_info.c_signature()),
+ c_functions_(function_template_info.c_functions()),
+ c_signatures_(function_template_info.c_signatures()),
function_template_info_(function_template_info),
receiver_(receiver),
holder_(holder),
@@ -898,8 +899,8 @@ class FastApiCallReducerAssembler : public JSCallReducerAssembler {
target_(target),
arity_(arity) {
DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
- DCHECK_NE(c_function_, kNullAddress);
- CHECK_NOT_NULL(c_signature_);
+ CHECK_GT(c_functions_.size(), 0);
+ CHECK_GT(c_signatures_.size(), 0);
InitializeEffectControl(effect, NodeProperties::GetControlInput(node));
}
@@ -908,13 +909,16 @@ class FastApiCallReducerAssembler : public JSCallReducerAssembler {
// C arguments include the receiver at index 0. Thus C index 1 corresponds
// to the JS argument 0, etc.
const int c_argument_count =
- static_cast<int>(c_signature_->ArgumentCount());
+ static_cast<int>(c_signatures_[0]->ArgumentCount());
CHECK_GE(c_argument_count, kReceiver);
int cursor = 0;
base::SmallVector<Node*, kInlineSize> inputs(c_argument_count + arity_ +
kExtraInputsCount);
- inputs[cursor++] = ExternalConstant(ExternalReference::Create(c_function_));
+ // Multiple function overloads not supported yet, always call the first
+ // overload.
+ inputs[cursor++] =
+ ExternalConstant(ExternalReference::Create(c_functions_[0]));
inputs[cursor++] = n.receiver();
@@ -986,12 +990,12 @@ class FastApiCallReducerAssembler : public JSCallReducerAssembler {
TNode<Object> FastApiCall(CallDescriptor* descriptor, Node** inputs,
size_t inputs_size) {
return AddNode<Object>(graph()->NewNode(
- simplified()->FastApiCall(c_signature_, feedback(), descriptor),
+ simplified()->FastApiCall(c_signatures_[0], feedback(), descriptor),
static_cast<int>(inputs_size), inputs));
}
- const Address c_function_;
- const CFunctionInfo* const c_signature_;
+ const ZoneVector<Address> c_functions_;
+ const ZoneVector<const CFunctionInfo*> c_signatures_;
const FunctionTemplateInfoRef function_template_info_;
Node* const receiver_;
Node* const holder_;
@@ -2170,17 +2174,17 @@ TNode<Object> PromiseBuiltinReducerAssembler::ReducePromiseConstructor(
TrueConstant());
// Allocate closures for the resolve and reject cases.
- SharedFunctionInfoRef resolve_sfi(
- broker_, broker_->isolate()
- ->factory()
- ->promise_capability_default_resolve_shared_fun());
+ SharedFunctionInfoRef resolve_sfi =
+ MakeRef(broker_, broker_->isolate()
+ ->factory()
+ ->promise_capability_default_resolve_shared_fun());
TNode<JSFunction> resolve =
CreateClosureFromBuiltinSharedFunctionInfo(resolve_sfi, promise_context);
- SharedFunctionInfoRef reject_sfi(
- broker_, broker_->isolate()
- ->factory()
- ->promise_capability_default_reject_shared_fun());
+ SharedFunctionInfoRef reject_sfi =
+ MakeRef(broker_, broker_->isolate()
+ ->factory()
+ ->promise_capability_default_reject_shared_fun());
TNode<JSFunction> reject =
CreateClosureFromBuiltinSharedFunctionInfo(reject_sfi, promise_context);
@@ -2447,6 +2451,10 @@ Reduction JSCallReducer::ReduceObjectConstructor(Node* node) {
Reduction JSCallReducer::ReduceFunctionPrototypeApply(Node* node) {
JSCallNode n(node);
CallParameters const& p = n.Parameters();
+ CallFeedbackRelation new_feedback_relation =
+ p.feedback_relation() == CallFeedbackRelation::kReceiver
+ ? CallFeedbackRelation::kTarget
+ : CallFeedbackRelation::kUnrelated;
int arity = p.arity_without_implicit_args();
ConvertReceiverMode convert_mode = ConvertReceiverMode::kAny;
if (arity == 0) {
@@ -2479,9 +2487,9 @@ Reduction JSCallReducer::ReduceFunctionPrototypeApply(Node* node) {
// Morph the {node} to a {JSCallWithArrayLike}.
NodeProperties::ChangeOp(
- node, javascript()->CallWithArrayLike(
- p.frequency(), p.feedback(), p.speculation_mode(),
- CallFeedbackRelation::kUnrelated));
+ node, javascript()->CallWithArrayLike(p.frequency(), p.feedback(),
+ p.speculation_mode(),
+ new_feedback_relation));
return Changed(node).FollowedBy(ReduceJSCallWithArrayLike(node));
} else {
// Check whether {arguments_list} is null.
@@ -2509,7 +2517,7 @@ Reduction JSCallReducer::ReduceFunctionPrototypeApply(Node* node) {
Node* value0 = effect0 = control0 = graph()->NewNode(
javascript()->CallWithArrayLike(p.frequency(), p.feedback(),
p.speculation_mode(),
- CallFeedbackRelation::kUnrelated),
+ new_feedback_relation),
target, this_argument, arguments_list, n.feedback_vector(), context,
frame_state, effect0, control0);
@@ -2559,7 +2567,7 @@ Reduction JSCallReducer::ReduceFunctionPrototypeApply(Node* node) {
NodeProperties::ChangeOp(
node, javascript()->Call(JSCallNode::ArityForArgc(arity), p.frequency(),
p.feedback(), convert_mode, p.speculation_mode(),
- CallFeedbackRelation::kUnrelated));
+ new_feedback_relation));
// Try to further reduce the JSCall {node}.
return Changed(node).FollowedBy(ReduceJSCall(node));
}
@@ -2591,28 +2599,20 @@ Reduction JSCallReducer::ReduceFunctionPrototypeBind(Node* node) {
if (!inference.HaveMaps()) return NoChange();
MapHandles const& receiver_maps = inference.GetMaps();
- MapRef first_receiver_map(broker(), receiver_maps[0]);
+ MapRef first_receiver_map = MakeRef(broker(), receiver_maps[0]);
bool const is_constructor = first_receiver_map.is_constructor();
- if (first_receiver_map.ShouldHaveBeenSerialized() &&
- !first_receiver_map.serialized_prototype()) {
- TRACE_BROKER_MISSING(broker(),
- "serialized prototype on map " << first_receiver_map);
- return inference.NoChange();
- }
- ObjectRef const prototype = first_receiver_map.prototype();
- for (Handle<Map> const map : receiver_maps) {
- MapRef receiver_map(broker(), map);
+ base::Optional<HeapObjectRef> const prototype =
+ first_receiver_map.prototype();
+ if (!prototype.has_value()) return inference.NoChange();
- if (receiver_map.ShouldHaveBeenSerialized() &&
- !receiver_map.serialized_prototype()) {
- TRACE_BROKER_MISSING(broker(),
- "serialized prototype on map " << receiver_map);
- return inference.NoChange();
- }
+ for (Handle<Map> const map : receiver_maps) {
+ MapRef receiver_map = MakeRef(broker(), map);
+ base::Optional<HeapObjectRef> map_prototype = receiver_map.prototype();
+ if (!map_prototype.has_value()) return inference.NoChange();
// Check for consistency among the {receiver_maps}.
- if (!receiver_map.prototype().equals(prototype) ||
+ if (!map_prototype->equals(*prototype) ||
receiver_map.is_constructor() != is_constructor ||
!InstanceTypeChecker::IsJSFunctionOrBoundFunction(
receiver_map.instance_type())) {
@@ -2643,8 +2643,8 @@ Reduction JSCallReducer::ReduceFunctionPrototypeBind(Node* node) {
return inference.NoChange();
}
ReadOnlyRoots roots(isolate());
- StringRef length_string(broker(), roots.length_string_handle());
- StringRef name_string(broker(), roots.name_string_handle());
+ StringRef length_string = MakeRef(broker(), roots.length_string_handle());
+ StringRef name_string = MakeRef(broker(), roots.name_string_handle());
base::Optional<ObjectRef> length_value(
receiver_map.GetStrongValue(kLengthIndex));
@@ -2668,7 +2668,7 @@ Reduction JSCallReducer::ReduceFunctionPrototypeBind(Node* node) {
MapRef map = is_constructor
? native_context().bound_function_with_constructor_map()
: native_context().bound_function_without_constructor_map();
- if (!map.prototype().equals(prototype)) return inference.NoChange();
+ if (!map.prototype().value().equals(*prototype)) return inference.NoChange();
inference.RelyOnMapsPreferStability(dependencies(), jsgraph(), &effect,
control, p.feedback());
@@ -2677,6 +2677,15 @@ Reduction JSCallReducer::ReduceFunctionPrototypeBind(Node* node) {
static constexpr int kBoundThis = 1;
static constexpr int kReceiverContextEffectAndControl = 4;
int const arity = n.ArgumentCount();
+
+ if (arity > 0) {
+ MapRef fixed_array_map = MakeRef(broker(), factory()->fixed_array_map());
+ AllocationBuilder ab(jsgraph(), effect, control);
+ if (!ab.CanAllocateArray(arity, fixed_array_map)) {
+ return NoChange();
+ }
+ }
+
int const arity_with_bound_this = std::max(arity, kBoundThis);
int const input_count =
arity_with_bound_this + kReceiverContextEffectAndControl;
@@ -2713,10 +2722,7 @@ Reduction JSCallReducer::ReduceFunctionPrototypeCall(Node* node) {
HeapObjectMatcher m(target);
if (m.HasResolvedValue() && m.Ref(broker()).IsJSFunction()) {
JSFunctionRef function = m.Ref(broker()).AsJSFunction();
- if (function.ShouldHaveBeenSerialized() && !function.serialized()) {
- TRACE_BROKER_MISSING(broker(), "Serialize call on function " << function);
- return NoChange();
- }
+ if (!function.serialized()) return NoChange();
context = jsgraph()->Constant(function.context());
} else {
context = effect = graph()->NewNode(
@@ -2785,24 +2791,17 @@ Reduction JSCallReducer::ReduceObjectGetPrototype(Node* node, Node* object) {
if (!inference.HaveMaps()) return NoChange();
MapHandles const& object_maps = inference.GetMaps();
- MapRef candidate_map(broker(), object_maps[0]);
- if (candidate_map.ShouldHaveBeenSerialized() &&
- !candidate_map.serialized_prototype()) {
- TRACE_BROKER_MISSING(broker(), "prototype for map " << candidate_map);
- return inference.NoChange();
- }
- ObjectRef candidate_prototype = candidate_map.prototype();
+ MapRef candidate_map = MakeRef(broker(), object_maps[0]);
+ base::Optional<HeapObjectRef> candidate_prototype = candidate_map.prototype();
+ if (!candidate_prototype.has_value()) return inference.NoChange();
// Check if we can constant-fold the {candidate_prototype}.
for (size_t i = 0; i < object_maps.size(); ++i) {
- MapRef object_map(broker(), object_maps[i]);
- if (object_map.ShouldHaveBeenSerialized() &&
- !object_map.serialized_prototype()) {
- TRACE_BROKER_MISSING(broker(), "prototype for map " << object_map);
- return inference.NoChange();
- }
+ MapRef object_map = MakeRef(broker(), object_maps[i]);
+ base::Optional<HeapObjectRef> map_prototype = object_map.prototype();
+ if (!map_prototype.has_value()) return inference.NoChange();
if (IsSpecialReceiverInstanceType(object_map.instance_type()) ||
- !object_map.prototype().equals(candidate_prototype)) {
+ !map_prototype->equals(*candidate_prototype)) {
// We exclude special receivers, like JSProxy or API objects that
// might require access checks here; we also don't want to deal
// with hidden prototypes at this point.
@@ -2815,7 +2814,7 @@ Reduction JSCallReducer::ReduceObjectGetPrototype(Node* node, Node* object) {
if (!inference.RelyOnMapsViaStability(dependencies())) {
return inference.NoChange();
}
- Node* value = jsgraph()->Constant(candidate_prototype);
+ Node* value = jsgraph()->Constant(*candidate_prototype);
ReplaceWithValue(node, value);
return Replace(value);
}
@@ -3177,9 +3176,9 @@ bool CanInlineArrayIteratingBuiltin(JSHeapBroker* broker,
MapHandles const& receiver_maps,
ElementsKind* kind_return) {
DCHECK_NE(0, receiver_maps.size());
- *kind_return = MapRef(broker, receiver_maps[0]).elements_kind();
+ *kind_return = MakeRef(broker, receiver_maps[0]).elements_kind();
for (auto receiver_map : receiver_maps) {
- MapRef map(broker, receiver_map);
+ MapRef map = MakeRef(broker, receiver_map);
if (!map.supports_fast_array_iteration() ||
!UnionElementsKindUptoSize(kind_return, map.elements_kind())) {
return false;
@@ -3194,7 +3193,7 @@ bool CanInlineArrayResizingBuiltin(JSHeapBroker* broker,
bool builtin_is_push = false) {
DCHECK_NE(0, receiver_maps.size());
for (auto receiver_map : receiver_maps) {
- MapRef map(broker, receiver_map);
+ MapRef map = MakeRef(broker, receiver_map);
if (!map.supports_fast_array_resize()) return false;
// TODO(turbofan): We should also handle fast holey double elements once
// we got the hole NaN mess sorted out in TurboFan/V8.
@@ -3432,8 +3431,8 @@ Reduction JSCallReducer::ReduceArraySome(Node* node,
#if V8_ENABLE_WEBASSEMBLY
namespace {
+
bool CanInlineJSToWasmCall(const wasm::FunctionSig* wasm_signature) {
- DCHECK(FLAG_turbo_inline_js_wasm_calls);
if (wasm_signature->return_count() > 1) {
return false;
}
@@ -3450,10 +3449,13 @@ bool CanInlineJSToWasmCall(const wasm::FunctionSig* wasm_signature) {
return true;
}
+
} // namespace
Reduction JSCallReducer::ReduceCallWasmFunction(
Node* node, const SharedFunctionInfoRef& shared) {
+ DCHECK(flags() & kInlineJSToWasmCalls);
+
JSCallNode n(node);
const CallParameters& p = n.Parameters();
@@ -3540,11 +3542,13 @@ bool Has64BitIntegerParamsInSignature(const CFunctionInfo* c_signature) {
bool CanOptimizeFastCall(
const FunctionTemplateInfoRef& function_template_info) {
- const CFunctionInfo* c_signature = function_template_info.c_signature();
+ if (function_template_info.c_functions().empty()) return false;
+
+ // Multiple function overloads not supported yet, always call the first
+ // overload.
+ const CFunctionInfo* c_signature = function_template_info.c_signatures()[0];
- bool optimize_to_fast_call =
- FLAG_turbo_fast_api_calls &&
- function_template_info.c_function() != kNullAddress;
+ bool optimize_to_fast_call = FLAG_turbo_fast_api_calls;
#ifndef V8_ENABLE_FP_PARAMS_IN_C_LINKAGE
optimize_to_fast_call =
optimize_to_fast_call && !HasFPParamsInSignature(c_signature);
@@ -3612,7 +3616,7 @@ Reduction JSCallReducer::ReduceCallApiFunction(
MapInference inference(broker(), receiver, effect);
if (inference.HaveMaps()) {
MapHandles const& receiver_maps = inference.GetMaps();
- MapRef first_receiver_map(broker(), receiver_maps[0]);
+ MapRef first_receiver_map = MakeRef(broker(), receiver_maps[0]);
// See if we can constant-fold the compatible receiver checks.
HolderLookupResult api_holder =
@@ -3645,7 +3649,7 @@ Reduction JSCallReducer::ReduceCallApiFunction(
function_template_info.accept_any_receiver());
for (size_t i = 1; i < receiver_maps.size(); ++i) {
- MapRef receiver_map(broker(), receiver_maps[i]);
+ MapRef receiver_map = MakeRef(broker(), receiver_maps[i]);
HolderLookupResult holder_i =
function_template_info.LookupHolderOfExpectedType(receiver_map);
@@ -3893,8 +3897,8 @@ Reduction JSCallReducer::ReduceCallOrConstructWithArrayLikeOrSpread(
if (!frame_state.frame_state_info().shared_info().ToHandle(&shared)) {
return NoChange();
}
- formal_parameter_count = SharedFunctionInfoRef(broker(), shared)
- .internal_formal_parameter_count();
+ formal_parameter_count =
+ MakeRef(broker(), shared).internal_formal_parameter_count();
}
if (type == CreateArgumentsType::kMappedArguments) {
@@ -3971,10 +3975,9 @@ Reduction JSCallReducer::ReduceCallOrConstructWithArrayLikeOrSpread(
if (IsCallWithArrayLikeOrSpread(node)) {
NodeProperties::ChangeOp(
- node,
- javascript()->Call(JSCallNode::ArityForArgc(argc), frequency, feedback,
- ConvertReceiverMode::kAny, speculation_mode,
- CallFeedbackRelation::kUnrelated));
+ node, javascript()->Call(JSCallNode::ArityForArgc(argc), frequency,
+ feedback, ConvertReceiverMode::kAny,
+ speculation_mode, feedback_relation));
return Changed(node).FollowedBy(ReduceJSCall(node));
} else {
NodeProperties::ChangeOp(
@@ -4048,7 +4051,9 @@ bool ShouldUseCallICFeedback(Node* node) {
} else if (m.IsPhi()) {
// Protect against endless loops here.
Node* control = NodeProperties::GetControlInput(node);
- if (control->opcode() == IrOpcode::kLoop) return false;
+ if (control->opcode() == IrOpcode::kLoop ||
+ control->opcode() == IrOpcode::kDead)
+ return false;
// Check if {node} is a Phi of nodes which shouldn't
// use CallIC feedback (not looking through loops).
int const value_input_count = m.node()->op()->ValueInputCount();
@@ -4063,11 +4068,7 @@ bool ShouldUseCallICFeedback(Node* node) {
} // namespace
bool JSCallReducer::IsBuiltinOrApiFunction(JSFunctionRef function) const {
- if (function.ShouldHaveBeenSerialized() && !function.serialized()) {
- TRACE_BROKER_MISSING(broker(), "data for function " << function);
- return false;
- }
-
+ if (!function.serialized()) return false;
// TODO(neis): Add a way to check if function template info isn't serialized
// and add a warning in such cases. Currently we can't tell if function
// template info doesn't exist or wasn't serialized.
@@ -4091,10 +4092,7 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
ObjectRef target_ref = m.Ref(broker());
if (target_ref.IsJSFunction()) {
JSFunctionRef function = target_ref.AsJSFunction();
- if (function.ShouldHaveBeenSerialized() && !function.serialized()) {
- TRACE_BROKER_MISSING(broker(), "data for function " << function);
- return NoChange();
- }
+ if (!function.serialized()) return NoChange();
// Don't inline cross native context.
if (!function.native_context().equals(native_context())) {
@@ -4104,10 +4102,7 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
return ReduceJSCall(node, function.shared());
} else if (target_ref.IsJSBoundFunction()) {
JSBoundFunctionRef function = target_ref.AsJSBoundFunction();
- if (function.ShouldHaveBeenSerialized() && !function.serialized()) {
- TRACE_BROKER_MISSING(broker(), "data for function " << function);
- return NoChange();
- }
+ if (!function.serialized()) return NoChange();
ObjectRef bound_this = function.bound_this();
ConvertReceiverMode const convert_mode =
@@ -4115,6 +4110,21 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
? ConvertReceiverMode::kNullOrUndefined
: ConvertReceiverMode::kNotNullOrUndefined;
+ // TODO(jgruber): Inline this block below once TryGet is guaranteed to
+ // succeed.
+ FixedArrayRef bound_arguments = function.bound_arguments();
+ const int bound_arguments_length = bound_arguments.length();
+ static constexpr int kInlineSize = 16; // Arbitrary.
+ base::SmallVector<Node*, kInlineSize> args;
+ for (int i = 0; i < bound_arguments_length; ++i) {
+ base::Optional<ObjectRef> maybe_arg = bound_arguments.TryGet(i);
+ if (!maybe_arg.has_value()) {
+ TRACE_BROKER_MISSING(broker(), "bound argument");
+ return NoChange();
+ }
+ args.emplace_back(jsgraph()->Constant(maybe_arg.value()));
+ }
+
// Patch {node} to use [[BoundTargetFunction]] and [[BoundThis]].
NodeProperties::ReplaceValueInput(
node, jsgraph()->Constant(function.bound_target_function()),
@@ -4123,10 +4133,8 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
JSCallNode::ReceiverIndex());
// Insert the [[BoundArguments]] for {node}.
- FixedArrayRef bound_arguments = function.bound_arguments();
- for (int i = 0; i < bound_arguments.length(); ++i) {
- node->InsertInput(graph()->zone(), i + 2,
- jsgraph()->Constant(bound_arguments.get(i)));
+ for (int i = 0; i < bound_arguments_length; ++i) {
+ node->InsertInput(graph()->zone(), i + 2, args[i]);
arity++;
}
@@ -4152,9 +4160,9 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
// Same if the {target} is the result of a CheckClosure operation.
if (target->opcode() == IrOpcode::kJSCreateClosure) {
CreateClosureParameters const& p = JSCreateClosureNode{target}.Parameters();
- return ReduceJSCall(node, SharedFunctionInfoRef(broker(), p.shared_info()));
+ return ReduceJSCall(node, MakeRef(broker(), p.shared_info()));
} else if (target->opcode() == IrOpcode::kCheckClosure) {
- FeedbackCellRef cell(broker(), FeedbackCellOf(target->op()));
+ FeedbackCellRef cell = MakeRef(broker(), FeedbackCellOf(target->op()));
if (cell.shared_function_info().has_value()) {
return ReduceJSCall(node, *cell.shared_function_info());
} else {
@@ -4201,7 +4209,7 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
}
if (!ShouldUseCallICFeedback(target) ||
- p.feedback_relation() != CallFeedbackRelation::kRelated ||
+ p.feedback_relation() == CallFeedbackRelation::kUnrelated ||
!p.feedback().IsValid()) {
return NoChange();
}
@@ -4213,7 +4221,14 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
node, DeoptimizeReason::kInsufficientTypeFeedbackForCall);
}
- base::Optional<HeapObjectRef> feedback_target = feedback.AsCall().target();
+ base::Optional<HeapObjectRef> feedback_target;
+ if (p.feedback_relation() == CallFeedbackRelation::kTarget) {
+ feedback_target = feedback.AsCall().target();
+ } else {
+ DCHECK_EQ(p.feedback_relation(), CallFeedbackRelation::kReceiver);
+ feedback_target = native_context().function_prototype_apply();
+ }
+
if (feedback_target.has_value() && feedback_target->map().is_callable()) {
Node* target_function = jsgraph()->Constant(*feedback_target);
@@ -4238,8 +4253,8 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
// Try to further reduce the JSCall {node}.
return Changed(node).FollowedBy(ReduceJSCall(node));
} else if (feedback_target.has_value() && feedback_target->IsFeedbackCell()) {
- FeedbackCellRef feedback_cell(
- broker(), feedback_target.value().AsFeedbackCell().object());
+ FeedbackCellRef feedback_cell =
+ MakeRef(broker(), feedback_target.value().AsFeedbackCell().object());
if (feedback_cell.value().has_value()) {
// Check that {target} is a closure with given {feedback_cell},
// which uniquely identifies a given function inside a native context.
@@ -4276,6 +4291,9 @@ Reduction JSCallReducer::ReduceJSCall(Node* node,
Node* target = n.target();
// Do not reduce calls to functions with break points.
+ // If this state changes during background compilation, the compilation
+ // job will be aborted from the main thread (see
+ // Debug::PrepareFunctionForDebugExecution()).
if (shared.HasBreakInfo()) return NoChange();
// Raise a TypeError if the {target} is a "classConstructor".
@@ -4736,13 +4754,12 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
if (target_ref.IsJSFunction()) {
JSFunctionRef function = target_ref.AsJSFunction();
- if (function.ShouldHaveBeenSerialized() && !function.serialized()) {
- TRACE_BROKER_MISSING(broker(),
- "function, not serialized: " << function);
- return NoChange();
- }
+ if (!function.serialized()) return NoChange();
// Do not reduce constructors with break points.
+ // If this state changes during background compilation, the compilation
+ // job will be aborted from the main thread (see
+ // Debug::PrepareFunctionForDebugExecution()).
if (function.shared().HasBreakInfo()) return NoChange();
// Don't inline cross native context.
@@ -4799,14 +4816,23 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
}
} else if (target_ref.IsJSBoundFunction()) {
JSBoundFunctionRef function = target_ref.AsJSBoundFunction();
- if (function.ShouldHaveBeenSerialized() && !function.serialized()) {
- TRACE_BROKER_MISSING(broker(),
- "function, not serialized: " << function);
- return NoChange();
- }
-
+ if (!function.serialized()) return NoChange();
ObjectRef bound_target_function = function.bound_target_function();
FixedArrayRef bound_arguments = function.bound_arguments();
+ const int bound_arguments_length = bound_arguments.length();
+
+ // TODO(jgruber): Inline this block below once TryGet is guaranteed to
+ // succeed.
+ static constexpr int kInlineSize = 16; // Arbitrary.
+ base::SmallVector<Node*, kInlineSize> args;
+ for (int i = 0; i < bound_arguments_length; ++i) {
+ base::Optional<ObjectRef> maybe_arg = bound_arguments.TryGet(i);
+ if (!maybe_arg.has_value()) {
+ TRACE_BROKER_MISSING(broker(), "bound argument");
+ return NoChange();
+ }
+ args.emplace_back(jsgraph()->Constant(maybe_arg.value()));
+ }
// Patch {node} to use [[BoundTargetFunction]].
node->ReplaceInput(n.TargetIndex(),
@@ -4823,9 +4849,8 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
new_target));
// Insert the [[BoundArguments]] for {node}.
- for (int i = 0; i < bound_arguments.length(); ++i) {
- node->InsertInput(graph()->zone(), n.ArgumentIndex(i),
- jsgraph()->Constant(bound_arguments.get(i)));
+ for (int i = 0; i < bound_arguments_length; ++i) {
+ node->InsertInput(graph()->zone(), n.ArgumentIndex(i), args[i]);
arity++;
}
@@ -5064,7 +5089,7 @@ Reduction JSCallReducer::ReduceJSConstructWithArrayLike(Node* node) {
DCHECK_EQ(n.ArgumentCount(), 1); // The arraylike object.
return ReduceCallOrConstructWithArrayLikeOrSpread(
node, arraylike_index, p.frequency(), p.feedback(),
- SpeculationMode::kDisallowSpeculation, CallFeedbackRelation::kRelated);
+ SpeculationMode::kDisallowSpeculation, CallFeedbackRelation::kTarget);
}
Reduction JSCallReducer::ReduceJSConstructWithSpread(Node* node) {
@@ -5074,7 +5099,7 @@ Reduction JSCallReducer::ReduceJSConstructWithSpread(Node* node) {
DCHECK_GE(n.ArgumentCount(), 1); // At least the spread.
return ReduceCallOrConstructWithArrayLikeOrSpread(
node, spread_index, p.frequency(), p.feedback(),
- SpeculationMode::kDisallowSpeculation, CallFeedbackRelation::kRelated);
+ SpeculationMode::kDisallowSpeculation, CallFeedbackRelation::kTarget);
}
Reduction JSCallReducer::ReduceReturnReceiver(Node* node) {
@@ -5611,8 +5636,8 @@ Reduction JSCallReducer::ReduceArrayPrototypeShift(Node* node) {
graph()->zone(), 1, BuiltinArguments::kNumExtraArgsWithReceiver,
Builtins::name(builtin_index), node->op()->properties(),
CallDescriptor::kNeedsFrameState);
- Node* stub_code = jsgraph()->CEntryStubConstant(1, kDontSaveFPRegs,
- kArgvOnStack, true);
+ Node* stub_code = jsgraph()->CEntryStubConstant(
+ 1, SaveFPRegsMode::kIgnore, ArgvMode::kStack, true);
Address builtin_entry = Builtins::CppEntryOf(builtin_index);
Node* entry = jsgraph()->ExternalConstant(
ExternalReference::Create(builtin_entry));
@@ -5701,16 +5726,18 @@ Reduction JSCallReducer::ReduceArrayPrototypeSlice(Node* node) {
// `slice.call(arguments)`, for example jQuery makes heavy use of that.
bool can_be_holey = false;
for (Handle<Map> map : receiver_maps) {
- MapRef receiver_map(broker(), map);
- if (!receiver_map.supports_fast_array_iteration())
+ MapRef receiver_map = MakeRef(broker(), map);
+ if (!receiver_map.supports_fast_array_iteration()) {
return inference.NoChange();
+ }
if (IsHoleyElementsKind(receiver_map.elements_kind())) {
can_be_holey = true;
}
}
- if (!dependencies()->DependOnArraySpeciesProtector())
+ if (!dependencies()->DependOnArraySpeciesProtector()) {
return inference.NoChange();
+ }
if (can_be_holey && !dependencies()->DependOnNoElementsProtector()) {
return inference.NoChange();
}
@@ -5851,7 +5878,7 @@ Reduction JSCallReducer::ReduceArrayIteratorPrototypeNext(Node* node) {
// Check that various {iterated_object_maps} have compatible elements kinds.
ElementsKind elements_kind =
- MapRef(broker(), iterated_object_maps[0]).elements_kind();
+ MakeRef(broker(), iterated_object_maps[0]).elements_kind();
if (IsTypedArrayElementsKind(elements_kind)) {
// TurboFan doesn't support loading from BigInt typed arrays yet.
if (elements_kind == BIGUINT64_ELEMENTS ||
@@ -5859,7 +5886,7 @@ Reduction JSCallReducer::ReduceArrayIteratorPrototypeNext(Node* node) {
return inference.NoChange();
}
for (Handle<Map> map : iterated_object_maps) {
- MapRef iterated_object_map(broker(), map);
+ MapRef iterated_object_map = MakeRef(broker(), map);
if (iterated_object_map.elements_kind() != elements_kind) {
return inference.NoChange();
}
@@ -5947,11 +5974,12 @@ Reduction JSCallReducer::ReduceArrayIteratorPrototypeNext(Node* node) {
Node* etrue = effect;
Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
{
- // We know that the {index} is range of the {length} now.
+ // This extra check exists to refine the type of {index} but also to break
+ // an exploitation technique that abuses typer mismatches.
index = etrue = graph()->NewNode(
- common()->TypeGuard(
- Type::Range(0.0, length_access.type.Max() - 1.0, graph()->zone())),
- index, etrue, if_true);
+ simplified()->CheckBounds(p.feedback(),
+ CheckBoundsFlag::kAbortOnOutOfBounds),
+ index, length, etrue, if_true);
done_true = jsgraph()->FalseConstant();
if (iteration_kind == IterationKind::kKeys) {
@@ -6475,15 +6503,11 @@ bool JSCallReducer::DoPromiseChecks(MapInference* inference) {
// Check whether all {receiver_maps} are JSPromise maps and
// have the initial Promise.prototype as their [[Prototype]].
for (Handle<Map> map : receiver_maps) {
- MapRef receiver_map(broker(), map);
+ MapRef receiver_map = MakeRef(broker(), map);
if (!receiver_map.IsJSPromiseMap()) return false;
- if (receiver_map.ShouldHaveBeenSerialized() &&
- !receiver_map.serialized_prototype()) {
- TRACE_BROKER_MISSING(broker(), "prototype for map " << receiver_map);
- return false;
- }
- if (!receiver_map.prototype().equals(
- native_context().promise_prototype())) {
+ base::Optional<HeapObjectRef> prototype = receiver_map.prototype();
+ if (!prototype.has_value() ||
+ !prototype->equals(native_context().promise_prototype())) {
return false;
}
}
@@ -6604,14 +6628,14 @@ Reduction JSCallReducer::ReducePromisePrototypeFinally(Node* node) {
context, constructor, etrue, if_true);
// Allocate the closure for the reject case.
- SharedFunctionInfoRef promise_catch_finally(
- broker(), factory()->promise_catch_finally_shared_fun());
+ SharedFunctionInfoRef promise_catch_finally =
+ MakeRef(broker(), factory()->promise_catch_finally_shared_fun());
catch_true = etrue = CreateClosureFromBuiltinSharedFunctionInfo(
promise_catch_finally, context, etrue, if_true);
// Allocate the closure for the fulfill case.
- SharedFunctionInfoRef promise_then_finally(
- broker(), factory()->promise_then_finally_shared_fun());
+ SharedFunctionInfoRef promise_then_finally =
+ MakeRef(broker(), factory()->promise_then_finally_shared_fun());
then_true = etrue = CreateClosureFromBuiltinSharedFunctionInfo(
promise_then_finally, context, etrue, if_true);
}
@@ -7085,9 +7109,10 @@ Reduction JSCallReducer::ReduceCollectionIteratorPrototypeNext(
MapInference inference(broker(), receiver, effect);
if (!inference.HaveMaps()) return NoChange();
MapHandles const& receiver_maps = inference.GetMaps();
- receiver_instance_type = MapRef(broker(), receiver_maps[0]).instance_type();
+ receiver_instance_type =
+ MakeRef(broker(), receiver_maps[0]).instance_type();
for (size_t i = 1; i < receiver_maps.size(); ++i) {
- if (MapRef(broker(), receiver_maps[i]).instance_type() !=
+ if (MakeRef(broker(), receiver_maps[i]).instance_type() !=
receiver_instance_type) {
return inference.NoChange();
}
@@ -7690,10 +7715,10 @@ Reduction JSCallReducer::ReduceRegExpPrototypeTest(Node* node) {
if (broker()->is_concurrent_inlining()) {
// Obtain precomputed access infos from the broker.
for (auto map : regexp_maps) {
- MapRef map_ref(broker(), map);
+ MapRef map_ref = MakeRef(broker(), map);
PropertyAccessInfo access_info = broker()->GetPropertyAccessInfo(
- map_ref, NameRef(broker(), isolate()->factory()->exec_string()),
- AccessMode::kLoad);
+ map_ref, MakeRef(broker(), isolate()->factory()->exec_string()),
+ AccessMode::kLoad, dependencies());
access_infos.push_back(access_info);
}
} else {
@@ -7714,7 +7739,7 @@ Reduction JSCallReducer::ReduceRegExpPrototypeTest(Node* node) {
// Do not reduce if the exec method is not on the prototype chain.
if (!ai_exec.holder().ToHandle(&holder)) return inference.NoChange();
- JSObjectRef holder_ref(broker(), holder);
+ JSObjectRef holder_ref = MakeRef(broker(), holder);
// Bail out if the exec method is not the original one.
base::Optional<ObjectRef> constant = holder_ref.GetOwnFastDataProperty(
@@ -7727,7 +7752,7 @@ Reduction JSCallReducer::ReduceRegExpPrototypeTest(Node* node) {
// Add proper dependencies on the {regexp}s [[Prototype]]s.
dependencies()->DependOnStablePrototypeChains(
ai_exec.lookup_start_object_maps(), kStartAtPrototype,
- JSObjectRef(broker(), holder));
+ MakeRef(broker(), holder));
} else {
// TODO(v8:11457) Support dictionary mode protoypes here.
return inference.NoChange();
@@ -7814,9 +7839,9 @@ Reduction JSCallReducer::ReduceBigIntAsUintN(Node* node) {
NumberMatcher matcher(bits);
if (matcher.IsInteger() && matcher.IsInRange(0, 64)) {
const int bits_value = static_cast<int>(matcher.ResolvedValue());
- value = effect = graph()->NewNode(simplified()->CheckBigInt(p.feedback()),
- value, effect, control);
- value = graph()->NewNode(simplified()->BigIntAsUintN(bits_value), value);
+ value = effect = graph()->NewNode(
+ simplified()->SpeculativeBigIntAsUintN(bits_value, p.feedback()), value,
+ effect, control);
ReplaceWithValue(node, value, effect);
return Replace(value);
}
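
Several reducers above now share one calling convention: accessors such as prototype(), TryGet() and import_meta() return a base::Optional, and the reducer bails out with NoChange() whenever the data has not been made available to the compiler thread. A condensed, self-contained sketch of that shape, using std::optional and stand-in types instead of the real broker classes:

#include <optional>
#include <vector>

struct HeapObjectRefSketch {
  int id;
  bool equals(const HeapObjectRefSketch& other) const { return id == other.id; }
};

struct MapRefSketch {
  // May be empty when the prototype has not been made available off-thread.
  std::optional<HeapObjectRefSketch> prototype;
};

enum class ReductionSketch { kNoChange, kFoldToCommonPrototype };

ReductionSketch ReduceGetPrototypeSketch(const std::vector<MapRefSketch>& maps) {
  if (maps.empty()) return ReductionSketch::kNoChange;
  const std::optional<HeapObjectRefSketch>& candidate = maps[0].prototype;
  if (!candidate.has_value()) return ReductionSketch::kNoChange;  // data missing
  for (const MapRefSketch& map : maps) {
    const std::optional<HeapObjectRefSketch>& proto = map.prototype;
    if (!proto.has_value()) return ReductionSketch::kNoChange;    // data missing
    if (!proto->equals(*candidate)) return ReductionSketch::kNoChange;  // maps disagree
  }
  return ReductionSketch::kFoldToCommonPrototype;  // safe to constant-fold
}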
diff --git a/chromium/v8/src/compiler/js-context-specialization.cc b/chromium/v8/src/compiler/js-context-specialization.cc
index 448652ad8df..9c53cf13154 100644
--- a/chromium/v8/src/compiler/js-context-specialization.cc
+++ b/chromium/v8/src/compiler/js-context-specialization.cc
@@ -40,7 +40,7 @@ Reduction JSContextSpecialization::ReduceParameter(Node* node) {
// Constant-fold the function parameter {node}.
Handle<JSFunction> function;
if (closure().ToHandle(&function)) {
- Node* value = jsgraph()->Constant(JSFunctionRef(broker_, function));
+ Node* value = jsgraph()->Constant(MakeRef(broker_, function));
return Replace(value);
}
}
@@ -103,7 +103,7 @@ base::Optional<ContextRef> GetSpecializationContext(
Maybe<OuterContext> maybe_outer) {
switch (node->opcode()) {
case IrOpcode::kHeapConstant: {
- HeapObjectRef object(broker, HeapConstantOf(node->op()));
+ HeapObjectRef object = MakeRef(broker, HeapConstantOf(node->op()));
if (object.IsContext()) return object.AsContext();
break;
}
@@ -112,7 +112,7 @@ base::Optional<ContextRef> GetSpecializationContext(
if (maybe_outer.To(&outer) && IsContextParameter(node) &&
*distance >= outer.distance) {
*distance -= outer.distance;
- return ContextRef(broker, outer.context);
+ return MakeRef(broker, outer.context);
}
break;
}
@@ -231,7 +231,7 @@ base::Optional<ContextRef> GetModuleContext(JSHeapBroker* broker, Node* node,
switch (context->opcode()) {
case IrOpcode::kHeapConstant: {
- HeapObjectRef object(broker, HeapConstantOf(context->op()));
+ HeapObjectRef object = MakeRef(broker, HeapConstantOf(context->op()));
if (object.IsContext()) {
return find_context(object.AsContext());
}
@@ -240,7 +240,7 @@ base::Optional<ContextRef> GetModuleContext(JSHeapBroker* broker, Node* node,
case IrOpcode::kParameter: {
OuterContext outer;
if (maybe_context.To(&outer) && IsContextParameter(context)) {
- return find_context(ContextRef(broker, outer.context));
+ return find_context(MakeRef(broker, outer.context));
}
break;
}
@@ -259,17 +259,18 @@ Reduction JSContextSpecialization::ReduceJSGetImportMeta(Node* node) {
ContextRef context = maybe_context.value();
SourceTextModuleRef module =
context.get(Context::EXTENSION_INDEX).value().AsSourceTextModule();
- ObjectRef import_meta = module.import_meta();
- if (import_meta.IsJSObject()) {
- Node* import_meta_const = jsgraph()->Constant(import_meta);
- ReplaceWithValue(node, import_meta_const);
- return Changed(import_meta_const);
- } else {
- DCHECK(import_meta.IsTheHole());
+ base::Optional<ObjectRef> import_meta = module.import_meta();
+ if (!import_meta.has_value()) return NoChange();
+ if (!import_meta->IsJSObject()) {
+ DCHECK(import_meta->IsTheHole());
// The import.meta object has not yet been created. Let JSGenericLowering
// replace the operator with a runtime call.
return NoChange();
}
+
+ Node* import_meta_const = jsgraph()->Constant(*import_meta);
+ ReplaceWithValue(node, import_meta_const);
+ return Changed(import_meta_const);
}
Isolate* JSContextSpecialization::isolate() const {
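
The MakeRef(broker, handle) calls introduced throughout this patch replace direct Ref constructors. Combined with the ref_traits specializations from heap-refs.h, a helper of roughly the following shape can deduce the proper Ref type from the handle's static type; this is a plausible sketch with stand-in types, not the actual declaration in the V8 sources:

// Stand-ins for Handle<T>, JSHeapBroker and a Ref type; only the deduction
// through the traits class mirrors the real helper.
template <class T>
struct HandleSketch { T* location; };

struct BrokerSketch {};

class Map {};
class MapRefSketch {
 public:
  MapRefSketch(BrokerSketch*, HandleSketch<Map>) {}
};

template <class T>
struct ref_traits_sketch;
template <>
struct ref_traits_sketch<Map> { using ref_type = MapRefSketch; };

// MakeRef-style factory: callers write MakeRefSketch(broker, handle) and get
// back the Ref wrapper registered for the handle's class.
template <class T>
typename ref_traits_sketch<T>::ref_type MakeRefSketch(BrokerSketch* broker,
                                                      HandleSketch<T> object) {
  using RefT = typename ref_traits_sketch<T>::ref_type;
  return RefT(broker, object);
}

// Usage: MapRefSketch ref = MakeRefSketch(&broker, map_handle);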
diff --git a/chromium/v8/src/compiler/js-create-lowering.cc b/chromium/v8/src/compiler/js-create-lowering.cc
index 899922a27fc..21ba072f315 100644
--- a/chromium/v8/src/compiler/js-create-lowering.cc
+++ b/chromium/v8/src/compiler/js-create-lowering.cc
@@ -151,8 +151,8 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
FrameState frame_state{NodeProperties::GetFrameStateInput(node)};
Node* const control = graph()->start();
FrameStateInfo state_info = frame_state.frame_state_info();
- SharedFunctionInfoRef shared(broker(),
- state_info.shared_info().ToHandleChecked());
+ SharedFunctionInfoRef shared =
+ MakeRef(broker(), state_info.shared_info().ToHandleChecked());
// Use the ArgumentsAccessStub for materializing both mapped and unmapped
// arguments object, but only for non-inlined (i.e. outermost) frames.
@@ -404,7 +404,7 @@ Reduction JSCreateLowering::ReduceJSCreateGeneratorObject(Node* node) {
int parameter_count_no_receiver = shared.internal_formal_parameter_count();
int length = parameter_count_no_receiver +
shared.GetBytecodeArray().register_count();
- MapRef fixed_array_map(broker(), factory()->fixed_array_map());
+ MapRef fixed_array_map = MakeRef(broker(), factory()->fixed_array_map());
AllocationBuilder ab(jsgraph(), effect, control);
if (!ab.CanAllocateArray(length, fixed_array_map)) {
return NoChange();
@@ -622,7 +622,7 @@ Reduction JSCreateLowering::ReduceJSCreateArray(Node* node) {
{
Handle<AllocationSite> site;
if (p.site().ToHandle(&site)) {
- site_ref = AllocationSiteRef(broker(), site);
+ site_ref = MakeRef(broker(), site);
}
}
AllocationType allocation = AllocationType::kYoung;
@@ -650,8 +650,8 @@ Reduction JSCreateLowering::ReduceJSCreateArray(Node* node) {
allocation = dependencies()->DependOnPretenureMode(*site_ref);
dependencies()->DependOnElementsKind(*site_ref);
} else {
- PropertyCellRef array_constructor_protector(
- broker(), factory()->array_constructor_protector());
+ PropertyCellRef array_constructor_protector =
+ MakeRef(broker(), factory()->array_constructor_protector());
array_constructor_protector.SerializeAsProtector();
can_inline_call = array_constructor_protector.value().AsSmi() ==
Protectors::kProtectorValid;
@@ -775,11 +775,9 @@ Reduction JSCreateLowering::ReduceJSCreateAsyncFunctionObject(Node* node) {
Node* control = NodeProperties::GetControlInput(node);
// Create the register file.
- MapRef fixed_array_map(broker(), factory()->fixed_array_map());
+ MapRef fixed_array_map = MakeRef(broker(), factory()->fixed_array_map());
AllocationBuilder ab(jsgraph(), effect, control);
- if (!ab.CanAllocateArray(register_count, fixed_array_map)) {
- return NoChange();
- }
+ CHECK(ab.CanAllocateArray(register_count, fixed_array_map));
ab.AllocateArray(register_count, fixed_array_map);
for (int i = 0; i < register_count; ++i) {
ab.Store(AccessBuilder::ForFixedArraySlot(i),
@@ -881,7 +879,7 @@ Reduction JSCreateLowering::ReduceJSCreateBoundFunction(Node* node) {
CreateBoundFunctionParameters const& p =
CreateBoundFunctionParametersOf(node->op());
int const arity = static_cast<int>(p.arity());
- MapRef const map(broker(), p.map());
+ MapRef const map = MakeRef(broker(), p.map());
Node* bound_target_function = NodeProperties::GetValueInput(node, 0);
Node* bound_this = NodeProperties::GetValueInput(node, 1);
Node* effect = NodeProperties::GetEffectInput(node);
@@ -890,11 +888,9 @@ Reduction JSCreateLowering::ReduceJSCreateBoundFunction(Node* node) {
// Create the [[BoundArguments]] for the result.
Node* bound_arguments = jsgraph()->EmptyFixedArrayConstant();
if (arity > 0) {
- MapRef fixed_array_map(broker(), factory()->fixed_array_map());
+ MapRef fixed_array_map = MakeRef(broker(), factory()->fixed_array_map());
AllocationBuilder ab(jsgraph(), effect, control);
- if (!ab.CanAllocateArray(arity, fixed_array_map)) {
- return NoChange();
- }
+ CHECK(ab.CanAllocateArray(arity, fixed_array_map));
ab.AllocateArray(arity, fixed_array_map);
for (int i = 0; i < arity; ++i) {
ab.Store(AccessBuilder::ForFixedArraySlot(i),
@@ -924,9 +920,9 @@ Reduction JSCreateLowering::ReduceJSCreateBoundFunction(Node* node) {
Reduction JSCreateLowering::ReduceJSCreateClosure(Node* node) {
JSCreateClosureNode n(node);
CreateClosureParameters const& p = n.Parameters();
- SharedFunctionInfoRef shared(broker(), p.shared_info());
+ SharedFunctionInfoRef shared = MakeRef(broker(), p.shared_info());
FeedbackCellRef feedback_cell = n.GetFeedbackCellRefChecked(broker());
- HeapObjectRef code(broker(), p.code());
+ HeapObjectRef code = MakeRef(broker(), p.code());
Effect effect = n.effect();
Control control = n.control();
Node* context = n.context();
@@ -935,7 +931,7 @@ Reduction JSCreateLowering::ReduceJSCreateClosure(Node* node) {
// seen more than one instantiation, this simplifies the generated code and
// also serves as a heuristic of which allocation sites benefit from it.
if (!feedback_cell.map().equals(
- MapRef(broker(), factory()->many_closures_cell_map()))) {
+ MakeRef(broker(), factory()->many_closures_cell_map()))) {
return NoChange();
}
@@ -1041,7 +1037,7 @@ Reduction JSCreateLowering::ReduceJSCreateKeyValueArray(Node* node) {
Node* length = jsgraph()->Constant(2);
AllocationBuilder aa(jsgraph(), effect, graph()->start());
- aa.AllocateArray(2, MapRef(broker(), factory()->fixed_array_map()));
+ aa.AllocateArray(2, MakeRef(broker(), factory()->fixed_array_map()));
aa.Store(AccessBuilder::ForFixedArrayElement(PACKED_ELEMENTS),
jsgraph()->ZeroConstant(), key);
aa.Store(AccessBuilder::ForFixedArrayElement(PACKED_ELEMENTS),
@@ -1100,14 +1096,21 @@ Reduction JSCreateLowering::ReduceJSCreateLiteralArrayOrObject(Node* node) {
if (!feedback.IsInsufficient()) {
AllocationSiteRef site = feedback.AsLiteral().value();
if (site.IsFastLiteral()) {
- AllocationType allocation = AllocationType::kYoung;
+ AllocationType allocation = FLAG_allocation_site_pretenuring
+ ? site.GetAllocationType()
+ : AllocationType::kYoung;
+ JSObjectRef boilerplate = site.boilerplate().value();
+ base::Optional<Node*> maybe_value =
+ TryAllocateFastLiteral(effect, control, boilerplate, allocation);
+ if (!maybe_value.has_value()) {
+ TRACE_BROKER_MISSING(broker(), "bound argument");
+ return NoChange();
+ }
if (FLAG_allocation_site_pretenuring) {
- allocation = dependencies()->DependOnPretenureMode(site);
+ CHECK_EQ(dependencies()->DependOnPretenureMode(site), allocation);
}
dependencies()->DependOnElementsKinds(site);
- JSObjectRef boilerplate = site.boilerplate().value();
- Node* value = effect =
- AllocateFastLiteral(effect, control, boilerplate, allocation);
+ Node* value = effect = maybe_value.value();
ReplaceWithValue(node, value, effect, control);
return Replace(value);
}
@@ -1209,7 +1212,7 @@ Reduction JSCreateLowering::ReduceJSCreateFunctionContext(Node* node) {
DCHECK_EQ(IrOpcode::kJSCreateFunctionContext, node->opcode());
const CreateFunctionContextParameters& parameters =
CreateFunctionContextParametersOf(node->op());
- ScopeInfoRef scope_info(broker(), parameters.scope_info());
+ ScopeInfoRef scope_info = MakeRef(broker(), parameters.scope_info());
int slot_count = parameters.slot_count();
ScopeType scope_type = parameters.scope_type();
@@ -1249,7 +1252,7 @@ Reduction JSCreateLowering::ReduceJSCreateFunctionContext(Node* node) {
Reduction JSCreateLowering::ReduceJSCreateWithContext(Node* node) {
DCHECK_EQ(IrOpcode::kJSCreateWithContext, node->opcode());
- ScopeInfoRef scope_info(broker(), ScopeInfoOf(node->op()));
+ ScopeInfoRef scope_info = MakeRef(broker(), ScopeInfoOf(node->op()));
Node* extension = NodeProperties::GetValueInput(node, 0);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
@@ -1270,7 +1273,7 @@ Reduction JSCreateLowering::ReduceJSCreateWithContext(Node* node) {
Reduction JSCreateLowering::ReduceJSCreateCatchContext(Node* node) {
DCHECK_EQ(IrOpcode::kJSCreateCatchContext, node->opcode());
- ScopeInfoRef scope_info(broker(), ScopeInfoOf(node->op()));
+ ScopeInfoRef scope_info = MakeRef(broker(), ScopeInfoOf(node->op()));
Node* exception = NodeProperties::GetValueInput(node, 0);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
@@ -1291,7 +1294,7 @@ Reduction JSCreateLowering::ReduceJSCreateCatchContext(Node* node) {
Reduction JSCreateLowering::ReduceJSCreateBlockContext(Node* node) {
DCHECK_EQ(IrOpcode::kJSCreateBlockContext, node->opcode());
- ScopeInfoRef scope_info(broker(), ScopeInfoOf(node->op()));
+ ScopeInfoRef scope_info = MakeRef(broker(), ScopeInfoOf(node->op()));
int const context_length = scope_info.ContextLength();
// Use inline allocation for block contexts up to a size limit.
@@ -1323,7 +1326,7 @@ base::Optional<MapRef> GetObjectCreateMap(JSHeapBroker* broker,
HeapObjectRef prototype) {
MapRef standard_map =
broker->target_native_context().object_function().initial_map();
- if (prototype.equals(standard_map.prototype())) {
+ if (prototype.equals(standard_map.prototype().value())) {
return standard_map;
}
if (prototype.map().oddball_type() == OddballType::kNull) {
@@ -1354,7 +1357,7 @@ Reduction JSCreateLowering::ReduceJSCreateObject(Node* node) {
if (instance_map.is_dictionary_map()) {
DCHECK_EQ(prototype_const.map().oddball_type(), OddballType::kNull);
// Allocate an empty NameDictionary as backing store for the properties.
- MapRef map(broker(), factory()->name_dictionary_map());
+ MapRef map = MakeRef(broker(), factory()->name_dictionary_map());
int capacity =
NameDictionary::ComputeCapacity(NameDictionary::kInitialCapacity);
DCHECK(base::bits::IsPowerOfTwo(capacity));
@@ -1430,7 +1433,7 @@ Node* JSCreateLowering::TryAllocateArguments(Node* effect, Node* control,
auto parameters_it = parameters_access.begin_without_receiver();
// Actually allocate the backing store.
- MapRef fixed_array_map(broker(), factory()->fixed_array_map());
+ MapRef fixed_array_map = MakeRef(broker(), factory()->fixed_array_map());
AllocationBuilder ab(jsgraph(), effect, control);
if (!ab.CanAllocateArray(argument_count, fixed_array_map)) {
return nullptr;
@@ -1461,7 +1464,7 @@ Node* JSCreateLowering::TryAllocateRestArguments(Node* effect, Node* control,
parameters_access.begin_without_receiver_and_skip(start_index);
// Actually allocate the backing store.
- MapRef fixed_array_map(broker(), factory()->fixed_array_map());
+ MapRef fixed_array_map = MakeRef(broker(), factory()->fixed_array_map());
AllocationBuilder ab(jsgraph(), effect, control);
if (!ab.CanAllocateArray(num_elements, fixed_array_map)) {
return nullptr;
@@ -1496,14 +1499,14 @@ Node* JSCreateLowering::TryAllocateAliasedArguments(
int mapped_count = std::min(argument_count, parameter_count);
*has_aliased_arguments = true;
- MapRef sloppy_arguments_elements_map(
- broker(), factory()->sloppy_arguments_elements_map());
+ MapRef sloppy_arguments_elements_map =
+ MakeRef(broker(), factory()->sloppy_arguments_elements_map());
if (!AllocationBuilder::CanAllocateSloppyArgumentElements(
mapped_count, sloppy_arguments_elements_map)) {
return nullptr;
}
- MapRef fixed_array_map(broker(), factory()->fixed_array_map());
+ MapRef fixed_array_map = MakeRef(broker(), factory()->fixed_array_map());
if (!AllocationBuilder::CanAllocateArray(argument_count, fixed_array_map)) {
return nullptr;
}
@@ -1561,8 +1564,8 @@ Node* JSCreateLowering::TryAllocateAliasedArguments(
}
int mapped_count = parameter_count;
- MapRef sloppy_arguments_elements_map(
- broker(), factory()->sloppy_arguments_elements_map());
+ MapRef sloppy_arguments_elements_map =
+ MakeRef(broker(), factory()->sloppy_arguments_elements_map());
if (!AllocationBuilder::CanAllocateSloppyArgumentElements(
mapped_count, sloppy_arguments_elements_map)) {
return nullptr;
@@ -1617,7 +1620,7 @@ Node* JSCreateLowering::AllocateElements(Node* effect, Node* control,
// Actually allocate the backing store.
AllocationBuilder a(jsgraph(), effect, control);
- a.AllocateArray(capacity, MapRef(broker(), elements_map), allocation);
+ a.AllocateArray(capacity, MakeRef(broker(), elements_map), allocation);
for (int i = 0; i < capacity; ++i) {
Node* index = jsgraph()->Constant(i);
a.Store(access, index, value);
@@ -1642,7 +1645,7 @@ Node* JSCreateLowering::AllocateElements(Node* effect, Node* control,
// Actually allocate the backing store.
AllocationBuilder a(jsgraph(), effect, control);
- a.AllocateArray(capacity, MapRef(broker(), elements_map), allocation);
+ a.AllocateArray(capacity, MakeRef(broker(), elements_map), allocation);
for (int i = 0; i < capacity; ++i) {
Node* index = jsgraph()->Constant(i);
a.Store(access, index, values[i]);
@@ -1650,9 +1653,9 @@ Node* JSCreateLowering::AllocateElements(Node* effect, Node* control,
return a.Finish();
}
-Node* JSCreateLowering::AllocateFastLiteral(Node* effect, Node* control,
- JSObjectRef boilerplate,
- AllocationType allocation) {
+base::Optional<Node*> JSCreateLowering::TryAllocateFastLiteral(
+ Node* effect, Node* control, JSObjectRef boilerplate,
+ AllocationType allocation) {
// Compute the in-object properties to store first (might have effects).
MapRef boilerplate_map = boilerplate.map();
ZoneVector<std::pair<FieldAccess, Node*>> inobject_fields(zone());
@@ -1686,15 +1689,17 @@ Node* JSCreateLowering::AllocateFastLiteral(Node* effect, Node* control,
Node* value;
if (boilerplate_value.IsJSObject()) {
JSObjectRef boilerplate_object = boilerplate_value.AsJSObject();
- value = effect =
- AllocateFastLiteral(effect, control, boilerplate_object, allocation);
+ base::Optional<Node*> maybe_value = TryAllocateFastLiteral(
+ effect, control, boilerplate_object, allocation);
+ if (!maybe_value.has_value()) return {};
+ value = effect = maybe_value.value();
} else if (property_details.representation().IsDouble()) {
double number = boilerplate_value.AsHeapNumber().value();
// Allocate a mutable HeapNumber box and store the value into it.
AllocationBuilder builder(jsgraph(), effect, control);
builder.Allocate(HeapNumber::kSize, allocation);
builder.Store(AccessBuilder::ForMap(),
- MapRef(broker(), factory()->heap_number_map()));
+ MakeRef(broker(), factory()->heap_number_map()));
builder.Store(AccessBuilder::ForHeapNumberValue(),
jsgraph()->Constant(number));
value = effect = builder.Finish();
@@ -1712,6 +1717,9 @@ Node* JSCreateLowering::AllocateFastLiteral(Node* effect, Node* control,
int const boilerplate_length = boilerplate_map.GetInObjectProperties();
for (int index = static_cast<int>(inobject_fields.size());
index < boilerplate_length; ++index) {
+ DCHECK(!V8_MAP_PACKING_BOOL);
+ // TODO(wenyuzhao): Fix incorrect MachineType when V8_MAP_PACKING is
+ // enabled.
FieldAccess access =
AccessBuilder::ForJSObjectInObjectProperty(boilerplate_map, index);
Node* value = jsgraph()->HeapConstant(factory()->one_pointer_filler_map());
@@ -1719,8 +1727,10 @@ Node* JSCreateLowering::AllocateFastLiteral(Node* effect, Node* control,
}
// Setup the elements backing store.
- Node* elements =
- AllocateFastLiteralElements(effect, control, boilerplate, allocation);
+ base::Optional<Node*> maybe_elements =
+ TryAllocateFastLiteralElements(effect, control, boilerplate, allocation);
+ if (!maybe_elements.has_value()) return {};
+ Node* elements = maybe_elements.value();
if (elements->op()->EffectOutputCount() > 0) effect = elements;
// Actually allocate and initialize the object.
@@ -1743,9 +1753,9 @@ Node* JSCreateLowering::AllocateFastLiteral(Node* effect, Node* control,
return builder.Finish();
}
-Node* JSCreateLowering::AllocateFastLiteralElements(Node* effect, Node* control,
- JSObjectRef boilerplate,
- AllocationType allocation) {
+base::Optional<Node*> JSCreateLowering::TryAllocateFastLiteralElements(
+ Node* effect, Node* control, JSObjectRef boilerplate,
+ AllocationType allocation) {
FixedArrayBaseRef boilerplate_elements = boilerplate.elements().value();
// Empty or copy-on-write elements just store a constant.
@@ -1764,7 +1774,7 @@ Node* JSCreateLowering::AllocateFastLiteralElements(Node* effect, Node* control,
if (elements_map.instance_type() == FIXED_DOUBLE_ARRAY_TYPE) {
FixedDoubleArrayRef elements = boilerplate_elements.AsFixedDoubleArray();
for (int i = 0; i < elements_length; ++i) {
- Float64 value = elements.get(i);
+ Float64 value = elements.GetFromImmutableFixedDoubleArray(i);
if (value.is_hole_nan()) {
elements_values[i] = jsgraph()->TheHoleConstant();
} else {
@@ -1774,10 +1784,14 @@ Node* JSCreateLowering::AllocateFastLiteralElements(Node* effect, Node* control,
} else {
FixedArrayRef elements = boilerplate_elements.AsFixedArray();
for (int i = 0; i < elements_length; ++i) {
- ObjectRef element_value = elements.get(i);
+ base::Optional<ObjectRef> maybe_element_value = elements.TryGet(i);
+ if (!maybe_element_value.has_value()) return {};
+ ObjectRef element_value = maybe_element_value.value();
if (element_value.IsJSObject()) {
- elements_values[i] = effect = AllocateFastLiteral(
+ base::Optional<Node*> maybe_value = TryAllocateFastLiteral(
effect, control, element_value.AsJSObject(), allocation);
+ if (!maybe_value.has_value()) return {};
+ elements_values[i] = effect = maybe_value.value();
} else {
elements_values[i] = jsgraph()->Constant(element_value);
}
diff --git a/chromium/v8/src/compiler/js-create-lowering.h b/chromium/v8/src/compiler/js-create-lowering.h
index 0edbda79a0e..e780b685bff 100644
--- a/chromium/v8/src/compiler/js-create-lowering.h
+++ b/chromium/v8/src/compiler/js-create-lowering.h
@@ -97,6 +97,12 @@ class V8_EXPORT_PRIVATE JSCreateLowering final
Node* arguments_length,
const SharedFunctionInfoRef& shared,
bool* has_aliased_arguments);
+ base::Optional<Node*> TryAllocateFastLiteral(Node* effect, Node* control,
+ JSObjectRef boilerplate,
+ AllocationType allocation);
+ base::Optional<Node*> TryAllocateFastLiteralElements(
+ Node* effect, Node* control, JSObjectRef boilerplate,
+ AllocationType allocation);
Node* AllocateElements(Node* effect, Node* control,
ElementsKind elements_kind, int capacity,
@@ -107,11 +113,6 @@ class V8_EXPORT_PRIVATE JSCreateLowering final
ElementsKind elements_kind,
std::vector<Node*> const& values,
AllocationType allocation);
- Node* AllocateFastLiteral(Node* effect, Node* control,
- JSObjectRef boilerplate, AllocationType allocation);
- Node* AllocateFastLiteralElements(Node* effect, Node* control,
- JSObjectRef boilerplate,
- AllocationType allocation);
Node* AllocateLiteralRegExp(Node* effect, Node* control,
RegExpBoilerplateDescriptionRef boilerplate);
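
In the hunks above, AllocateFastLiteral and AllocateFastLiteralElements become TryAllocateFastLiteral and TryAllocateFastLiteralElements and return base::Optional<Node*>, so a missing broker fact propagates upward as an empty optional and the reduction bails out with NoChange() rather than continuing with a null node. The following is a minimal sketch of this fallible-recursion pattern, using std::optional and placeholder types in place of the real compiler graph.

#include <cstdio>
#include <optional>
#include <vector>

struct Node { int id; };

// Pretend boilerplate object: a tree of literals to copy. data_available
// stands in for "the heap broker has the data needed to inline this object".
struct Boilerplate {
  std::vector<Boilerplate> nested;
  bool data_available;
};

// Returns an empty optional as soon as any required fact is missing; every
// caller checks has_value() and bails out (the reducer's NoChange()) instead
// of dereferencing a null result.
std::optional<Node*> TryAllocateFastLiteral(const Boilerplate& b) {
  if (!b.data_available) return std::nullopt;     // TRACE_BROKER_MISSING(...)
  for (const Boilerplate& inner : b.nested) {
    std::optional<Node*> value = TryAllocateFastLiteral(inner);
    if (!value.has_value()) return std::nullopt;  // propagate the failure
  }
  static Node allocated{42};
  return &allocated;                              // allocation succeeded
}

int main() {
  Boilerplate ok{{}, true};
  Boilerplate missing_inner{{Boilerplate{{}, false}}, true};
  std::printf("%d %d\n", TryAllocateFastLiteral(ok).has_value() ? 1 : 0,
              TryAllocateFastLiteral(missing_inner).has_value() ? 1 : 0);  // 1 0
  return 0;
}
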
diff --git a/chromium/v8/src/compiler/js-generic-lowering.cc b/chromium/v8/src/compiler/js-generic-lowering.cc
index 71a0d43a415..935581b3462 100644
--- a/chromium/v8/src/compiler/js-generic-lowering.cc
+++ b/chromium/v8/src/compiler/js-generic-lowering.cc
@@ -7,6 +7,7 @@
#include "src/ast/ast.h"
#include "src/builtins/builtins-constructor.h"
#include "src/codegen/code-factory.h"
+#include "src/codegen/interface-descriptors-inl.h"
#include "src/compiler/access-builder.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/js-graph.h"
@@ -715,8 +716,8 @@ void JSGenericLowering::LowerJSCreateLiteralArray(Node* node) {
void JSGenericLowering::LowerJSGetTemplateObject(Node* node) {
JSGetTemplateObjectNode n(node);
GetTemplateObjectParameters const& p = n.Parameters();
- SharedFunctionInfoRef shared(broker(), p.shared());
- TemplateObjectDescriptionRef description(broker(), p.description());
+ SharedFunctionInfoRef shared = MakeRef(broker(), p.shared());
+ TemplateObjectDescriptionRef description = MakeRef(broker(), p.description());
DCHECK_EQ(node->op()->ControlInputCount(), 1);
node->RemoveInput(NodeProperties::FirstControlIndex(node));
@@ -1443,7 +1444,7 @@ void JSGenericLowering::LowerJSStackCheck(Node* node) {
}
void JSGenericLowering::LowerJSDebugger(Node* node) {
- ReplaceWithBuiltinCall(node, Builtins::kHandleDebuggerStatement);
+ ReplaceWithRuntimeCall(node, Runtime::kHandleDebuggerStatement);
}
Zone* JSGenericLowering::zone() const { return graph()->zone(); }
diff --git a/chromium/v8/src/compiler/js-graph.cc b/chromium/v8/src/compiler/js-graph.cc
index aca12b4cb54..652c7391faf 100644
--- a/chromium/v8/src/compiler/js-graph.cc
+++ b/chromium/v8/src/compiler/js-graph.cc
@@ -20,7 +20,8 @@ namespace compiler {
Node* JSGraph::CEntryStubConstant(int result_size, SaveFPRegsMode save_doubles,
ArgvMode argv_mode, bool builtin_exit_frame) {
- if (save_doubles == kDontSaveFPRegs && argv_mode == kArgvOnStack) {
+ if (save_doubles == SaveFPRegsMode::kIgnore &&
+ argv_mode == ArgvMode::kStack) {
DCHECK(result_size >= 1 && result_size <= 3);
if (!builtin_exit_frame) {
Node** ptr = nullptr;
diff --git a/chromium/v8/src/compiler/js-graph.h b/chromium/v8/src/compiler/js-graph.h
index e86bb594bac..c38d868db47 100644
--- a/chromium/v8/src/compiler/js-graph.h
+++ b/chromium/v8/src/compiler/js-graph.h
@@ -38,10 +38,9 @@ class V8_EXPORT_PRIVATE JSGraph : public MachineGraph {
JSGraph& operator=(const JSGraph&) = delete;
// CEntryStubs are cached depending on the result size and other flags.
- Node* CEntryStubConstant(int result_size,
- SaveFPRegsMode save_doubles = kDontSaveFPRegs,
- ArgvMode argv_mode = kArgvOnStack,
- bool builtin_exit_frame = false);
+ Node* CEntryStubConstant(
+ int result_size, SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore,
+ ArgvMode argv_mode = ArgvMode::kStack, bool builtin_exit_frame = false);
// Used for padding frames. (alias: the hole)
Node* PaddingConstant() { return TheHoleConstant(); }
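
The CEntryStubConstant signature now takes the scoped enums SaveFPRegsMode::kIgnore and ArgvMode::kStack in place of the old unscoped kDontSaveFPRegs and kArgvOnStack constants. A short sketch of what the enum-class form buys follows; the second enumerator of each enum below (kSave, kRegister) is illustrative and not taken from this diff.

#include <cstdio>

// Scoped enums: enumerators no longer leak into the enclosing scope and do
// not implicitly convert to int, so swapping the two flag arguments becomes
// a compile-time error instead of a silent bug.
enum class SaveFPRegsMode { kIgnore, kSave };
enum class ArgvMode { kStack, kRegister };

void CEntryStubConstant(int result_size,
                        SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore,
                        ArgvMode argv_mode = ArgvMode::kStack) {
  std::printf("result_size=%d save_doubles=%d argv_mode=%d\n", result_size,
              static_cast<int>(save_doubles), static_cast<int>(argv_mode));
}

int main() {
  CEntryStubConstant(1);  // defaults mirror the new defaults in this hunk
  CEntryStubConstant(2, SaveFPRegsMode::kSave, ArgvMode::kRegister);
  // CEntryStubConstant(2, ArgvMode::kStack, SaveFPRegsMode::kSave);  // error
  return 0;
}
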
diff --git a/chromium/v8/src/compiler/js-heap-broker.cc b/chromium/v8/src/compiler/js-heap-broker.cc
index ac90e4c6673..13cd02ab081 100644
--- a/chromium/v8/src/compiler/js-heap-broker.cc
+++ b/chromium/v8/src/compiler/js-heap-broker.cc
@@ -4,291 +4,26 @@
#include "src/compiler/js-heap-broker.h"
-#include "src/common/globals.h"
-#include "src/compiler/heap-refs.h"
-
#ifdef ENABLE_SLOW_DCHECKS
#include <algorithm>
#endif
-#include "include/v8-fast-api-calls.h"
-#include "src/api/api-inl.h"
-#include "src/ast/modules.h"
#include "src/codegen/code-factory.h"
#include "src/codegen/optimized-compilation-info.h"
-#include "src/compiler/access-info.h"
-#include "src/compiler/graph-reducer.h"
-#include "src/compiler/per-isolate-compiler-cache.h"
-#include "src/execution/protectors-inl.h"
+#include "src/handles/handles-inl.h"
+#include "src/ic/handler-configuration.h"
#include "src/init/bootstrapper.h"
-#include "src/objects/allocation-site-inl.h"
-#include "src/objects/api-callbacks.h"
-#include "src/objects/cell-inl.h"
-#include "src/objects/heap-number-inl.h"
-#include "src/objects/instance-type-inl.h"
-#include "src/objects/js-array-buffer-inl.h"
+#include "src/objects/feedback-cell.h"
#include "src/objects/js-array-inl.h"
-#include "src/objects/js-regexp-inl.h"
-#include "src/objects/literal-objects-inl.h"
-#include "src/objects/module-inl.h"
#include "src/objects/objects-inl.h"
-#include "src/objects/property-details.h"
-#include "src/objects/template-objects-inl.h"
-#include "src/objects/templates.h"
-#include "src/utils/utils.h"
+#include "src/objects/oddball.h"
+#include "src/objects/property-cell.h"
namespace v8 {
namespace internal {
namespace compiler {
#define TRACE(broker, x) TRACE_BROKER(broker, x)
-#define TRACE_MISSING(broker, x) TRACE_BROKER_MISSING(broker, x)
-
-#define FORWARD_DECL(Name) class Name##Data;
-HEAP_BROKER_SERIALIZED_OBJECT_LIST(FORWARD_DECL)
-HEAP_BROKER_POSSIBLY_BACKGROUND_SERIALIZED_OBJECT_LIST(FORWARD_DECL)
-HEAP_BROKER_BACKGROUND_SERIALIZED_OBJECT_LIST(FORWARD_DECL)
-HEAP_BROKER_NEVER_SERIALIZED_OBJECT_LIST(FORWARD_DECL)
-#undef FORWARD_DECL
-
-// There are several kinds of ObjectData values.
-//
-// kSmi: The underlying V8 object is a Smi and the data is an instance of the
-// base class (ObjectData), i.e. it's basically just the handle. Because the
-// object is a Smi, it's safe to access the handle in order to extract the
-// number value, and AsSmi() does exactly that.
-//
-// kSerializedHeapObject: The underlying V8 object is a HeapObject and the
-// data is an instance of the corresponding (most-specific) subclass, e.g.
-// JSFunctionData, which provides serialized information about the object.
-//
-// kBackgroundSerializedHeapObject: Like kSerializedHeapObject, but
-// allows serialization from the background thread.
-//
-// kUnserializedHeapObject: The underlying V8 object is a HeapObject and the
-// data is an instance of the base class (ObjectData), i.e. it basically
-// carries no information other than the handle.
-//
-// kNeverSerializedHeapObject: The underlying V8 object is a (potentially
-// mutable) HeapObject and the data is an instance of ObjectData. Its handle
-// must be persistent so that the GC can update it at a safepoint. Via this
-// handle, the object can be accessed concurrently to the main thread. To be
-// used the flag --turbo-direct-heap-access must be on.
-//
-// kUnserializedReadOnlyHeapObject: The underlying V8 object is a read-only
-// HeapObject and the data is an instance of ObjectData. For
-// ReadOnlyHeapObjects, it is OK to access heap even from off-thread, so
-// these objects need not be serialized.
-enum ObjectDataKind {
- kSmi,
- kSerializedHeapObject,
- kBackgroundSerializedHeapObject,
- kUnserializedHeapObject,
- kNeverSerializedHeapObject,
- kUnserializedReadOnlyHeapObject
-};
-
-namespace {
-bool IsReadOnlyHeapObject(Object object) {
- DisallowGarbageCollection no_gc;
- return (object.IsCode() && Code::cast(object).is_builtin()) ||
- (object.IsHeapObject() &&
- ReadOnlyHeap::Contains(HeapObject::cast(object)));
-}
-
-template <class T>
-constexpr bool IsSerializedHeapObject() {
- return false;
-}
-
-#define DEFINE_MARKER(Name) \
- template <> \
- constexpr bool IsSerializedHeapObject<Name>() { \
- return true; \
- } \
- STATIC_ASSERT(IsSerializedHeapObject<Name>());
-HEAP_BROKER_SERIALIZED_OBJECT_LIST(DEFINE_MARKER)
-#undef DEFINE_MARKER
-} // namespace
-
-class ObjectData : public ZoneObject {
- public:
- ObjectData(JSHeapBroker* broker, ObjectData** storage, Handle<Object> object,
- ObjectDataKind kind)
- : object_(object), kind_(kind) {
- // This assignment ensures we don't end up inserting the same object
- // in an endless recursion.
- *storage = this;
-
- TRACE(broker, "Creating data " << this << " for handle " << object.address()
- << " (" << Brief(*object) << ")");
-
- // It is safe to access read only heap objects and builtins from a
- // background thread. When we read fields of these objects, we may create
- // ObjectData on the background thread even without a canonical handle
- // scope. This is safe too since we don't create handles but just get
- // handles from read only root table or builtins table which is what
- // canonical scope uses as well. For all other objects we should have
- // created ObjectData in canonical handle scope on the main thread.
- CHECK_IMPLIES(
- broker->mode() == JSHeapBroker::kDisabled ||
- broker->mode() == JSHeapBroker::kSerializing,
- broker->isolate()->handle_scope_data()->canonical_scope != nullptr);
- CHECK_IMPLIES(broker->mode() == JSHeapBroker::kSerialized,
- kind == kUnserializedReadOnlyHeapObject || kind == kSmi ||
- kind == kNeverSerializedHeapObject ||
- kind == kBackgroundSerializedHeapObject);
- CHECK_IMPLIES(kind == kUnserializedReadOnlyHeapObject,
- IsReadOnlyHeapObject(*object));
- }
-
-#define DECLARE_IS(Name) bool Is##Name() const;
- HEAP_BROKER_SERIALIZED_OBJECT_LIST(DECLARE_IS)
- HEAP_BROKER_POSSIBLY_BACKGROUND_SERIALIZED_OBJECT_LIST(DECLARE_IS)
- HEAP_BROKER_BACKGROUND_SERIALIZED_OBJECT_LIST(DECLARE_IS)
- HEAP_BROKER_NEVER_SERIALIZED_OBJECT_LIST(DECLARE_IS)
-#undef DECLARE_IS
-
-#define DECLARE_AS(Name) Name##Data* As##Name();
- HEAP_BROKER_SERIALIZED_OBJECT_LIST(DECLARE_AS)
- HEAP_BROKER_POSSIBLY_BACKGROUND_SERIALIZED_OBJECT_LIST(DECLARE_AS)
- HEAP_BROKER_BACKGROUND_SERIALIZED_OBJECT_LIST(DECLARE_AS)
- HEAP_BROKER_NEVER_SERIALIZED_OBJECT_LIST(DECLARE_AS)
-#undef DECLARE_AS
-
- Handle<Object> object() const { return object_; }
- ObjectDataKind kind() const { return kind_; }
- bool is_smi() const { return kind_ == kSmi; }
- bool should_access_heap() const {
- return kind_ == kUnserializedHeapObject ||
- kind_ == kNeverSerializedHeapObject ||
- kind_ == kUnserializedReadOnlyHeapObject;
- }
- bool IsNull() const { return object_->IsNull(); }
-
-#ifdef DEBUG
- enum class Usage{kUnused, kOnlyIdentityUsed, kDataUsed};
- mutable Usage used_status = Usage::kUnused;
-#endif // DEBUG
-
- private:
- Handle<Object> const object_;
- ObjectDataKind const kind_;
-};
-
-class HeapObjectData : public ObjectData {
- public:
- HeapObjectData(JSHeapBroker* broker, ObjectData** storage,
- Handle<HeapObject> object,
- ObjectDataKind kind = ObjectDataKind::kSerializedHeapObject);
-
- bool boolean_value() const { return boolean_value_; }
- ObjectData* map() const { return map_; }
- InstanceType GetMapInstanceType() const;
-
- static HeapObjectData* Serialize(JSHeapBroker* broker,
- Handle<HeapObject> object);
-
- private:
- bool const boolean_value_;
- ObjectData* const map_;
-};
-
-class PropertyCellData : public HeapObjectData {
- public:
- PropertyCellData(JSHeapBroker* broker, ObjectData** storage,
- Handle<PropertyCell> object,
- ObjectDataKind kind = ObjectDataKind::kSerializedHeapObject);
-
- bool Serialize(JSHeapBroker* broker);
-
- PropertyDetails property_details() const {
- CHECK(serialized());
- return property_details_;
- }
-
- ObjectData* value() const {
- DCHECK(serialized());
- return value_;
- }
-
- private:
- PropertyDetails property_details_ = PropertyDetails::Empty();
- ObjectData* value_ = nullptr;
-
- bool serialized() const { return value_ != nullptr; }
-};
-
-// TODO(mslekova): Once we have real-world usage data, we might want to
-// reimplement this as sorted vector instead, to reduce the memory overhead.
-typedef ZoneMap<ObjectData*, HolderLookupResult> KnownReceiversMap;
-
-class FunctionTemplateInfoData : public HeapObjectData {
- public:
- FunctionTemplateInfoData(JSHeapBroker* broker, ObjectData** storage,
- Handle<FunctionTemplateInfo> object);
-
- bool is_signature_undefined() const { return is_signature_undefined_; }
- bool accept_any_receiver() const { return accept_any_receiver_; }
- bool has_call_code() const { return has_call_code_; }
-
- void SerializeCallCode(JSHeapBroker* broker);
- ObjectData* call_code() const { return call_code_; }
- Address c_function() const { return c_function_; }
- const CFunctionInfo* c_signature() const { return c_signature_; }
- KnownReceiversMap& known_receivers() { return known_receivers_; }
-
- private:
- bool is_signature_undefined_ = false;
- bool accept_any_receiver_ = false;
- bool has_call_code_ = false;
-
- ObjectData* call_code_ = nullptr;
- const Address c_function_;
- const CFunctionInfo* const c_signature_;
- KnownReceiversMap known_receivers_;
-};
-
-class CallHandlerInfoData : public HeapObjectData {
- public:
- CallHandlerInfoData(JSHeapBroker* broker, ObjectData** storage,
- Handle<CallHandlerInfo> object);
-
- Address callback() const { return callback_; }
-
- void Serialize(JSHeapBroker* broker);
- ObjectData* data() const { return data_; }
-
- private:
- Address const callback_;
-
- ObjectData* data_ = nullptr;
-};
-
-FunctionTemplateInfoData::FunctionTemplateInfoData(
- JSHeapBroker* broker, ObjectData** storage,
- Handle<FunctionTemplateInfo> object)
- : HeapObjectData(broker, storage, object),
- c_function_(v8::ToCData<Address>(object->GetCFunction())),
- c_signature_(v8::ToCData<CFunctionInfo*>(object->GetCSignature())),
- known_receivers_(broker->zone()) {
- DCHECK(!FLAG_turbo_direct_heap_access);
- auto function_template_info = Handle<FunctionTemplateInfo>::cast(object);
- is_signature_undefined_ =
- function_template_info->signature().IsUndefined(broker->isolate());
- accept_any_receiver_ = function_template_info->accept_any_receiver();
-
- CallOptimization call_optimization(broker->isolate(), object);
- has_call_code_ = call_optimization.is_simple_api_call();
-}
-
-CallHandlerInfoData::CallHandlerInfoData(JSHeapBroker* broker,
- ObjectData** storage,
- Handle<CallHandlerInfo> object)
- : HeapObjectData(broker, storage, object),
- callback_(v8::ToCData<Address>(object->callback())) {
- DCHECK(!FLAG_turbo_direct_heap_access);
-}
// These definitions are here in order to please the linker, which in debug mode
// sometimes requires static constants to be defined in .cc files.
@@ -299,2243 +34,6 @@ void JSHeapBroker::IncrementTracingIndentation() { ++trace_indentation_; }
void JSHeapBroker::DecrementTracingIndentation() { --trace_indentation_; }
-PropertyCellData::PropertyCellData(JSHeapBroker* broker, ObjectData** storage,
- Handle<PropertyCell> object,
- ObjectDataKind kind)
- : HeapObjectData(broker, storage, object, kind) {}
-
-bool PropertyCellData::Serialize(JSHeapBroker* broker) {
- if (serialized()) return true;
-
- TraceScope tracer(broker, this, "PropertyCellData::Serialize");
- auto cell = Handle<PropertyCell>::cast(object());
-
- // While this code runs on a background thread, the property cell might
- // undergo state transitions via calls to PropertyCell::Transition. These
- // transitions follow a certain protocol on which we rely here to ensure that
- // we only report success when we can guarantee consistent data. A key
- // property is that after transitioning from cell type A to B (A != B), there
- // will never be a transition back to A, unless A is kConstant and the new
- // value is the hole (i.e. the property cell was invalidated, which is a final
- // state).
-
- PropertyDetails property_details = cell->property_details(kAcquireLoad);
-
- Handle<Object> value =
- broker->CanonicalPersistentHandle(cell->value(kAcquireLoad));
- if (broker->ObjectMayBeUninitialized(value)) {
- DCHECK(!broker->IsMainThread());
- return false;
- }
-
- {
- PropertyDetails property_details_again =
- cell->property_details(kAcquireLoad);
- if (property_details != property_details_again) {
- DCHECK(!broker->IsMainThread());
- return false;
- }
- }
-
- if (property_details.cell_type() == PropertyCellType::kConstant) {
- Handle<Object> value_again =
- broker->CanonicalPersistentHandle(cell->value(kAcquireLoad));
- if (*value != *value_again) {
- DCHECK(!broker->IsMainThread());
- return false;
- }
- }
-
- ObjectData* value_data = broker->TryGetOrCreateData(value, false);
- if (value_data == nullptr) {
- DCHECK(!broker->IsMainThread());
- return false;
- }
-
- PropertyCell::CheckDataIsCompatible(property_details, *value);
-
- DCHECK(!serialized());
- property_details_ = property_details;
- value_ = value_data;
- DCHECK(serialized());
- return true;
-}
-
-void FunctionTemplateInfoData::SerializeCallCode(JSHeapBroker* broker) {
- if (call_code_ != nullptr) return;
-
- TraceScope tracer(broker, this,
- "FunctionTemplateInfoData::SerializeCallCode");
- auto function_template_info = Handle<FunctionTemplateInfo>::cast(object());
- call_code_ =
- broker->GetOrCreateData(function_template_info->call_code(kAcquireLoad));
- if (call_code_->should_access_heap()) {
- // TODO(mvstanton): When ObjectRef is in the never serialized list, this
- // code can be removed.
- broker->GetOrCreateData(
- Handle<CallHandlerInfo>::cast(call_code_->object())->data());
- } else {
- call_code_->AsCallHandlerInfo()->Serialize(broker);
- }
-}
-
-void CallHandlerInfoData::Serialize(JSHeapBroker* broker) {
- if (data_ != nullptr) return;
-
- TraceScope tracer(broker, this, "CallHandlerInfoData::Serialize");
- auto call_handler_info = Handle<CallHandlerInfo>::cast(object());
- data_ = broker->GetOrCreateData(call_handler_info->data());
-}
-
-class JSReceiverData : public HeapObjectData {
- public:
- JSReceiverData(JSHeapBroker* broker, ObjectData** storage,
- Handle<JSReceiver> object)
- : HeapObjectData(broker, storage, object) {}
-};
-
-class JSObjectData : public JSReceiverData {
- public:
- JSObjectData(JSHeapBroker* broker, ObjectData** storage,
- Handle<JSObject> object);
-
- // Recursive serialization of all reachable JSObjects.
- void SerializeAsBoilerplate(JSHeapBroker* broker);
- ObjectData* GetInobjectField(int property_index) const;
-
- // Shallow serialization of {elements}.
- void SerializeElements(JSHeapBroker* broker);
- bool serialized_elements() const { return serialized_elements_; }
- ObjectData* elements() const;
-
- void SerializeObjectCreateMap(JSHeapBroker* broker);
-
- ObjectData* object_create_map(
- JSHeapBroker* broker) const { // Can be nullptr.
- if (!serialized_object_create_map_) {
- DCHECK_NULL(object_create_map_);
- TRACE_MISSING(broker, "object_create_map on " << this);
- }
- return object_create_map_;
- }
-
- ObjectData* GetOwnConstantElement(
- JSHeapBroker* broker, uint32_t index,
- SerializationPolicy policy = SerializationPolicy::kAssumeSerialized);
- ObjectData* GetOwnFastDataProperty(
- JSHeapBroker* broker, Representation representation,
- FieldIndex field_index,
- SerializationPolicy policy = SerializationPolicy::kAssumeSerialized);
- ObjectData* GetOwnDictionaryProperty(JSHeapBroker* broker,
- InternalIndex dict_index,
- SerializationPolicy policy);
-
- // This method is only used to assert our invariants.
- bool cow_or_empty_elements_tenured() const;
-
- private:
- void SerializeRecursiveAsBoilerplate(JSHeapBroker* broker, int max_depths);
-
- ObjectData* elements_ = nullptr;
- bool cow_or_empty_elements_tenured_ = false;
- // The {serialized_as_boilerplate} flag is set when all recursively
- // reachable JSObjects are serialized.
- bool serialized_as_boilerplate_ = false;
- bool serialized_elements_ = false;
-
- ZoneVector<ObjectData*> inobject_fields_;
-
- bool serialized_object_create_map_ = false;
- ObjectData* object_create_map_ = nullptr;
-
- // Elements (indexed properties) that either
- // (1) are known to exist directly on the object as non-writable and
- // non-configurable, or (2) are known not to (possibly they don't exist at
- // all). In case (2), the second pair component is nullptr.
- ZoneVector<std::pair<uint32_t, ObjectData*>> own_constant_elements_;
- // Properties that either:
- // (1) are known to exist directly on the object, or
- // (2) are known not to (possibly they don't exist at all).
- // In case (2), the second pair component is nullptr.
- // For simplicity, this may in theory overlap with inobject_fields_.
- // For fast mode objects, the keys of the map are the property_index() values
- // of the respective property FieldIndex'es. For slow mode objects, the keys
- // are the dictionary indices.
- ZoneUnorderedMap<int, ObjectData*> own_properties_;
-};
-
-void JSObjectData::SerializeObjectCreateMap(JSHeapBroker* broker) {
- if (serialized_object_create_map_) return;
- serialized_object_create_map_ = true;
-
- TraceScope tracer(broker, this, "JSObjectData::SerializeObjectCreateMap");
- Handle<JSObject> jsobject = Handle<JSObject>::cast(object());
-
- if (jsobject->map().is_prototype_map()) {
- Handle<Object> maybe_proto_info(jsobject->map().prototype_info(),
- broker->isolate());
- if (maybe_proto_info->IsPrototypeInfo()) {
- auto proto_info = Handle<PrototypeInfo>::cast(maybe_proto_info);
- if (proto_info->HasObjectCreateMap()) {
- DCHECK_NULL(object_create_map_);
- object_create_map_ =
- broker->GetOrCreateData(proto_info->ObjectCreateMap());
- }
- }
- }
-}
-
-namespace {
-
-base::Optional<ObjectRef> GetOwnElementFromHeap(JSHeapBroker* broker,
- Handle<Object> receiver,
- uint32_t index,
- bool constant_only) {
- LookupIterator it(broker->isolate(), receiver, index, LookupIterator::OWN);
- if (it.state() == LookupIterator::DATA &&
- (!constant_only || (it.IsReadOnly() && !it.IsConfigurable()))) {
- return ObjectRef(broker,
- broker->CanonicalPersistentHandle(it.GetDataValue()));
- }
- return base::nullopt;
-}
-
-ObjectRef GetOwnFastDataPropertyFromHeap(JSHeapBroker* broker,
- Handle<JSObject> receiver,
- Representation representation,
- FieldIndex field_index) {
- Handle<Object> constant =
- JSObject::FastPropertyAt(receiver, representation, field_index);
- return ObjectRef(broker, constant);
-}
-
-ObjectRef GetOwnDictionaryPropertyFromHeap(JSHeapBroker* broker,
- Handle<JSObject> receiver,
- InternalIndex dict_index) {
- Handle<Object> constant =
- JSObject::DictionaryPropertyAt(receiver, dict_index);
- return ObjectRef(broker, constant);
-}
-
-} // namespace
-
-ObjectData* JSObjectData::GetOwnConstantElement(JSHeapBroker* broker,
- uint32_t index,
- SerializationPolicy policy) {
- for (auto const& p : own_constant_elements_) {
- if (p.first == index) return p.second;
- }
-
- if (policy == SerializationPolicy::kAssumeSerialized) {
- TRACE_MISSING(broker, "knowledge about index " << index << " on " << this);
- return nullptr;
- }
-
- base::Optional<ObjectRef> element =
- GetOwnElementFromHeap(broker, object(), index, true);
- ObjectData* result = element.has_value() ? element->data() : nullptr;
- own_constant_elements_.push_back({index, result});
- return result;
-}
-
-ObjectData* JSObjectData::GetOwnFastDataProperty(JSHeapBroker* broker,
- Representation representation,
- FieldIndex field_index,
- SerializationPolicy policy) {
- auto p = own_properties_.find(field_index.property_index());
- if (p != own_properties_.end()) return p->second;
-
- if (policy == SerializationPolicy::kAssumeSerialized) {
- TRACE_MISSING(broker, "knowledge about fast property with index "
- << field_index.property_index() << " on "
- << this);
- return nullptr;
- }
-
- ObjectRef property = GetOwnFastDataPropertyFromHeap(
- broker, Handle<JSObject>::cast(object()), representation, field_index);
- ObjectData* result(property.data());
- own_properties_.insert(std::make_pair(field_index.property_index(), result));
- return result;
-}
-
-ObjectData* JSObjectData::GetOwnDictionaryProperty(JSHeapBroker* broker,
- InternalIndex dict_index,
- SerializationPolicy policy) {
- auto p = own_properties_.find(dict_index.as_int());
- if (p != own_properties_.end()) return p->second;
-
- if (policy == SerializationPolicy::kAssumeSerialized) {
- TRACE_MISSING(broker, "knowledge about dictionary property with index "
- << dict_index.as_int() << " on " << this);
- return nullptr;
- }
-
- ObjectRef property = GetOwnDictionaryPropertyFromHeap(
- broker, Handle<JSObject>::cast(object()), dict_index);
- ObjectData* result(property.data());
- own_properties_.insert(std::make_pair(dict_index.as_int(), result));
- return result;
-}
-
-class JSTypedArrayData : public JSObjectData {
- public:
- JSTypedArrayData(JSHeapBroker* broker, ObjectData** storage,
- Handle<JSTypedArray> object)
- : JSObjectData(broker, storage, object) {}
-
- // TODO(v8:7790): Once JSObject is no longer serialized, also make
- // JSTypedArrayRef never-serialized.
- STATIC_ASSERT(IsSerializedHeapObject<JSObject>());
-
- void Serialize(JSHeapBroker* broker);
- bool serialized() const { return serialized_; }
-
- bool is_on_heap() const { return is_on_heap_; }
- size_t length() const { return length_; }
- void* data_ptr() const { return data_ptr_; }
-
- ObjectData* buffer() const { return buffer_; }
-
- private:
- bool serialized_ = false;
- bool is_on_heap_ = false;
- size_t length_ = 0;
- void* data_ptr_ = nullptr;
- ObjectData* buffer_ = nullptr;
-};
-
-void JSTypedArrayData::Serialize(JSHeapBroker* broker) {
- if (serialized_) return;
- serialized_ = true;
-
- TraceScope tracer(broker, this, "JSTypedArrayData::Serialize");
- Handle<JSTypedArray> typed_array = Handle<JSTypedArray>::cast(object());
-
- is_on_heap_ = typed_array->is_on_heap();
- length_ = typed_array->length();
- data_ptr_ = typed_array->DataPtr();
-
- if (!is_on_heap()) {
- DCHECK_NULL(buffer_);
- buffer_ = broker->GetOrCreateData(typed_array->buffer());
- }
-}
-
-class ArrayBoilerplateDescriptionData : public HeapObjectData {
- public:
- ArrayBoilerplateDescriptionData(JSHeapBroker* broker, ObjectData** storage,
- Handle<ArrayBoilerplateDescription> object)
- : HeapObjectData(broker, storage, object),
- constants_elements_length_(object->constant_elements().length()) {
- DCHECK(!FLAG_turbo_direct_heap_access);
- }
-
- int constants_elements_length() const { return constants_elements_length_; }
-
- private:
- int const constants_elements_length_;
-};
-
-class ObjectBoilerplateDescriptionData : public HeapObjectData {
- public:
- ObjectBoilerplateDescriptionData(JSHeapBroker* broker, ObjectData** storage,
- Handle<ObjectBoilerplateDescription> object)
- : HeapObjectData(broker, storage, object), size_(object->size()) {
- DCHECK(!FLAG_turbo_direct_heap_access);
- }
-
- int size() const { return size_; }
-
- private:
- int const size_;
-};
-
-class JSDataViewData : public JSObjectData {
- public:
- JSDataViewData(JSHeapBroker* broker, ObjectData** storage,
- Handle<JSDataView> object);
-
- size_t byte_length() const { return byte_length_; }
-
- private:
- size_t const byte_length_;
-};
-
-class JSBoundFunctionData : public JSObjectData {
- public:
- JSBoundFunctionData(JSHeapBroker* broker, ObjectData** storage,
- Handle<JSBoundFunction> object);
-
- bool Serialize(JSHeapBroker* broker);
- bool serialized() const { return serialized_; }
-
- ObjectData* bound_target_function() const {
- DCHECK(!FLAG_turbo_direct_heap_access);
- return bound_target_function_;
- }
- ObjectData* bound_this() const {
- DCHECK(!FLAG_turbo_direct_heap_access);
- return bound_this_;
- }
- ObjectData* bound_arguments() const {
- DCHECK(!FLAG_turbo_direct_heap_access);
- return bound_arguments_;
- }
-
- private:
- bool serialized_ = false;
-
- ObjectData* bound_target_function_ = nullptr;
- ObjectData* bound_this_ = nullptr;
- ObjectData* bound_arguments_ = nullptr;
-};
-
-class JSFunctionData : public JSObjectData {
- public:
- JSFunctionData(JSHeapBroker* broker, ObjectData** storage,
- Handle<JSFunction> object);
-
- bool has_feedback_vector() const { return has_feedback_vector_; }
- bool has_initial_map() const { return has_initial_map_; }
- bool has_prototype() const { return has_prototype_; }
- bool PrototypeRequiresRuntimeLookup() const {
- return PrototypeRequiresRuntimeLookup_;
- }
-
- void Serialize(JSHeapBroker* broker);
- bool serialized() const { return serialized_; }
-
- void SerializeCodeAndFeedback(JSHeapBroker* broker);
- bool serialized_code_and_feedback() const {
- return serialized_code_and_feedback_;
- }
-
- ObjectData* context() const { return context_; }
- ObjectData* native_context() const { return native_context_; }
- ObjectData* initial_map() const { return initial_map_; }
- ObjectData* prototype() const { return prototype_; }
- ObjectData* shared() const { return shared_; }
- ObjectData* raw_feedback_cell() const {
- DCHECK(serialized_code_and_feedback());
- return feedback_cell_;
- }
- ObjectData* feedback_vector() const {
- DCHECK(serialized_code_and_feedback());
- return feedback_vector_;
- }
- ObjectData* code() const {
- DCHECK(serialized_code_and_feedback());
- DCHECK(!FLAG_turbo_direct_heap_access);
- return code_;
- }
- int initial_map_instance_size_with_min_slack() const {
- CHECK(serialized_);
- return initial_map_instance_size_with_min_slack_;
- }
-
- private:
- bool has_feedback_vector_;
- bool has_initial_map_;
- bool has_prototype_;
- bool PrototypeRequiresRuntimeLookup_;
-
- bool serialized_ = false;
- bool serialized_code_and_feedback_ = false;
-
- ObjectData* context_ = nullptr;
- ObjectData* native_context_ = nullptr;
- ObjectData* initial_map_ = nullptr;
- ObjectData* prototype_ = nullptr;
- ObjectData* shared_ = nullptr;
- ObjectData* feedback_vector_ = nullptr;
- ObjectData* feedback_cell_ = nullptr;
- ObjectData* code_ = nullptr;
- int initial_map_instance_size_with_min_slack_;
-};
-
-class RegExpBoilerplateDescriptionData : public HeapObjectData {
- public:
- RegExpBoilerplateDescriptionData(JSHeapBroker* broker, ObjectData** storage,
- Handle<RegExpBoilerplateDescription> object)
- : HeapObjectData(broker, storage, object) {}
-
- void Serialize(JSHeapBroker* broker);
- ObjectData* data() const {
- CHECK(serialized_);
- return data_;
- }
- ObjectData* source() const {
- CHECK(serialized_);
- return source_;
- }
- int flags() const {
- CHECK(serialized_);
- return flags_;
- }
-
- private:
- bool serialized_ = false;
- ObjectData* data_ = nullptr;
- ObjectData* source_ = nullptr;
- int flags_;
-};
-
-class HeapNumberData : public HeapObjectData {
- public:
- HeapNumberData(JSHeapBroker* broker, ObjectData** storage,
- Handle<HeapNumber> object,
- ObjectDataKind kind = ObjectDataKind::kSerializedHeapObject)
- : HeapObjectData(broker, storage, object, kind),
- value_(object->value()) {}
-
- double value() const { return value_; }
-
- private:
- double const value_;
-};
-
-class ContextData : public HeapObjectData {
- public:
- ContextData(JSHeapBroker* broker, ObjectData** storage,
- Handle<Context> object);
-
- ObjectData* previous(
- JSHeapBroker* broker,
- SerializationPolicy policy = SerializationPolicy::kAssumeSerialized);
-
- // Returns nullptr if the slot index isn't valid or wasn't serialized,
- // unless {policy} is {kSerializeIfNeeded}.
- ObjectData* GetSlot(
- JSHeapBroker* broker, int index,
- SerializationPolicy policy = SerializationPolicy::kAssumeSerialized);
-
- private:
- ZoneMap<int, ObjectData*> slots_;
- ObjectData* previous_ = nullptr;
-};
-
-ContextData::ContextData(JSHeapBroker* broker, ObjectData** storage,
- Handle<Context> object)
- : HeapObjectData(broker, storage, object), slots_(broker->zone()) {}
-
-ObjectData* ContextData::previous(JSHeapBroker* broker,
- SerializationPolicy policy) {
- if (policy == SerializationPolicy::kSerializeIfNeeded &&
- previous_ == nullptr) {
- TraceScope tracer(broker, this, "ContextData::previous");
- Handle<Context> context = Handle<Context>::cast(object());
- previous_ = broker->GetOrCreateData(context->unchecked_previous());
- }
- return previous_;
-}
-
-ObjectData* ContextData::GetSlot(JSHeapBroker* broker, int index,
- SerializationPolicy policy) {
- CHECK_GE(index, 0);
- auto search = slots_.find(index);
- if (search != slots_.end()) {
- return search->second;
- }
-
- if (policy == SerializationPolicy::kSerializeIfNeeded) {
- Handle<Context> context = Handle<Context>::cast(object());
- if (index < context->length()) {
- TraceScope tracer(broker, this, "ContextData::GetSlot");
- TRACE(broker, "Serializing context slot " << index);
- ObjectData* odata = broker->GetOrCreateData(context->get(index));
- slots_.insert(std::make_pair(index, odata));
- return odata;
- }
- }
-
- return nullptr;
-}
-
-class NativeContextData : public ContextData {
- public:
-#define DECL_ACCESSOR(type, name) \
- ObjectData* name() const { return name##_; }
- BROKER_NATIVE_CONTEXT_FIELDS(DECL_ACCESSOR)
-#undef DECL_ACCESSOR
-
- const ZoneVector<ObjectData*>& function_maps() const {
- CHECK_NE(state_, State::kUnserialized);
- return function_maps_;
- }
-
- ObjectData* scope_info() const {
- CHECK_NE(state_, State::kUnserialized);
- return scope_info_;
- }
-
- NativeContextData(JSHeapBroker* broker, ObjectData** storage,
- Handle<NativeContext> object);
- void Serialize(JSHeapBroker* broker);
- void SerializeOnBackground(JSHeapBroker* broker);
-
- private:
- // After Serialize is called, the class is partially serialized and in the
- // kSerializedOnMainThread state. It then becomes kFullySerialized once
- // SerializeOnBackground is called.
- enum class State { kUnserialized, kSerializedOnMainThread, kFullySerialized };
- State state_;
-
-#define DECL_MEMBER(type, name) ObjectData* name##_ = nullptr;
- BROKER_NATIVE_CONTEXT_FIELDS(DECL_MEMBER)
-#undef DECL_MEMBER
- ZoneVector<ObjectData*> function_maps_;
- ObjectData* scope_info_ = nullptr;
-};
-
-class NameData : public HeapObjectData {
- public:
- NameData(JSHeapBroker* broker, ObjectData** storage, Handle<Name> object)
- : HeapObjectData(broker, storage, object) {
- DCHECK(!FLAG_turbo_direct_heap_access);
- }
-};
-
-class StringData : public NameData {
- public:
- StringData(JSHeapBroker* broker, ObjectData** storage, Handle<String> object);
-
- int length() const { return length_; }
- uint16_t first_char() const { return first_char_; }
- base::Optional<double> to_number() const { return to_number_; }
- bool is_external_string() const { return is_external_string_; }
- bool is_seq_string() const { return is_seq_string_; }
-
- ObjectData* GetCharAsStringOrUndefined(
- JSHeapBroker* broker, uint32_t index,
- SerializationPolicy policy = SerializationPolicy::kAssumeSerialized);
-
- private:
- int const length_;
- uint16_t const first_char_;
- base::Optional<double> to_number_;
- bool const is_external_string_;
- bool const is_seq_string_;
-
- // Known individual characters as strings, corresponding to the semantics of
- // element access (s[i]). The first pair component is always less than
- // {length_}. The second component is never nullptr.
- ZoneVector<std::pair<uint32_t, ObjectData*>> chars_as_strings_;
-};
-
-class SymbolData : public NameData {
- public:
- SymbolData(JSHeapBroker* broker, ObjectData** storage, Handle<Symbol> object)
- : NameData(broker, storage, object) {
- DCHECK(!FLAG_turbo_direct_heap_access);
- }
-};
-
-StringData::StringData(JSHeapBroker* broker, ObjectData** storage,
- Handle<String> object)
- : NameData(broker, storage, object),
- length_(object->length()),
- first_char_(length_ > 0 ? object->Get(0) : 0),
- to_number_(TryStringToDouble(broker->local_isolate(), object)),
- is_external_string_(object->IsExternalString()),
- is_seq_string_(object->IsSeqString()),
- chars_as_strings_(broker->zone()) {
- DCHECK(!FLAG_turbo_direct_heap_access);
-}
-
-class InternalizedStringData : public StringData {
- public:
- InternalizedStringData(JSHeapBroker* broker, ObjectData** storage,
- Handle<InternalizedString> object)
- : StringData(broker, storage, object) {
- DCHECK(!FLAG_turbo_direct_heap_access);
- }
-};
-
-ObjectData* StringData::GetCharAsStringOrUndefined(JSHeapBroker* broker,
- uint32_t index,
- SerializationPolicy policy) {
- if (index >= static_cast<uint32_t>(length())) return nullptr;
-
- for (auto const& p : chars_as_strings_) {
- if (p.first == index) return p.second;
- }
-
- if (policy == SerializationPolicy::kAssumeSerialized) {
- TRACE_MISSING(broker, "knowledge about index " << index << " on " << this);
- return nullptr;
- }
-
- base::Optional<ObjectRef> element =
- GetOwnElementFromHeap(broker, object(), index, true);
- ObjectData* result = element.has_value() ? element->data() : nullptr;
- chars_as_strings_.push_back({index, result});
- return result;
-}
-
-namespace {
-
-bool IsFastLiteralHelper(Handle<JSObject> boilerplate, int max_depth,
- int* max_properties) {
- DCHECK_GE(max_depth, 0);
- DCHECK_GE(*max_properties, 0);
-
- Isolate* const isolate = boilerplate->GetIsolate();
-
- // Make sure the boilerplate map is not deprecated.
- if (!JSObject::TryMigrateInstance(isolate, boilerplate)) return false;
-
- // Check for too deep nesting.
- if (max_depth == 0) return false;
-
- // Check the elements.
- Handle<FixedArrayBase> elements(boilerplate->elements(), isolate);
- if (elements->length() > 0 &&
- elements->map() != ReadOnlyRoots(isolate).fixed_cow_array_map()) {
- if (boilerplate->HasSmiOrObjectElements()) {
- Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
- int length = elements->length();
- for (int i = 0; i < length; i++) {
- if ((*max_properties)-- == 0) return false;
- Handle<Object> value(fast_elements->get(i), isolate);
- if (value->IsJSObject()) {
- Handle<JSObject> value_object = Handle<JSObject>::cast(value);
- if (!IsFastLiteralHelper(value_object, max_depth - 1,
- max_properties)) {
- return false;
- }
- }
- }
- } else if (boilerplate->HasDoubleElements()) {
- if (elements->Size() > kMaxRegularHeapObjectSize) return false;
- } else {
- return false;
- }
- }
-
- // TODO(turbofan): Do we want to support out-of-object properties?
- if (!(boilerplate->HasFastProperties() &&
- boilerplate->property_array().length() == 0)) {
- return false;
- }
-
- // Check the in-object properties.
- Handle<DescriptorArray> descriptors(
- boilerplate->map().instance_descriptors(isolate, kRelaxedLoad), isolate);
- for (InternalIndex i : boilerplate->map().IterateOwnDescriptors()) {
- PropertyDetails details = descriptors->GetDetails(i);
- if (details.location() != kField) continue;
- DCHECK_EQ(kData, details.kind());
- if ((*max_properties)-- == 0) return false;
- FieldIndex field_index = FieldIndex::ForDescriptor(boilerplate->map(), i);
- Handle<Object> value(boilerplate->RawFastPropertyAt(field_index), isolate);
- if (value->IsJSObject()) {
- Handle<JSObject> value_object = Handle<JSObject>::cast(value);
- if (!IsFastLiteralHelper(value_object, max_depth - 1, max_properties)) {
- return false;
- }
- }
- }
- return true;
-}
-
-// Maximum depth and total number of elements and properties for literal
-// graphs to be considered for fast deep-copying. The limit is chosen to
-// match the maximum number of inobject properties, to ensure that the
-// performance of using object literals is not worse than using constructor
-// functions, see crbug.com/v8/6211 for details.
-const int kMaxFastLiteralDepth = 3;
-const int kMaxFastLiteralProperties = JSObject::kMaxInObjectProperties;
-
-// Determines whether the given array or object literal boilerplate satisfies
-// all limits to be considered for fast deep-copying and computes the total
-// size of all objects that are part of the graph.
-bool IsInlinableFastLiteral(Handle<JSObject> boilerplate) {
- int max_properties = kMaxFastLiteralProperties;
- return IsFastLiteralHelper(boilerplate, kMaxFastLiteralDepth,
- &max_properties);
-}
-
-} // namespace
-
-class AccessorInfoData : public HeapObjectData {
- public:
- AccessorInfoData(JSHeapBroker* broker, ObjectData** storage,
- Handle<AccessorInfo> object);
-};
-
-class AllocationSiteData : public HeapObjectData {
- public:
- AllocationSiteData(JSHeapBroker* broker, ObjectData** storage,
- Handle<AllocationSite> object);
- void SerializeBoilerplate(JSHeapBroker* broker);
-
- bool PointsToLiteral() const { return PointsToLiteral_; }
- AllocationType GetAllocationType() const { return GetAllocationType_; }
- ObjectData* nested_site() const { return nested_site_; }
- bool IsFastLiteral() const { return IsFastLiteral_; }
- ObjectData* boilerplate() const { return boilerplate_; }
-
- // These are only valid if PointsToLiteral is false.
- ElementsKind GetElementsKind() const { return GetElementsKind_; }
- bool CanInlineCall() const { return CanInlineCall_; }
-
- private:
- bool const PointsToLiteral_;
- AllocationType const GetAllocationType_;
- ObjectData* nested_site_ = nullptr;
- bool IsFastLiteral_ = false;
- ObjectData* boilerplate_ = nullptr;
- ElementsKind GetElementsKind_ = NO_ELEMENTS;
- bool CanInlineCall_ = false;
- bool serialized_boilerplate_ = false;
-};
-
-class BigIntData : public HeapObjectData {
- public:
- BigIntData(JSHeapBroker* broker, ObjectData** storage, Handle<BigInt> object,
- ObjectDataKind kind = ObjectDataKind::kSerializedHeapObject)
- : HeapObjectData(broker, storage, object, kind),
- as_uint64_(object->AsUint64(nullptr)) {}
-
- uint64_t AsUint64() const { return as_uint64_; }
-
- private:
- const uint64_t as_uint64_;
-};
-
-// Only used in JSNativeContextSpecialization.
-class ScriptContextTableData : public HeapObjectData {
- public:
- ScriptContextTableData(JSHeapBroker* broker, ObjectData** storage,
- Handle<ScriptContextTable> object)
- : HeapObjectData(broker, storage, object) {}
-};
-
-struct PropertyDescriptor {
- ObjectData* key = nullptr;
- ObjectData* value = nullptr;
- PropertyDetails details = PropertyDetails::Empty();
- FieldIndex field_index;
- ObjectData* field_owner = nullptr;
- ObjectData* field_type = nullptr;
-};
-
-class MapData : public HeapObjectData {
- public:
- MapData(JSHeapBroker* broker, ObjectData** storage, Handle<Map> object,
- ObjectDataKind kind = ObjectDataKind::kSerializedHeapObject);
-
- InstanceType instance_type() const { return instance_type_; }
- int instance_size() const { return instance_size_; }
- byte bit_field() const { return bit_field_; }
- byte bit_field2() const { return bit_field2_; }
- uint32_t bit_field3() const { return bit_field3_; }
- bool can_be_deprecated() const { return can_be_deprecated_; }
- bool can_transition() const { return can_transition_; }
- int in_object_properties_start_in_words() const {
- CHECK(InstanceTypeChecker::IsJSObject(instance_type()));
- return in_object_properties_start_in_words_;
- }
- int in_object_properties() const {
- CHECK(InstanceTypeChecker::IsJSObject(instance_type()));
- return in_object_properties_;
- }
- int constructor_function_index() const { return constructor_function_index_; }
- int NextFreePropertyIndex() const { return next_free_property_index_; }
- int UnusedPropertyFields() const { return unused_property_fields_; }
- bool supports_fast_array_iteration() const {
- return supports_fast_array_iteration_;
- }
- bool supports_fast_array_resize() const {
- return supports_fast_array_resize_;
- }
- bool is_abandoned_prototype_map() const {
- return is_abandoned_prototype_map_;
- }
-
- // Extra information.
-
- void SerializeElementsKindGeneralizations(JSHeapBroker* broker);
- const ZoneVector<ObjectData*>& elements_kind_generalizations() const {
- CHECK(serialized_elements_kind_generalizations_);
- return elements_kind_generalizations_;
- }
-
- // Serialize a single (or all) own slot(s) of the descriptor array and recurse
- // on field owner(s).
- void SerializeOwnDescriptor(JSHeapBroker* broker,
- InternalIndex descriptor_index);
- void SerializeOwnDescriptors(JSHeapBroker* broker);
- ObjectData* GetStrongValue(InternalIndex descriptor_index) const;
- ObjectData* instance_descriptors() const { return instance_descriptors_; }
-
- void SerializeRootMap(JSHeapBroker* broker);
- ObjectData* FindRootMap() const;
-
- void SerializeConstructor(JSHeapBroker* broker);
- ObjectData* GetConstructor() const {
- CHECK(serialized_constructor_);
- return constructor_;
- }
-
- void SerializeBackPointer(JSHeapBroker* broker);
- ObjectData* GetBackPointer() const {
- CHECK(serialized_backpointer_);
- return backpointer_;
- }
-
- void SerializePrototype(JSHeapBroker* broker);
- bool serialized_prototype() const { return serialized_prototype_; }
- ObjectData* prototype() const {
- CHECK(serialized_prototype_);
- return prototype_;
- }
-
- void SerializeForElementLoad(JSHeapBroker* broker);
-
- void SerializeForElementStore(JSHeapBroker* broker);
-
- private:
- InstanceType const instance_type_;
- int const instance_size_;
- byte const bit_field_;
- byte const bit_field2_;
- uint32_t const bit_field3_;
- bool const can_be_deprecated_;
- bool const can_transition_;
- int const in_object_properties_start_in_words_;
- int const in_object_properties_;
- int const constructor_function_index_;
- int const next_free_property_index_;
- int const unused_property_fields_;
- bool const supports_fast_array_iteration_;
- bool const supports_fast_array_resize_;
- bool const is_abandoned_prototype_map_;
-
- bool serialized_elements_kind_generalizations_ = false;
- ZoneVector<ObjectData*> elements_kind_generalizations_;
-
- bool serialized_own_descriptors_ = false;
- ObjectData* instance_descriptors_ = nullptr;
-
- bool serialized_constructor_ = false;
- ObjectData* constructor_ = nullptr;
-
- bool serialized_backpointer_ = false;
- ObjectData* backpointer_ = nullptr;
-
- bool serialized_prototype_ = false;
- ObjectData* prototype_ = nullptr;
-
- bool serialized_root_map_ = false;
- ObjectData* root_map_ = nullptr;
-
- bool serialized_for_element_load_ = false;
-
- bool serialized_for_element_store_ = false;
-};
-
-AccessorInfoData::AccessorInfoData(JSHeapBroker* broker, ObjectData** storage,
- Handle<AccessorInfo> object)
- : HeapObjectData(broker, storage, object) {
- DCHECK(!FLAG_turbo_direct_heap_access);
-}
-
-AllocationSiteData::AllocationSiteData(JSHeapBroker* broker,
- ObjectData** storage,
- Handle<AllocationSite> object)
- : HeapObjectData(broker, storage, object),
- PointsToLiteral_(object->PointsToLiteral()),
- GetAllocationType_(object->GetAllocationType()) {
- if (PointsToLiteral_) {
- IsFastLiteral_ = IsInlinableFastLiteral(
- handle(object->boilerplate(), broker->isolate()));
- } else {
- GetElementsKind_ = object->GetElementsKind();
- CanInlineCall_ = object->CanInlineCall();
- }
-}
-
-void AllocationSiteData::SerializeBoilerplate(JSHeapBroker* broker) {
- if (serialized_boilerplate_) return;
- serialized_boilerplate_ = true;
-
- TraceScope tracer(broker, this, "AllocationSiteData::SerializeBoilerplate");
- Handle<AllocationSite> site = Handle<AllocationSite>::cast(object());
-
- CHECK(IsFastLiteral_);
- DCHECK_NULL(boilerplate_);
- boilerplate_ = broker->GetOrCreateData(site->boilerplate());
- if (!boilerplate_->should_access_heap()) {
- boilerplate_->AsJSObject()->SerializeAsBoilerplate(broker);
- }
-
- DCHECK_NULL(nested_site_);
- nested_site_ = broker->GetOrCreateData(site->nested_site());
- if (nested_site_->IsAllocationSite() && !nested_site_->should_access_heap()) {
- nested_site_->AsAllocationSite()->SerializeBoilerplate(broker);
- }
-}
-
-HeapObjectData::HeapObjectData(JSHeapBroker* broker, ObjectData** storage,
- Handle<HeapObject> object, ObjectDataKind kind)
- : ObjectData(broker, storage, object, kind),
- boolean_value_(object->BooleanValue(broker->isolate())),
- // We have to use a raw cast below instead of AsMap() because of
- // recursion. AsMap() would call IsMap(), which accesses the
- // instance_type_ member. In the case of constructing the MapData for the
- // meta map (whose map is itself), this member has not yet been
- // initialized.
- map_(broker->GetOrCreateData(object->map())) {
- CHECK_IMPLIES(kind == kSerializedHeapObject,
- broker->mode() == JSHeapBroker::kSerializing);
- CHECK_IMPLIES(broker->mode() == JSHeapBroker::kSerialized,
- kind == kBackgroundSerializedHeapObject);
-}
-
-InstanceType HeapObjectData::GetMapInstanceType() const {
- ObjectData* map_data = map();
- if (map_data->should_access_heap()) {
- return Handle<Map>::cast(map_data->object())->instance_type();
- }
- return map_data->AsMap()->instance_type();
-}
-
-namespace {
-bool IsReadOnlyLengthDescriptor(Isolate* isolate, Handle<Map> jsarray_map) {
- DCHECK(!jsarray_map->is_dictionary_map());
- Handle<Name> length_string = isolate->factory()->length_string();
- DescriptorArray descriptors =
- jsarray_map->instance_descriptors(isolate, kRelaxedLoad);
- // TODO(jkummerow): We could skip the search and hardcode number == 0.
- InternalIndex number = descriptors.Search(*length_string, *jsarray_map);
- DCHECK(number.is_found());
- return descriptors.GetDetails(number).IsReadOnly();
-}
-
-bool SupportsFastArrayIteration(Isolate* isolate, Handle<Map> map) {
- return map->instance_type() == JS_ARRAY_TYPE &&
- IsFastElementsKind(map->elements_kind()) &&
- map->prototype().IsJSArray() &&
- isolate->IsAnyInitialArrayPrototype(JSArray::cast(map->prototype())) &&
- Protectors::IsNoElementsIntact(isolate);
-}
-
-bool SupportsFastArrayResize(Isolate* isolate, Handle<Map> map) {
- return SupportsFastArrayIteration(isolate, map) && map->is_extensible() &&
- !map->is_dictionary_map() && !IsReadOnlyLengthDescriptor(isolate, map);
-}
-} // namespace
-
-MapData::MapData(JSHeapBroker* broker, ObjectData** storage, Handle<Map> object,
- ObjectDataKind kind)
- : HeapObjectData(broker, storage, object, kind),
- instance_type_(object->instance_type()),
- instance_size_(object->instance_size()),
- // We read the bit_field as relaxed since `has_non_instance_prototype` can
- // be modified in live objects, and because we serialize some maps on the
- // background. Those background-serialized maps are the native context's
- // maps for which this bit is "set" but it doesn't change value (i.e. it
- // is set to false when it was already false).
- bit_field_(object->relaxed_bit_field()),
- bit_field2_(object->bit_field2()),
- bit_field3_(object->bit_field3()),
- can_be_deprecated_(object->NumberOfOwnDescriptors() > 0
- ? object->CanBeDeprecated()
- : false),
- can_transition_(object->CanTransition()),
- in_object_properties_start_in_words_(
- object->IsJSObjectMap() ? object->GetInObjectPropertiesStartInWords()
- : 0),
- in_object_properties_(
- object->IsJSObjectMap() ? object->GetInObjectProperties() : 0),
- constructor_function_index_(object->IsPrimitiveMap()
- ? object->GetConstructorFunctionIndex()
- : Map::kNoConstructorFunctionIndex),
- next_free_property_index_(object->NextFreePropertyIndex()),
- unused_property_fields_(object->UnusedPropertyFields()),
- supports_fast_array_iteration_(
- SupportsFastArrayIteration(broker->isolate(), object)),
- supports_fast_array_resize_(
- SupportsFastArrayResize(broker->isolate(), object)),
- is_abandoned_prototype_map_(object->is_abandoned_prototype_map()),
- elements_kind_generalizations_(broker->zone()) {}
-
-JSFunctionData::JSFunctionData(JSHeapBroker* broker, ObjectData** storage,
- Handle<JSFunction> object)
- : JSObjectData(broker, storage, object),
- has_feedback_vector_(object->has_feedback_vector()),
- has_initial_map_(object->has_prototype_slot() &&
- object->has_initial_map()),
- has_prototype_(object->has_prototype_slot() && object->has_prototype()),
- PrototypeRequiresRuntimeLookup_(
- object->PrototypeRequiresRuntimeLookup()) {}
-
-void JSFunctionData::Serialize(JSHeapBroker* broker) {
- if (serialized_) return;
- serialized_ = true;
-
- TraceScope tracer(broker, this, "JSFunctionData::Serialize");
- Handle<JSFunction> function = Handle<JSFunction>::cast(object());
-
- DCHECK_NULL(context_);
- DCHECK_NULL(native_context_);
- DCHECK_NULL(initial_map_);
- DCHECK_NULL(prototype_);
- DCHECK_NULL(shared_);
-
- context_ = broker->GetOrCreateData(function->context());
- native_context_ = broker->GetOrCreateData(function->native_context());
- shared_ = broker->GetOrCreateData(function->shared());
-
- initial_map_ = has_initial_map()
- ? broker->GetOrCreateData(function->initial_map())
- : nullptr;
- prototype_ = has_prototype() ? broker->GetOrCreateData(function->prototype())
- : nullptr;
-
- if (initial_map_ != nullptr) {
- initial_map_instance_size_with_min_slack_ =
- function->ComputeInstanceSizeWithMinSlack(broker->isolate());
- }
- if (initial_map_ != nullptr && !initial_map_->should_access_heap()) {
- if (initial_map_->AsMap()->instance_type() == JS_ARRAY_TYPE) {
- initial_map_->AsMap()->SerializeElementsKindGeneralizations(broker);
- }
- initial_map_->AsMap()->SerializeConstructor(broker);
- // TODO(neis): This is currently only needed for native_context's
- // object_function, as used by GetObjectCreateMap. If no further use sites
- // show up, we should move this into NativeContextData::Serialize.
- initial_map_->AsMap()->SerializePrototype(broker);
- }
-}
-
-void JSFunctionData::SerializeCodeAndFeedback(JSHeapBroker* broker) {
- DCHECK(serialized_);
- if (serialized_code_and_feedback_) return;
- serialized_code_and_feedback_ = true;
-
- TraceScope tracer(broker, this, "JSFunctionData::SerializeCodeAndFeedback");
- Handle<JSFunction> function = Handle<JSFunction>::cast(object());
-
- DCHECK_NULL(feedback_cell_);
- DCHECK_NULL(feedback_vector_);
- DCHECK_NULL(code_);
- if (!FLAG_turbo_direct_heap_access) {
- // This is conditionalized because Code objects are never serialized now.
- // We only need to represent the code object in serialized data when
- // we're unable to perform direct heap accesses.
- code_ = broker->GetOrCreateData(function->code(kAcquireLoad));
- }
- feedback_cell_ = broker->GetOrCreateData(function->raw_feedback_cell());
- feedback_vector_ = has_feedback_vector()
- ? broker->GetOrCreateData(function->feedback_vector())
- : nullptr;
-}
-
-void MapData::SerializeElementsKindGeneralizations(JSHeapBroker* broker) {
- if (serialized_elements_kind_generalizations_) return;
- serialized_elements_kind_generalizations_ = true;
-
- TraceScope tracer(broker, this,
- "MapData::SerializeElementsKindGeneralizations");
- DCHECK_EQ(instance_type(), JS_ARRAY_TYPE);
- MapRef self(broker, this);
- ElementsKind from_kind = self.elements_kind();
- DCHECK(elements_kind_generalizations_.empty());
- for (int i = FIRST_FAST_ELEMENTS_KIND; i <= LAST_FAST_ELEMENTS_KIND; i++) {
- ElementsKind to_kind = static_cast<ElementsKind>(i);
- if (IsMoreGeneralElementsKindTransition(from_kind, to_kind)) {
- Handle<Map> target =
- Map::AsElementsKind(broker->isolate(), self.object(), to_kind);
- elements_kind_generalizations_.push_back(broker->GetOrCreateData(target));
- }
- }
-}
-
-class DescriptorArrayData : public HeapObjectData {
- public:
- DescriptorArrayData(JSHeapBroker* broker, ObjectData** storage,
- Handle<DescriptorArray> object)
- : HeapObjectData(broker, storage, object), contents_(broker->zone()) {
- DCHECK(!FLAG_turbo_direct_heap_access);
- }
-
- ObjectData* FindFieldOwner(InternalIndex descriptor_index) const {
- return contents_.at(descriptor_index.as_int()).field_owner;
- }
-
- PropertyDetails GetPropertyDetails(InternalIndex descriptor_index) const {
- return contents_.at(descriptor_index.as_int()).details;
- }
-
- ObjectData* GetPropertyKey(InternalIndex descriptor_index) const {
- return contents_.at(descriptor_index.as_int()).key;
- }
-
- FieldIndex GetFieldIndexFor(InternalIndex descriptor_index) const {
- return contents_.at(descriptor_index.as_int()).field_index;
- }
-
- ObjectData* GetFieldType(InternalIndex descriptor_index) const {
- return contents_.at(descriptor_index.as_int()).field_type;
- }
-
- ObjectData* GetStrongValue(InternalIndex descriptor_index) const {
- return contents_.at(descriptor_index.as_int()).value;
- }
-
- bool serialized_descriptor(InternalIndex descriptor_index) const {
- return contents_.find(descriptor_index.as_int()) != contents_.end();
- }
-
- void SerializeDescriptor(JSHeapBroker* broker, Handle<Map> map,
- InternalIndex descriptor_index);
-
- private:
- ZoneMap<int, PropertyDescriptor> contents_;
-};
-
-void DescriptorArrayData::SerializeDescriptor(JSHeapBroker* broker,
- Handle<Map> map,
- InternalIndex descriptor_index) {
- CHECK_LT(descriptor_index.as_int(), map->NumberOfOwnDescriptors());
- if (contents_.find(descriptor_index.as_int()) != contents_.end()) return;
-
- Isolate* const isolate = broker->isolate();
- auto descriptors = Handle<DescriptorArray>::cast(object());
- CHECK_EQ(*descriptors, map->instance_descriptors(isolate));
-
- PropertyDescriptor d;
- d.key = broker->GetOrCreateData(descriptors->GetKey(descriptor_index));
- MaybeObject value = descriptors->GetValue(descriptor_index);
- HeapObject obj;
- if (value.GetHeapObjectIfStrong(&obj)) {
- d.value = broker->GetOrCreateData(obj);
- }
- d.details = descriptors->GetDetails(descriptor_index);
- if (d.details.location() == kField) {
- d.field_index = FieldIndex::ForDescriptor(*map, descriptor_index);
- d.field_owner =
- broker->GetOrCreateData(map->FindFieldOwner(isolate, descriptor_index));
- d.field_type =
- broker->GetOrCreateData(descriptors->GetFieldType(descriptor_index));
- }
- contents_[descriptor_index.as_int()] = d;
-
- if (d.details.location() == kField && !d.field_owner->should_access_heap()) {
- // Recurse on the owner map.
- d.field_owner->AsMap()->SerializeOwnDescriptor(broker, descriptor_index);
- }
-
- TRACE(broker, "Copied descriptor " << descriptor_index.as_int() << " into "
- << this << " (" << contents_.size()
- << " total)");
-}
-
-class FeedbackCellData : public HeapObjectData {
- public:
- FeedbackCellData(JSHeapBroker* broker, ObjectData** storage,
- Handle<FeedbackCell> object);
-
- ObjectData* value() const { return value_; }
-
- private:
- ObjectData* const value_;
-};
-
-FeedbackCellData::FeedbackCellData(JSHeapBroker* broker, ObjectData** storage,
- Handle<FeedbackCell> object)
- : HeapObjectData(broker, storage, object),
- value_(object->value().IsFeedbackVector()
- ? broker->GetOrCreateData(object->value())
- : nullptr) {
- DCHECK(!FLAG_turbo_direct_heap_access);
-}
-
-class FeedbackVectorData : public HeapObjectData {
- public:
- FeedbackVectorData(JSHeapBroker* broker, ObjectData** storage,
- Handle<FeedbackVector> object);
-
- double invocation_count() const { return invocation_count_; }
-
- ObjectData* shared_function_info() {
- CHECK(serialized_);
- return shared_function_info_;
- }
-
- void Serialize(JSHeapBroker* broker);
- bool serialized() const { return serialized_; }
- ObjectData* GetClosureFeedbackCell(JSHeapBroker* broker, int index) const;
-
- private:
- double const invocation_count_;
-
- bool serialized_ = false;
- ObjectData* shared_function_info_;
- ZoneVector<ObjectData*> closure_feedback_cell_array_;
-};
-
-FeedbackVectorData::FeedbackVectorData(JSHeapBroker* broker,
- ObjectData** storage,
- Handle<FeedbackVector> object)
- : HeapObjectData(broker, storage, object),
- invocation_count_(object->invocation_count()),
- closure_feedback_cell_array_(broker->zone()) {
- DCHECK(!FLAG_turbo_direct_heap_access);
-}
-
-ObjectData* FeedbackVectorData::GetClosureFeedbackCell(JSHeapBroker* broker,
- int index) const {
- CHECK_GE(index, 0);
-
- size_t cell_array_size = closure_feedback_cell_array_.size();
- if (!serialized_) {
- DCHECK_EQ(cell_array_size, 0);
- TRACE_BROKER_MISSING(broker,
- " closure feedback cell array for vector " << this);
- return nullptr;
- }
- CHECK_LT(index, cell_array_size);
- return closure_feedback_cell_array_[index];
-}
-
-void FeedbackVectorData::Serialize(JSHeapBroker* broker) {
- if (serialized_) return;
- serialized_ = true;
-
- TraceScope tracer(broker, this, "FeedbackVectorData::Serialize");
- Handle<FeedbackVector> vector = Handle<FeedbackVector>::cast(object());
- Handle<SharedFunctionInfo> sfi(vector->shared_function_info(),
- broker->isolate());
- shared_function_info_ = broker->GetOrCreateData(sfi);
- DCHECK(closure_feedback_cell_array_.empty());
- int length = vector->closure_feedback_cell_array().length();
- closure_feedback_cell_array_.reserve(length);
- for (int i = 0; i < length; ++i) {
- Handle<FeedbackCell> cell = vector->GetClosureFeedbackCell(i);
- ObjectData* cell_data = broker->GetOrCreateData(cell);
- closure_feedback_cell_array_.push_back(cell_data);
- }
- TRACE(broker, "Copied " << length << " feedback cells");
-}
-
-class FixedArrayBaseData : public HeapObjectData {
- public:
- FixedArrayBaseData(JSHeapBroker* broker, ObjectData** storage,
- Handle<FixedArrayBase> object)
- : HeapObjectData(broker, storage, object), length_(object->length()) {}
-
- int length() const { return length_; }
-
- private:
- int const length_;
-};
-
-class FixedArrayData : public FixedArrayBaseData {
- public:
- FixedArrayData(JSHeapBroker* broker, ObjectData** storage,
- Handle<FixedArray> object);
-
- // Creates all elements of the fixed array.
- void SerializeContents(JSHeapBroker* broker);
-
- ObjectData* Get(int i) const;
-
- private:
- bool serialized_contents_ = false;
- ZoneVector<ObjectData*> contents_;
-};
-
-JSDataViewData::JSDataViewData(JSHeapBroker* broker, ObjectData** storage,
- Handle<JSDataView> object)
- : JSObjectData(broker, storage, object),
- byte_length_(object->byte_length()) {}
-
-JSBoundFunctionData::JSBoundFunctionData(JSHeapBroker* broker,
- ObjectData** storage,
- Handle<JSBoundFunction> object)
- : JSObjectData(broker, storage, object) {}
-
-bool JSBoundFunctionData::Serialize(JSHeapBroker* broker) {
- if (serialized_) return true;
- if (broker->StackHasOverflowed()) return false;
-
- TraceScope tracer(broker, this, "JSBoundFunctionData::Serialize");
- Handle<JSBoundFunction> function = Handle<JSBoundFunction>::cast(object());
-
- // We don't immediately set {serialized_} in order to correctly handle the
- // case where a recursive call to this method reaches the stack limit.
-
- DCHECK_NULL(bound_target_function_);
- bound_target_function_ =
- broker->GetOrCreateData(function->bound_target_function());
- bool serialized_nested = true;
- if (!bound_target_function_->should_access_heap()) {
- if (bound_target_function_->IsJSBoundFunction()) {
- serialized_nested =
- bound_target_function_->AsJSBoundFunction()->Serialize(broker);
- } else if (bound_target_function_->IsJSFunction()) {
- bound_target_function_->AsJSFunction()->Serialize(broker);
- }
- }
- if (!serialized_nested) {
- // We couldn't serialize all nested bound functions due to stack
- // overflow. Give up.
- DCHECK(!serialized_);
- bound_target_function_ = nullptr; // Reset to sync with serialized_.
- return false;
- }
-
- serialized_ = true;
-
- DCHECK_NULL(bound_arguments_);
- bound_arguments_ = broker->GetOrCreateData(function->bound_arguments());
- if (!bound_arguments_->should_access_heap()) {
- bound_arguments_->AsFixedArray()->SerializeContents(broker);
- }
-
- DCHECK_NULL(bound_this_);
- bound_this_ = broker->GetOrCreateData(function->bound_this());
-
- return true;
-}
-
-JSObjectData::JSObjectData(JSHeapBroker* broker, ObjectData** storage,
- Handle<JSObject> object)
- : JSReceiverData(broker, storage, object),
- inobject_fields_(broker->zone()),
- own_constant_elements_(broker->zone()),
- own_properties_(broker->zone()) {}
-
-FixedArrayData::FixedArrayData(JSHeapBroker* broker, ObjectData** storage,
- Handle<FixedArray> object)
- : FixedArrayBaseData(broker, storage, object), contents_(broker->zone()) {}
-
-void FixedArrayData::SerializeContents(JSHeapBroker* broker) {
- if (serialized_contents_) return;
- serialized_contents_ = true;
-
- TraceScope tracer(broker, this, "FixedArrayData::SerializeContents");
- Handle<FixedArray> array = Handle<FixedArray>::cast(object());
- CHECK_EQ(array->length(), length());
- CHECK(contents_.empty());
- contents_.reserve(static_cast<size_t>(length()));
-
- for (int i = 0; i < length(); i++) {
- Handle<Object> value(array->get(i), broker->isolate());
- contents_.push_back(broker->GetOrCreateData(value));
- }
- TRACE(broker, "Copied " << contents_.size() << " elements");
-}
-
-class FixedDoubleArrayData : public FixedArrayBaseData {
- public:
- FixedDoubleArrayData(JSHeapBroker* broker, ObjectData** storage,
- Handle<FixedDoubleArray> object);
-
- // Serializes all elements of the fixed array.
- void SerializeContents(JSHeapBroker* broker);
-
- Float64 Get(int i) const;
-
- private:
- bool serialized_contents_ = false;
- ZoneVector<Float64> contents_;
-};
-
-FixedDoubleArrayData::FixedDoubleArrayData(JSHeapBroker* broker,
- ObjectData** storage,
- Handle<FixedDoubleArray> object)
- : FixedArrayBaseData(broker, storage, object), contents_(broker->zone()) {}
-
-void FixedDoubleArrayData::SerializeContents(JSHeapBroker* broker) {
- if (serialized_contents_) return;
- serialized_contents_ = true;
-
- TraceScope tracer(broker, this, "FixedDoubleArrayData::SerializeContents");
- Handle<FixedDoubleArray> self = Handle<FixedDoubleArray>::cast(object());
- CHECK_EQ(self->length(), length());
- CHECK(contents_.empty());
- contents_.reserve(static_cast<size_t>(length()));
-
- for (int i = 0; i < length(); i++) {
- contents_.push_back(Float64::FromBits(self->get_representation(i)));
- }
- TRACE(broker, "Copied " << contents_.size() << " elements");
-}
-
-class BytecodeArrayData : public FixedArrayBaseData {
- public:
- int register_count() const { return register_count_; }
- int parameter_count() const { return parameter_count_; }
- interpreter::Register incoming_new_target_or_generator_register() const {
- return incoming_new_target_or_generator_register_;
- }
-
- BytecodeArrayData(JSHeapBroker* broker, ObjectData** storage,
- Handle<BytecodeArray> object)
- : FixedArrayBaseData(broker, storage, object),
- register_count_(object->register_count()),
- parameter_count_(object->parameter_count()),
- incoming_new_target_or_generator_register_(
- object->incoming_new_target_or_generator_register()) {}
-
- private:
- int const register_count_;
- int const parameter_count_;
- interpreter::Register const incoming_new_target_or_generator_register_;
-};
-
-class JSArrayData : public JSObjectData {
- public:
- JSArrayData(JSHeapBroker* broker, ObjectData** storage,
- Handle<JSArray> object);
-
- void Serialize(JSHeapBroker* broker);
- ObjectData* length() const {
- CHECK(serialized_);
- return length_;
- }
-
- ObjectData* GetOwnElement(
- JSHeapBroker* broker, uint32_t index,
- SerializationPolicy policy = SerializationPolicy::kAssumeSerialized);
-
- private:
- bool serialized_ = false;
- ObjectData* length_ = nullptr;
-
- // Elements (indexed properties) that either
- // (1) are known to exist directly on the object, or
- // (2) are known not to (possibly they don't exist at all).
- // In case (2), the second pair component is nullptr.
- ZoneVector<std::pair<uint32_t, ObjectData*>> own_elements_;
-};
-
-JSArrayData::JSArrayData(JSHeapBroker* broker, ObjectData** storage,
- Handle<JSArray> object)
- : JSObjectData(broker, storage, object), own_elements_(broker->zone()) {}
-
-void JSArrayData::Serialize(JSHeapBroker* broker) {
- CHECK(!FLAG_turbo_direct_heap_access);
-
- if (serialized_) return;
- serialized_ = true;
-
- TraceScope tracer(broker, this, "JSArrayData::Serialize");
- Handle<JSArray> jsarray = Handle<JSArray>::cast(object());
-
- DCHECK_NULL(length_);
- length_ = broker->GetOrCreateData(jsarray->length());
-}
-
-ObjectData* JSArrayData::GetOwnElement(JSHeapBroker* broker, uint32_t index,
- SerializationPolicy policy) {
- for (auto const& p : own_elements_) {
- if (p.first == index) return p.second;
- }
-
- if (policy == SerializationPolicy::kAssumeSerialized) {
- TRACE_MISSING(broker, "knowledge about index " << index << " on " << this);
- return nullptr;
- }
-
- base::Optional<ObjectRef> element =
- GetOwnElementFromHeap(broker, object(), index, false);
- ObjectData* result = element.has_value() ? element->data() : nullptr;
- own_elements_.push_back({index, result});
- return result;
-}
-
-class ScopeInfoData : public HeapObjectData {
- public:
- ScopeInfoData(JSHeapBroker* broker, ObjectData** storage,
- Handle<ScopeInfo> object);
-
- int ContextLength() const { return context_length_; }
- bool HasContextExtensionSlot() const { return has_context_extension_slot_; }
- bool HasOuterScopeInfo() const { return has_outer_scope_info_; }
-
- ObjectData* OuterScopeInfo() const { return outer_scope_info_; }
- void SerializeScopeInfoChain(JSHeapBroker* broker);
-
- private:
- int const context_length_;
- bool const has_context_extension_slot_;
- bool const has_outer_scope_info_;
-
- // Only serialized via SerializeScopeInfoChain.
- ObjectData* outer_scope_info_;
-};
-
-ScopeInfoData::ScopeInfoData(JSHeapBroker* broker, ObjectData** storage,
- Handle<ScopeInfo> object)
- : HeapObjectData(broker, storage, object),
- context_length_(object->ContextLength()),
- has_context_extension_slot_(object->HasContextExtensionSlot()),
- has_outer_scope_info_(object->HasOuterScopeInfo()),
- outer_scope_info_(nullptr) {
- DCHECK(!FLAG_turbo_direct_heap_access);
-}
-
-void ScopeInfoData::SerializeScopeInfoChain(JSHeapBroker* broker) {
- if (outer_scope_info_) return;
- if (!has_outer_scope_info_) return;
- outer_scope_info_ = broker->GetOrCreateData(
- Handle<ScopeInfo>::cast(object())->OuterScopeInfo());
- if (!outer_scope_info_->should_access_heap()) {
- outer_scope_info_->AsScopeInfo()->SerializeScopeInfoChain(broker);
- }
-}
-
-class SharedFunctionInfoData : public HeapObjectData {
- public:
- SharedFunctionInfoData(JSHeapBroker* broker, ObjectData** storage,
- Handle<SharedFunctionInfo> object);
-
- int builtin_id() const { return builtin_id_; }
- int context_header_size() const { return context_header_size_; }
- ObjectData* GetBytecodeArray() const { return GetBytecodeArray_; }
- SharedFunctionInfo::Inlineability GetInlineability() const {
- return inlineability_;
- }
- void SerializeFunctionTemplateInfo(JSHeapBroker* broker);
- ObjectData* scope_info() const { return scope_info_; }
- void SerializeScopeInfoChain(JSHeapBroker* broker);
- ObjectData* function_template_info() const { return function_template_info_; }
- ObjectData* GetTemplateObject(FeedbackSlot slot) const {
- auto lookup_it = template_objects_.find(slot.ToInt());
- if (lookup_it != template_objects_.cend()) {
- return lookup_it->second;
- }
- return nullptr;
- }
- void SetTemplateObject(FeedbackSlot slot, ObjectData* object) {
- CHECK(
- template_objects_.insert(std::make_pair(slot.ToInt(), object)).second);
- }
-
-#define DECL_ACCESSOR(type, name) \
- type name() const { return name##_; }
- BROKER_SFI_FIELDS(DECL_ACCESSOR)
-#undef DECL_ACCESSOR
-
- private:
- int const builtin_id_;
- int const context_header_size_;
- ObjectData* const GetBytecodeArray_;
-#define DECL_MEMBER(type, name) type const name##_;
- BROKER_SFI_FIELDS(DECL_MEMBER)
-#undef DECL_MEMBER
- SharedFunctionInfo::Inlineability const inlineability_;
- ObjectData* function_template_info_;
- ZoneMap<int, ObjectData*> template_objects_;
- ObjectData* scope_info_;
-};
-
-SharedFunctionInfoData::SharedFunctionInfoData(
- JSHeapBroker* broker, ObjectData** storage,
- Handle<SharedFunctionInfo> object)
- : HeapObjectData(broker, storage, object),
- builtin_id_(object->HasBuiltinId() ? object->builtin_id()
- : Builtins::kNoBuiltinId),
- context_header_size_(object->scope_info().ContextHeaderLength()),
- GetBytecodeArray_(object->HasBytecodeArray()
- ? broker->GetOrCreateData(
- object->GetBytecodeArray(broker->isolate()))
- : nullptr)
-#define INIT_MEMBER(type, name) , name##_(object->name())
- BROKER_SFI_FIELDS(INIT_MEMBER)
-#undef INIT_MEMBER
- ,
- inlineability_(object->GetInlineability(broker->isolate())),
- function_template_info_(nullptr),
- template_objects_(broker->zone()),
- scope_info_(nullptr) {
- DCHECK_EQ(HasBuiltinId_, builtin_id_ != Builtins::kNoBuiltinId);
- DCHECK_EQ(HasBytecodeArray_, GetBytecodeArray_ != nullptr);
-}
-
-void SharedFunctionInfoData::SerializeFunctionTemplateInfo(
- JSHeapBroker* broker) {
- if (function_template_info_) return;
- function_template_info_ = broker->GetOrCreateData(
- Handle<SharedFunctionInfo>::cast(object())->function_data(kAcquireLoad));
-}
-
-void SharedFunctionInfoData::SerializeScopeInfoChain(JSHeapBroker* broker) {
- if (scope_info_) return;
- scope_info_ = broker->GetOrCreateData(
- Handle<SharedFunctionInfo>::cast(object())->scope_info());
- if (!scope_info_->should_access_heap()) {
- scope_info_->AsScopeInfo()->SerializeScopeInfoChain(broker);
- }
-}
-
-class SourceTextModuleData : public HeapObjectData {
- public:
- SourceTextModuleData(JSHeapBroker* broker, ObjectData** storage,
- Handle<SourceTextModule> object);
- void Serialize(JSHeapBroker* broker);
-
- ObjectData* GetCell(JSHeapBroker* broker, int cell_index) const;
- ObjectData* GetImportMeta(JSHeapBroker* broker) const;
-
- private:
- bool serialized_ = false;
- ZoneVector<ObjectData*> imports_;
- ZoneVector<ObjectData*> exports_;
- ObjectData* import_meta_;
-};
-
-SourceTextModuleData::SourceTextModuleData(JSHeapBroker* broker,
- ObjectData** storage,
- Handle<SourceTextModule> object)
- : HeapObjectData(broker, storage, object),
- imports_(broker->zone()),
- exports_(broker->zone()),
- import_meta_(nullptr) {}
-
-ObjectData* SourceTextModuleData::GetCell(JSHeapBroker* broker,
- int cell_index) const {
- if (!serialized_) {
- DCHECK(imports_.empty());
- TRACE_BROKER_MISSING(broker,
- "module cell " << cell_index << " on " << this);
- return nullptr;
- }
- ObjectData* cell;
- switch (SourceTextModuleDescriptor::GetCellIndexKind(cell_index)) {
- case SourceTextModuleDescriptor::kImport:
- cell = imports_.at(SourceTextModule::ImportIndex(cell_index));
- break;
- case SourceTextModuleDescriptor::kExport:
- cell = exports_.at(SourceTextModule::ExportIndex(cell_index));
- break;
- case SourceTextModuleDescriptor::kInvalid:
- UNREACHABLE();
- }
- CHECK_NOT_NULL(cell);
- return cell;
-}
-
-ObjectData* SourceTextModuleData::GetImportMeta(JSHeapBroker* broker) const {
- CHECK(serialized_);
- return import_meta_;
-}
-
-void SourceTextModuleData::Serialize(JSHeapBroker* broker) {
- if (serialized_) return;
- serialized_ = true;
-
- TraceScope tracer(broker, this, "SourceTextModuleData::Serialize");
- Handle<SourceTextModule> module = Handle<SourceTextModule>::cast(object());
-
- // TODO(neis): We could be smarter and only serialize the cells we care about.
- // TODO(neis): Define a helper for serializing a FixedArray into a ZoneVector.
-
- DCHECK(imports_.empty());
- Handle<FixedArray> imports(module->regular_imports(), broker->isolate());
- int const imports_length = imports->length();
- imports_.reserve(imports_length);
- for (int i = 0; i < imports_length; ++i) {
- imports_.push_back(broker->GetOrCreateData(imports->get(i)));
- }
- TRACE(broker, "Copied " << imports_.size() << " imports");
-
- DCHECK(exports_.empty());
- Handle<FixedArray> exports(module->regular_exports(), broker->isolate());
- int const exports_length = exports->length();
- exports_.reserve(exports_length);
- for (int i = 0; i < exports_length; ++i) {
- exports_.push_back(broker->GetOrCreateData(exports->get(i)));
- }
- TRACE(broker, "Copied " << exports_.size() << " exports");
-
- DCHECK_NULL(import_meta_);
- import_meta_ = broker->GetOrCreateData(module->import_meta());
- TRACE(broker, "Copied import_meta");
-}
-
-class CellData : public HeapObjectData {
- public:
- CellData(JSHeapBroker* broker, ObjectData** storage, Handle<Cell> object)
- : HeapObjectData(broker, storage, object) {
- DCHECK(!FLAG_turbo_direct_heap_access);
- }
-};
-
-class JSGlobalObjectData : public JSObjectData {
- public:
- JSGlobalObjectData(JSHeapBroker* broker, ObjectData** storage,
- Handle<JSGlobalObject> object);
- bool IsDetached() const { return is_detached_; }
-
- ObjectData* GetPropertyCell(
- JSHeapBroker* broker, ObjectData* name,
- SerializationPolicy policy = SerializationPolicy::kAssumeSerialized);
-
- private:
- bool const is_detached_;
-
- // Properties that either
- // (1) are known to exist as property cells on the global object, or
- // (2) are known not to (possibly they don't exist at all).
- // In case (2), the second pair component is nullptr.
- ZoneVector<std::pair<ObjectData*, ObjectData*>> properties_;
-};
-
-JSGlobalObjectData::JSGlobalObjectData(JSHeapBroker* broker,
- ObjectData** storage,
- Handle<JSGlobalObject> object)
- : JSObjectData(broker, storage, object),
- is_detached_(object->IsDetached()),
- properties_(broker->zone()) {}
-
-class JSGlobalProxyData : public JSObjectData {
- public:
- JSGlobalProxyData(JSHeapBroker* broker, ObjectData** storage,
- Handle<JSGlobalProxy> object);
-};
-
-JSGlobalProxyData::JSGlobalProxyData(JSHeapBroker* broker, ObjectData** storage,
- Handle<JSGlobalProxy> object)
- : JSObjectData(broker, storage, object) {}
-
-namespace {
-base::Optional<PropertyCellRef> GetPropertyCellFromHeap(JSHeapBroker* broker,
- Handle<Name> name) {
- LookupIterator it(
- broker->isolate(),
- handle(broker->target_native_context().object()->global_object(),
- broker->isolate()),
- name, LookupIterator::OWN);
- it.TryLookupCachedProperty();
- if (it.state() == LookupIterator::DATA &&
- it.GetHolder<JSObject>()->IsJSGlobalObject()) {
- return PropertyCellRef(broker, it.GetPropertyCell());
- }
- return base::nullopt;
-}
-} // namespace
-
-ObjectData* JSGlobalObjectData::GetPropertyCell(JSHeapBroker* broker,
- ObjectData* name,
- SerializationPolicy policy) {
- CHECK_NOT_NULL(name);
- for (auto const& p : properties_) {
- if (p.first == name) return p.second;
- }
-
- if (policy == SerializationPolicy::kAssumeSerialized) {
- TRACE_MISSING(broker, "knowledge about global property " << name);
- return nullptr;
- }
-
- ObjectData* result = nullptr;
- base::Optional<PropertyCellRef> cell =
- GetPropertyCellFromHeap(broker, Handle<Name>::cast(name->object()));
- if (cell.has_value()) {
- result = cell->data();
- if (!result->should_access_heap()) {
- result->AsPropertyCell()->Serialize(broker);
- }
- }
- properties_.push_back({name, result});
- return result;
-}
-
-class TemplateObjectDescriptionData : public HeapObjectData {
- public:
- TemplateObjectDescriptionData(JSHeapBroker* broker, ObjectData** storage,
- Handle<TemplateObjectDescription> object)
- : HeapObjectData(broker, storage, object) {
- DCHECK(!FLAG_turbo_direct_heap_access);
- }
-};
-
-class CodeData : public HeapObjectData {
- public:
- CodeData(JSHeapBroker* broker, ObjectData** storage, Handle<Code> object)
- : HeapObjectData(broker, storage, object),
- inlined_bytecode_size_(object->inlined_bytecode_size() > 0 &&
- !object->marked_for_deoptimization()
- ? object->inlined_bytecode_size()
- : 0) {
- DCHECK(!FLAG_turbo_direct_heap_access);
- }
-
- unsigned inlined_bytecode_size() const { return inlined_bytecode_size_; }
-
- private:
- unsigned const inlined_bytecode_size_;
-};
-
-#define DEFINE_IS(Name) \
- bool ObjectData::Is##Name() const { \
- if (should_access_heap()) { \
- return object()->Is##Name(); \
- } \
- if (is_smi()) return false; \
- InstanceType instance_type = \
- static_cast<const HeapObjectData*>(this)->GetMapInstanceType(); \
- return InstanceTypeChecker::Is##Name(instance_type); \
- }
-HEAP_BROKER_SERIALIZED_OBJECT_LIST(DEFINE_IS)
-HEAP_BROKER_POSSIBLY_BACKGROUND_SERIALIZED_OBJECT_LIST(DEFINE_IS)
-HEAP_BROKER_BACKGROUND_SERIALIZED_OBJECT_LIST(DEFINE_IS)
-HEAP_BROKER_NEVER_SERIALIZED_OBJECT_LIST(DEFINE_IS)
-#undef DEFINE_IS
-
-#define DEFINE_AS(Name) \
- Name##Data* ObjectData::As##Name() { \
- CHECK(Is##Name()); \
- CHECK(kind_ == kSerializedHeapObject || \
- kind_ == kBackgroundSerializedHeapObject); \
- return static_cast<Name##Data*>(this); \
- }
-HEAP_BROKER_SERIALIZED_OBJECT_LIST(DEFINE_AS)
-HEAP_BROKER_POSSIBLY_BACKGROUND_SERIALIZED_OBJECT_LIST(DEFINE_AS)
-HEAP_BROKER_BACKGROUND_SERIALIZED_OBJECT_LIST(DEFINE_AS)
-#undef DEFINE_AS
-
-// TODO(solanes, v8:10866): Remove once FLAG_turbo_direct_heap_access is
-// removed.
-// This macro defines the Asxxx methods for NeverSerialized objects, which
-// should only be used with direct heap access off.
-#define DEFINE_AS(Name) \
- Name##Data* ObjectData::As##Name() { \
- DCHECK(!FLAG_turbo_direct_heap_access); \
- CHECK(Is##Name()); \
- CHECK_EQ(kind_, kSerializedHeapObject); \
- return static_cast<Name##Data*>(this); \
- }
-HEAP_BROKER_NEVER_SERIALIZED_OBJECT_LIST(DEFINE_AS)
-#undef DEFINE_AS
-
-ObjectData* JSObjectData::GetInobjectField(int property_index) const {
- CHECK_LT(static_cast<size_t>(property_index), inobject_fields_.size());
- return inobject_fields_[property_index];
-}
-
-bool JSObjectData::cow_or_empty_elements_tenured() const {
- return cow_or_empty_elements_tenured_;
-}
-
-ObjectData* JSObjectData::elements() const {
- CHECK(serialized_elements_);
- return elements_;
-}
-
-void JSObjectData::SerializeAsBoilerplate(JSHeapBroker* broker) {
- SerializeRecursiveAsBoilerplate(broker, kMaxFastLiteralDepth);
-}
-
-void JSObjectData::SerializeElements(JSHeapBroker* broker) {
- if (serialized_elements_) return;
- serialized_elements_ = true;
-
- TraceScope tracer(broker, this, "JSObjectData::SerializeElements");
- Handle<JSObject> boilerplate = Handle<JSObject>::cast(object());
- Handle<FixedArrayBase> elements_object(boilerplate->elements(),
- broker->isolate());
- DCHECK_NULL(elements_);
- elements_ = broker->GetOrCreateData(elements_object);
- DCHECK(elements_->IsFixedArrayBase());
-}
-
-void MapData::SerializeConstructor(JSHeapBroker* broker) {
- if (serialized_constructor_) return;
- serialized_constructor_ = true;
-
- TraceScope tracer(broker, this, "MapData::SerializeConstructor");
- Handle<Map> map = Handle<Map>::cast(object());
- DCHECK(!map->IsContextMap());
- DCHECK_NULL(constructor_);
- constructor_ = broker->GetOrCreateData(map->GetConstructor());
-}
-
-void MapData::SerializeBackPointer(JSHeapBroker* broker) {
- if (serialized_backpointer_) return;
- serialized_backpointer_ = true;
-
- TraceScope tracer(broker, this, "MapData::SerializeBackPointer");
- Handle<Map> map = Handle<Map>::cast(object());
- DCHECK_NULL(backpointer_);
- DCHECK(!map->IsContextMap());
- backpointer_ = broker->GetOrCreateData(map->GetBackPointer());
-}
-
-void MapData::SerializePrototype(JSHeapBroker* broker) {
- if (serialized_prototype_) return;
- serialized_prototype_ = true;
-
- TraceScope tracer(broker, this, "MapData::SerializePrototype");
- Handle<Map> map = Handle<Map>::cast(object());
- DCHECK_NULL(prototype_);
- prototype_ = broker->GetOrCreateData(map->prototype());
-}
-
-void MapData::SerializeOwnDescriptors(JSHeapBroker* broker) {
- if (serialized_own_descriptors_) return;
- serialized_own_descriptors_ = true;
-
- TraceScope tracer(broker, this, "MapData::SerializeOwnDescriptors");
- Handle<Map> map = Handle<Map>::cast(object());
-
- for (InternalIndex i : map->IterateOwnDescriptors()) {
- SerializeOwnDescriptor(broker, i);
- }
-}
-
-void MapData::SerializeOwnDescriptor(JSHeapBroker* broker,
- InternalIndex descriptor_index) {
- TraceScope tracer(broker, this, "MapData::SerializeOwnDescriptor");
- Handle<Map> map = Handle<Map>::cast(object());
- Isolate* isolate = broker->isolate();
-
- if (instance_descriptors_ == nullptr) {
- instance_descriptors_ =
- broker->GetOrCreateData(map->instance_descriptors(isolate));
- }
-
- if (instance_descriptors()->should_access_heap()) {
- // When accessing the fields concurrently, we still have to recurse on the
- // owner map if it is different than the current map. This is because
- // {instance_descriptors_} gets set on SerializeOwnDescriptor and otherwise
- // we risk the field owner having a null {instance_descriptors_}.
- Handle<DescriptorArray> descriptors(map->instance_descriptors(isolate),
- isolate);
- if (descriptors->GetDetails(descriptor_index).location() == kField) {
- Handle<Map> owner(map->FindFieldOwner(isolate, descriptor_index),
- isolate);
- if (!owner.equals(map)) {
- broker->GetOrCreateData(owner)->AsMap()->SerializeOwnDescriptor(
- broker, descriptor_index);
- }
- }
- } else {
- DescriptorArrayData* descriptors =
- instance_descriptors()->AsDescriptorArray();
- descriptors->SerializeDescriptor(broker, map, descriptor_index);
- }
-}
-
-void MapData::SerializeRootMap(JSHeapBroker* broker) {
- if (serialized_root_map_) return;
- serialized_root_map_ = true;
-
- TraceScope tracer(broker, this, "MapData::SerializeRootMap");
- Handle<Map> map = Handle<Map>::cast(object());
- DCHECK_NULL(root_map_);
- root_map_ = broker->GetOrCreateData(map->FindRootMap(broker->isolate()));
-}
-
-ObjectData* MapData::FindRootMap() const { return root_map_; }
-
-void JSObjectData::SerializeRecursiveAsBoilerplate(JSHeapBroker* broker,
- int depth) {
- if (serialized_as_boilerplate_) return;
- serialized_as_boilerplate_ = true;
-
- TraceScope tracer(broker, this,
- "JSObjectData::SerializeRecursiveAsBoilerplate");
- Handle<JSObject> boilerplate = Handle<JSObject>::cast(object());
-
- // We only serialize boilerplates that pass the IsInlinableFastLiteral
- // check, so we only do a check on the depth here.
- CHECK_GT(depth, 0);
- CHECK(!boilerplate->map().is_deprecated());
-
- // Serialize the elements.
- Isolate* const isolate = broker->isolate();
- Handle<FixedArrayBase> elements_object(boilerplate->elements(), isolate);
-
- // Boilerplates need special serialization - we need to make sure COW arrays
- // are tenured. Boilerplate objects should only be reachable from their
- // allocation site, so it is safe to assume that the elements have not been
- // serialized yet.
-
- bool const empty_or_cow =
- elements_object->length() == 0 ||
- elements_object->map() == ReadOnlyRoots(isolate).fixed_cow_array_map();
- if (empty_or_cow) {
- // We need to make sure copy-on-write elements are tenured.
- if (ObjectInYoungGeneration(*elements_object)) {
- elements_object = isolate->factory()->CopyAndTenureFixedCOWArray(
- Handle<FixedArray>::cast(elements_object));
- boilerplate->set_elements(*elements_object);
- }
- cow_or_empty_elements_tenured_ = true;
- }
-
- DCHECK_NULL(elements_);
- DCHECK(!serialized_elements_);
- serialized_elements_ = true;
- elements_ = broker->GetOrCreateData(elements_object);
- DCHECK(elements_->IsFixedArrayBase());
-
- if (empty_or_cow || elements_->should_access_heap()) {
- // No need to do anything here. Empty or copy-on-write elements
- // do not need to be serialized because we only need to store the elements
- // reference to the allocated object.
- } else if (boilerplate->HasSmiOrObjectElements()) {
- elements_->AsFixedArray()->SerializeContents(broker);
- Handle<FixedArray> fast_elements =
- Handle<FixedArray>::cast(elements_object);
- int length = elements_object->length();
- for (int i = 0; i < length; i++) {
- Handle<Object> value(fast_elements->get(i), isolate);
- if (value->IsJSObject()) {
- ObjectData* value_data = broker->GetOrCreateData(value);
- if (!value_data->should_access_heap()) {
- value_data->AsJSObject()->SerializeRecursiveAsBoilerplate(broker,
- depth - 1);
- }
- }
- }
- } else {
- CHECK(boilerplate->HasDoubleElements());
- CHECK_LE(elements_object->Size(), kMaxRegularHeapObjectSize);
- DCHECK_EQ(elements_->kind(), ObjectDataKind::kSerializedHeapObject);
- elements_->AsFixedDoubleArray()->SerializeContents(broker);
- }
-
- // TODO(turbofan): Do we want to support out-of-object properties?
- CHECK(boilerplate->HasFastProperties() &&
- boilerplate->property_array().length() == 0);
- CHECK_EQ(inobject_fields_.size(), 0u);
-
- // Check the in-object properties.
- Handle<DescriptorArray> descriptors(
- boilerplate->map().instance_descriptors(isolate), isolate);
- for (InternalIndex i : boilerplate->map().IterateOwnDescriptors()) {
- PropertyDetails details = descriptors->GetDetails(i);
- if (details.location() != kField) continue;
- DCHECK_EQ(kData, details.kind());
-
- FieldIndex field_index = FieldIndex::ForDescriptor(boilerplate->map(), i);
- // Make sure {field_index} agrees with {inobject_properties} on the index of
- // this field.
- DCHECK_EQ(field_index.property_index(),
- static_cast<int>(inobject_fields_.size()));
- Handle<Object> value(boilerplate->RawFastPropertyAt(field_index), isolate);
- // In case of double fields we use a sentinel NaN value to mark
- // uninitialized fields. A boilerplate value with such a field may migrate
- // from its double to a tagged representation. The sentinel value carries
- // no special meaning when it occurs in a heap number, so we would like to
- // recover the uninitialized value. We check for the sentinel here,
- // specifically, since migrations might have been triggered as part of
- // boilerplate serialization.
- if (!details.representation().IsDouble() && value->IsHeapNumber() &&
- HeapNumber::cast(*value).value_as_bits() == kHoleNanInt64) {
- value = isolate->factory()->uninitialized_value();
- }
- ObjectData* value_data = broker->GetOrCreateData(value);
- if (value_data->IsJSObject() && !value_data->should_access_heap()) {
- value_data->AsJSObject()->SerializeRecursiveAsBoilerplate(broker,
- depth - 1);
- }
- inobject_fields_.push_back(value_data);
- }
- TRACE(broker, "Copied " << inobject_fields_.size() << " in-object fields");
-
- if (!map()->should_access_heap()) {
- map()->AsMap()->SerializeOwnDescriptors(broker);
- }
-
- if (IsJSArray() && !FLAG_turbo_direct_heap_access) {
- AsJSArray()->Serialize(broker);
- }
-}
-
-void RegExpBoilerplateDescriptionData::Serialize(JSHeapBroker* broker) {
- if (serialized_) return; // Only serialize once.
- serialized_ = true;
-
- TraceScope tracer(broker, this,
- "RegExpBoilerplateDescriptionData::Serialize");
- auto boilerplate = Handle<RegExpBoilerplateDescription>::cast(object());
-
- data_ = broker->GetOrCreateData(boilerplate->data());
- source_ = broker->GetOrCreateData(boilerplate->source());
- flags_ = boilerplate->flags();
-}
-
-#ifdef DEBUG
-bool ObjectRef::IsNeverSerializedHeapObject() const {
- return data_->kind() == ObjectDataKind::kNeverSerializedHeapObject;
-}
-#endif // DEBUG
-
-bool ObjectRef::equals(const ObjectRef& other) const {
-#ifdef DEBUG
- if (broker()->mode() == JSHeapBroker::kSerialized &&
- data_->used_status == ObjectData::Usage::kUnused) {
- data_->used_status = ObjectData::Usage::kOnlyIdentityUsed;
- }
-#endif // DEBUG
- return data_ == other.data_;
-}
-
-bool ObjectRef::ShouldHaveBeenSerialized() const {
- return broker()->mode() == JSHeapBroker::kSerialized &&
- data()->kind() == kSerializedHeapObject;
-}
-
-Isolate* ObjectRef::isolate() const { return broker()->isolate(); }
-
-ContextRef ContextRef::previous(size_t* depth,
- SerializationPolicy policy) const {
- DCHECK_NOT_NULL(depth);
-
- if (data_->should_access_heap()) {
- Context current = *object();
- while (*depth != 0 && current.unchecked_previous().IsContext()) {
- current = Context::cast(current.unchecked_previous());
- (*depth)--;
- }
- return ContextRef(broker(), broker()->CanonicalPersistentHandle(current));
- }
-
- if (*depth == 0) return *this;
-
- ObjectData* previous_data = data()->AsContext()->previous(broker(), policy);
- if (previous_data == nullptr || !previous_data->IsContext()) return *this;
-
- *depth = *depth - 1;
- return ContextRef(broker(), previous_data).previous(depth, policy);
-}
-
-base::Optional<ObjectRef> ContextRef::get(int index,
- SerializationPolicy policy) const {
- if (data_->should_access_heap()) {
- Handle<Object> value(object()->get(index), broker()->isolate());
- return ObjectRef(broker(), value);
- }
- ObjectData* optional_slot =
- data()->AsContext()->GetSlot(broker(), index, policy);
- if (optional_slot != nullptr) {
- return ObjectRef(broker(), optional_slot);
- }
- return base::nullopt;
-}
-
-SourceTextModuleRef ContextRef::GetModule(SerializationPolicy policy) const {
- ContextRef current = *this;
- while (current.map().instance_type() != MODULE_CONTEXT_TYPE) {
- size_t depth = 1;
- current = current.previous(&depth, policy);
- CHECK_EQ(depth, 0);
- }
- return current.get(Context::EXTENSION_INDEX, policy)
- .value()
- .AsSourceTextModule();
-}
-
JSHeapBroker::JSHeapBroker(Isolate* isolate, Zone* broker_zone,
bool tracing_enabled, bool is_concurrent_inlining,
CodeKind code_kind)
@@ -2560,7 +58,6 @@ JSHeapBroker::JSHeapBroker(Isolate* isolate, Zone* broker_zone,
// immediately with a larger-capacity one. It doesn't seem to affect the
// performance in a noticeable way though.
TRACE(this, "Constructing heap broker");
- DCHECK_IMPLIES(is_concurrent_inlining_, FLAG_turbo_direct_heap_access);
}
JSHeapBroker::~JSHeapBroker() { DCHECK_NULL(local_isolate_); }
@@ -2602,7 +99,7 @@ void JSHeapBroker::AttachLocalIsolate(OptimizedCompilationInfo* info,
local_isolate_->heap()->AttachPersistentHandles(
info->DetachPersistentHandles());
- if (FLAG_turbo_direct_heap_access) {
+ if (is_concurrent_inlining()) {
// Ensure any serialization that happens on the background has been
// performed.
target_native_context().SerializeOnBackground();
@@ -2625,88 +122,6 @@ void JSHeapBroker::StopSerializing() {
mode_ = kSerialized;
}
-#ifdef DEBUG
-void JSHeapBroker::PrintRefsAnalysis() const {
- // Usage counts
- size_t used_total = 0, unused_total = 0, identity_used_total = 0;
- for (RefsMap::Entry* ref = refs_->Start(); ref != nullptr;
- ref = refs_->Next(ref)) {
- switch (ref->value->used_status) {
- case ObjectData::Usage::kUnused:
- ++unused_total;
- break;
- case ObjectData::Usage::kOnlyIdentityUsed:
- ++identity_used_total;
- break;
- case ObjectData::Usage::kDataUsed:
- ++used_total;
- break;
- }
- }
-
- // Ref types analysis
- TRACE_BROKER_MEMORY(
- this, "Refs: " << refs_->occupancy() << "; data used: " << used_total
- << "; only identity used: " << identity_used_total
- << "; unused: " << unused_total);
- size_t used_smis = 0, unused_smis = 0, identity_used_smis = 0;
- size_t used[LAST_TYPE + 1] = {0};
- size_t unused[LAST_TYPE + 1] = {0};
- size_t identity_used[LAST_TYPE + 1] = {0};
- for (RefsMap::Entry* ref = refs_->Start(); ref != nullptr;
- ref = refs_->Next(ref)) {
- if (ref->value->is_smi()) {
- switch (ref->value->used_status) {
- case ObjectData::Usage::kUnused:
- ++unused_smis;
- break;
- case ObjectData::Usage::kOnlyIdentityUsed:
- ++identity_used_smis;
- break;
- case ObjectData::Usage::kDataUsed:
- ++used_smis;
- break;
- }
- } else {
- InstanceType instance_type;
- if (ref->value->should_access_heap()) {
- instance_type = Handle<HeapObject>::cast(ref->value->object())
- ->map()
- .instance_type();
- } else {
- instance_type = ref->value->AsHeapObject()->GetMapInstanceType();
- }
- CHECK_LE(FIRST_TYPE, instance_type);
- CHECK_LE(instance_type, LAST_TYPE);
- switch (ref->value->used_status) {
- case ObjectData::Usage::kUnused:
- ++unused[instance_type];
- break;
- case ObjectData::Usage::kOnlyIdentityUsed:
- ++identity_used[instance_type];
- break;
- case ObjectData::Usage::kDataUsed:
- ++used[instance_type];
- break;
- }
- }
- }
-
- TRACE_BROKER_MEMORY(
- this, "Smis: " << used_smis + identity_used_smis + unused_smis
- << "; data used: " << used_smis << "; only identity used: "
- << identity_used_smis << "; unused: " << unused_smis);
- for (uint16_t i = FIRST_TYPE; i <= LAST_TYPE; ++i) {
- size_t total = used[i] + identity_used[i] + unused[i];
- if (total == 0) continue;
- TRACE_BROKER_MEMORY(
- this, InstanceType(i) << ": " << total << "; data used: " << used[i]
- << "; only identity used: " << identity_used[i]
- << "; unused: " << unused[i]);
- }
-}
-#endif // DEBUG
-
void JSHeapBroker::Retire() {
CHECK_EQ(mode_, kSerialized);
TRACE(this, "Retiring");
@@ -2719,16 +134,11 @@ void JSHeapBroker::Retire() {
void JSHeapBroker::SetTargetNativeContextRef(
Handle<NativeContext> native_context) {
- // The MapData constructor uses {target_native_context_}. This creates a
- // benign cycle that we break by setting {target_native_context_} right before
- // starting to serialize (thus creating dummy data), and then again properly
- // right after.
DCHECK((mode() == kDisabled && !target_native_context_.has_value()) ||
(mode() == kSerializing &&
- target_native_context_->object().equals(native_context) &&
- target_native_context_->data_->kind() == kUnserializedHeapObject));
- target_native_context_ =
- NativeContextRef(this, CanonicalPersistentHandle(*native_context));
+ target_native_context_->object().is_identical_to(native_context) &&
+ target_native_context_->is_unserialized_heap_object()));
+ target_native_context_ = MakeRef(this, *native_context);
}
void JSHeapBroker::CollectArrayAndObjectPrototypes() {
@@ -2755,7 +165,7 @@ StringRef JSHeapBroker::GetTypedArrayStringTag(ElementsKind kind) {
switch (kind) {
#define TYPED_ARRAY_STRING_TAG(Type, type, TYPE, ctype) \
case ElementsKind::TYPE##_ELEMENTS: \
- return StringRef(this, isolate()->factory()->Type##Array_string());
+ return MakeRef(this, isolate()->factory()->Type##Array_string());
TYPED_ARRAYS(TYPED_ARRAY_STRING_TAG)
#undef TYPED_ARRAY_STRING_TAG
default:
@@ -2798,201 +208,38 @@ bool JSHeapBroker::IsSerializedForCompilation(
}
bool JSHeapBroker::IsArrayOrObjectPrototype(const JSObjectRef& object) const {
+ return IsArrayOrObjectPrototype(object.object());
+}
+
+bool JSHeapBroker::IsArrayOrObjectPrototype(Handle<JSObject> object) const {
if (mode() == kDisabled) {
- return isolate()->IsInAnyContext(*object.object(),
+ return isolate()->IsInAnyContext(*object,
Context::INITIAL_ARRAY_PROTOTYPE_INDEX) ||
- isolate()->IsInAnyContext(*object.object(),
+ isolate()->IsInAnyContext(*object,
Context::INITIAL_OBJECT_PROTOTYPE_INDEX);
}
CHECK(!array_and_object_prototypes_.empty());
- return array_and_object_prototypes_.find(object.object()) !=
+ return array_and_object_prototypes_.find(object) !=
array_and_object_prototypes_.end();
}
-void JSHeapBroker::InitializeAndStartSerializing(
- Handle<NativeContext> native_context) {
- TraceScope tracer(this, "JSHeapBroker::InitializeAndStartSerializing");
-
- CHECK_EQ(mode_, kDisabled);
- mode_ = kSerializing;
-
- // Throw away the dummy data that we created while disabled.
- refs_->Clear();
- refs_ =
- zone()->New<RefsMap>(kInitialRefsBucketCount, AddressMatcher(), zone());
-
- SetTargetNativeContextRef(native_context);
- target_native_context().Serialize();
- if (!FLAG_turbo_direct_heap_access) {
- // Perform full native context serialization now if we can't do it later on
- // the background thread.
- target_native_context().SerializeOnBackground();
- }
-
- CollectArrayAndObjectPrototypes();
-
- Factory* const f = isolate()->factory();
- if (!FLAG_turbo_direct_heap_access) {
- ObjectData* data;
- data = GetOrCreateData(f->array_buffer_detaching_protector());
- if (!data->should_access_heap()) data->AsPropertyCell()->Serialize(this);
- data = GetOrCreateData(f->array_constructor_protector());
- if (!data->should_access_heap()) data->AsPropertyCell()->Serialize(this);
- data = GetOrCreateData(f->array_iterator_protector());
- if (!data->should_access_heap()) data->AsPropertyCell()->Serialize(this);
- data = GetOrCreateData(f->array_species_protector());
- if (!data->should_access_heap()) data->AsPropertyCell()->Serialize(this);
- data = GetOrCreateData(f->no_elements_protector());
- if (!data->should_access_heap()) data->AsPropertyCell()->Serialize(this);
- data = GetOrCreateData(f->promise_hook_protector());
- if (!data->should_access_heap()) data->AsPropertyCell()->Serialize(this);
- data = GetOrCreateData(f->promise_species_protector());
- if (!data->should_access_heap()) data->AsPropertyCell()->Serialize(this);
- data = GetOrCreateData(f->promise_then_protector());
- if (!data->should_access_heap()) data->AsPropertyCell()->Serialize(this);
- data = GetOrCreateData(f->string_length_protector());
- if (!data->should_access_heap()) data->AsPropertyCell()->Serialize(this);
- }
- GetOrCreateData(f->many_closures_cell());
- GetOrCreateData(
- CodeFactory::CEntry(isolate(), 1, kDontSaveFPRegs, kArgvOnStack, true));
-
- TRACE(this, "Finished serializing standard objects");
+ObjectData* JSHeapBroker::TryGetOrCreateData(Object object,
+ GetOrCreateDataFlags flags) {
+ return TryGetOrCreateData(CanonicalPersistentHandle(object), flags);
}
ObjectData* JSHeapBroker::GetOrCreateData(Handle<Object> object,
- ObjectRef::BackgroundSerialization background_serialization) {
- ObjectData* return_value =
- TryGetOrCreateData(object, true, background_serialization);
+ GetOrCreateDataFlags flags) {
+ ObjectData* return_value = TryGetOrCreateData(object, flags | kCrashOnError);
DCHECK_NOT_NULL(return_value);
return return_value;
}
-// clang-format off
-ObjectData* JSHeapBroker::TryGetOrCreateData(Handle<Object> object,
- bool crash_on_error,
- ObjectRef::BackgroundSerialization background_serialization) {
- RefsMap::Entry* entry = refs_->Lookup(object.address());
- if (entry != nullptr) return entry->value;
-
- if (mode() == JSHeapBroker::kDisabled) {
- entry = refs_->LookupOrInsert(object.address());
- ObjectData** storage = &(entry->value);
- if (*storage == nullptr) {
- entry->value = zone()->New<ObjectData>(
- this, storage, object,
- object->IsSmi() ? kSmi : kUnserializedHeapObject);
- }
- return *storage;
- }
-
- CHECK(mode() == JSHeapBroker::kSerializing ||
- mode() == JSHeapBroker::kSerialized);
-
- ObjectData* object_data;
- if (object->IsSmi()) {
- entry = refs_->LookupOrInsert(object.address());
- object_data = zone()->New<ObjectData>(this, &(entry->value), object, kSmi);
- } else if (IsReadOnlyHeapObject(*object)) {
- entry = refs_->LookupOrInsert(object.address());
- object_data = zone()->New<ObjectData>(this, &(entry->value), object,
- kUnserializedReadOnlyHeapObject);
-// TODO(solanes, v8:10866): Remove the if/else in this macro once we remove the
-// FLAG_turbo_direct_heap_access.
-#define CREATE_DATA_FOR_DIRECT_READ(name) \
- } else if (object->Is##name()) { \
- if (FLAG_turbo_direct_heap_access) { \
- entry = refs_->LookupOrInsert(object.address()); \
- object_data = zone()->New<ObjectData>( \
- this, &(entry->value), object, kNeverSerializedHeapObject); \
- } else if (mode() == kSerializing) { \
- entry = refs_->LookupOrInsert(object.address()); \
- object_data = zone()->New<name##Data>(this, &(entry->value), \
- Handle<name>::cast(object)); \
- } else { \
- CHECK(!crash_on_error); \
- return nullptr; \
- }
- HEAP_BROKER_NEVER_SERIALIZED_OBJECT_LIST(CREATE_DATA_FOR_DIRECT_READ)
-#undef CREATE_DATA_FOR_DIRECT_READ
-#define CREATE_DATA_FOR_POSSIBLE_SERIALIZATION(name) \
- } else if (object->Is##name()) { \
- if (mode() == kSerialized && \
- background_serialization != \
- ObjectRef::BackgroundSerialization::kAllowed) { \
- CHECK(!crash_on_error); \
- return nullptr; \
- } \
- entry = refs_->LookupOrInsert(object.address()); \
- ObjectDataKind kind = (background_serialization == \
- ObjectRef::BackgroundSerialization::kAllowed) \
- ? kBackgroundSerializedHeapObject \
- : kSerializedHeapObject; \
- object_data = zone()->New<name##Data>(this, &(entry->value), \
- Handle<name>::cast(object), \
- kind);
- HEAP_BROKER_POSSIBLY_BACKGROUND_SERIALIZED_OBJECT_LIST(
- CREATE_DATA_FOR_POSSIBLE_SERIALIZATION)
-#undef CREATE_DATA_FOR_POSSIBLE_SERIALIZATION
-#define CREATE_DATA_FOR_BACKGROUND_SERIALIZATION(name) \
- } else if (object->Is##name()) { \
- if (FLAG_turbo_direct_heap_access) { \
- entry = refs_->LookupOrInsert(object.address()); \
- object_data = zone()->New<name##Data>( \
- this, &(entry->value), Handle<name>::cast(object), \
- kBackgroundSerializedHeapObject); \
- } else if (mode() == kSerializing) { \
- entry = refs_->LookupOrInsert(object.address()); \
- object_data = zone()->New<name##Data>(this, &(entry->value), \
- Handle<name>::cast(object)); \
- } else { \
- CHECK(!crash_on_error); \
- return nullptr; \
- }
- HEAP_BROKER_BACKGROUND_SERIALIZED_OBJECT_LIST(
- CREATE_DATA_FOR_BACKGROUND_SERIALIZATION)
-#undef CREATE_DATA_FOR_BACKGROUND_SERIALIZATION
-#define CREATE_DATA_FOR_SERIALIZATION(name) \
- } else if (object->Is##name()) { \
- if (mode() == kSerializing) { \
- entry = refs_->LookupOrInsert(object.address()); \
- object_data = zone()->New<name##Data>(this, &(entry->value), \
- Handle<name>::cast(object)); \
- } else { \
- CHECK(!crash_on_error); \
- return nullptr; \
- }
- HEAP_BROKER_SERIALIZED_OBJECT_LIST(CREATE_DATA_FOR_SERIALIZATION)
-#undef CREATE_DATA_FOR_SERIALIZATION
- } else {
- UNREACHABLE();
- }
- // At this point the entry pointer is not guaranteed to be valid as
- // the refs_ hash table could be resized by one of the constructors above.
- DCHECK_EQ(object_data, refs_->Lookup(object.address())->value);
- return object_data;
-}
-// clang-format on
-
-ObjectData* JSHeapBroker::GetOrCreateData(
- Object object,
- ObjectRef::BackgroundSerialization background_serialization) {
- return GetOrCreateData(CanonicalPersistentHandle(object),
- background_serialization);
+ObjectData* JSHeapBroker::GetOrCreateData(Object object,
+ GetOrCreateDataFlags flags) {
+ return GetOrCreateData(CanonicalPersistentHandle(object), flags);
}
-#define DEFINE_IS_AND_AS(Name) \
- bool ObjectRef::Is##Name() const { return data()->Is##Name(); } \
- Name##Ref ObjectRef::As##Name() const { \
- DCHECK(Is##Name()); \
- return Name##Ref(broker(), data()); \
- }
-HEAP_BROKER_SERIALIZED_OBJECT_LIST(DEFINE_IS_AND_AS)
-HEAP_BROKER_POSSIBLY_BACKGROUND_SERIALIZED_OBJECT_LIST(DEFINE_IS_AND_AS)
-HEAP_BROKER_BACKGROUND_SERIALIZED_OBJECT_LIST(DEFINE_IS_AND_AS)
-HEAP_BROKER_NEVER_SERIALIZED_OBJECT_LIST(DEFINE_IS_AND_AS)
-#undef DEFINE_IS_AND_AS
-
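The DEFINE_IS_AND_AS block deleted here is an X-macro: a single object list is expanded into the whole family of Is##Name / As##Name members. A simplified, compilable sketch of the same technique, with an invented two-entry type list standing in for the HEAP_BROKER_*_OBJECT_LIST macros:

#include <cassert>

// Hypothetical two-entry type list playing the role of the object-list macros.
#define DEMO_OBJECT_LIST(V) \
  V(Map)                    \
  V(String)

enum class Kind { kMap, kString };

struct ObjectRef {
  Kind kind;
  // One macro expansion per list entry yields Is##Name() and As##Name().
#define DEFINE_IS_AND_AS(Name)                            \
  bool Is##Name() const { return kind == Kind::k##Name; } \
  ObjectRef As##Name() const {                            \
    assert(Is##Name());                                   \
    return *this;                                         \
  }
  DEMO_OBJECT_LIST(DEFINE_IS_AND_AS)
#undef DEFINE_IS_AND_AS
};

int main() {
  ObjectRef ref{Kind::kString};
  assert(ref.IsString() && !ref.IsMap());
  return 0;
}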
bool JSHeapBroker::StackHasOverflowed() const {
DCHECK_IMPLIES(local_isolate_ == nullptr,
ThreadId::Current() == isolate_->thread_id());
@@ -3002,1742 +249,12 @@ bool JSHeapBroker::StackHasOverflowed() const {
}
bool JSHeapBroker::ObjectMayBeUninitialized(Handle<Object> object) const {
- return !IsMainThread() && object->IsHeapObject() &&
- isolate()->heap()->IsPendingAllocation(HeapObject::cast(*object));
-}
-
-bool ObjectRef::IsSmi() const { return data()->is_smi(); }
-
-int ObjectRef::AsSmi() const {
- DCHECK(IsSmi());
- // Handle-dereference is always allowed for Handle<Smi>.
- return Handle<Smi>::cast(object())->value();
-}
-
-base::Optional<MapRef> JSObjectRef::GetObjectCreateMap() const {
- if (data_->should_access_heap()) {
- Handle<Map> instance_map;
- if (Map::TryGetObjectCreateMap(broker()->isolate(), object())
- .ToHandle(&instance_map)) {
- return MapRef(broker(), instance_map);
- } else {
- return base::Optional<MapRef>();
- }
- }
- ObjectData* map_data = data()->AsJSObject()->object_create_map(broker());
- if (map_data == nullptr) return base::Optional<MapRef>();
- if (map_data->should_access_heap()) {
- return MapRef(broker(), map_data->object());
- }
- return MapRef(broker(), map_data->AsMap());
-}
-
-#define DEF_TESTER(Type, ...) \
- bool MapRef::Is##Type##Map() const { \
- return InstanceTypeChecker::Is##Type(instance_type()); \
- }
-INSTANCE_TYPE_CHECKERS(DEF_TESTER)
-#undef DEF_TESTER
-
-base::Optional<MapRef> MapRef::AsElementsKind(ElementsKind kind) const {
- if (data_->should_access_heap()) {
- return MapRef(broker(),
- Map::AsElementsKind(broker()->isolate(), object(), kind));
- }
- if (kind == elements_kind()) return *this;
- const ZoneVector<ObjectData*>& elements_kind_generalizations =
- data()->AsMap()->elements_kind_generalizations();
- for (auto data : elements_kind_generalizations) {
- MapRef map(broker(), data);
- if (map.elements_kind() == kind) return map;
- }
- return base::Optional<MapRef>();
+ if (!object->IsHeapObject()) return false;
+ return ObjectMayBeUninitialized(HeapObject::cast(*object));
}
-void MapRef::SerializeForElementLoad() {
- if (data()->should_access_heap()) return;
- CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
- data()->AsMap()->SerializeForElementLoad(broker());
-}
-
-void MapRef::SerializeForElementStore() {
- if (data()->should_access_heap()) return;
- CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
- data()->AsMap()->SerializeForElementStore(broker());
-}
-
-void MapData::SerializeForElementLoad(JSHeapBroker* broker) {
- if (serialized_for_element_load_) return;
- serialized_for_element_load_ = true;
-
- TraceScope tracer(broker, this, "MapData::SerializeForElementLoad");
- SerializePrototype(broker);
-}
-
-void MapData::SerializeForElementStore(JSHeapBroker* broker) {
- if (serialized_for_element_store_) return;
- serialized_for_element_store_ = true;
-
- TraceScope tracer(broker, this, "MapData::SerializeForElementStore");
- // TODO(solanes, v8:7790): This should use MapData methods rather than
- // constructing MapRefs, but it involves non-trivial refactoring and this
- // method should go away anyway once the compiler is fully concurrent.
- MapRef map(broker, this);
- for (MapRef prototype_map = map;;) {
- prototype_map.SerializePrototype();
- prototype_map = prototype_map.prototype().map();
- if (prototype_map.oddball_type() == OddballType::kNull ||
- !map.prototype().IsJSObject() || !prototype_map.is_stable() ||
- !IsFastElementsKind(prototype_map.elements_kind())) {
- return;
- }
- }
-}
-
-bool MapRef::HasOnlyStablePrototypesWithFastElements(
- ZoneVector<MapRef>* prototype_maps) {
- DCHECK_NOT_NULL(prototype_maps);
- MapRef prototype_map = prototype().map();
- while (prototype_map.oddball_type() != OddballType::kNull) {
- if (!prototype().IsJSObject() || !prototype_map.is_stable() ||
- !IsFastElementsKind(prototype_map.elements_kind())) {
- return false;
- }
- prototype_maps->push_back(prototype_map);
- prototype_map = prototype_map.prototype().map();
- }
- return true;
-}
-
-bool MapRef::supports_fast_array_iteration() const {
- if (data_->should_access_heap()) {
- return SupportsFastArrayIteration(broker()->isolate(), object());
- }
- return data()->AsMap()->supports_fast_array_iteration();
-}
-
-bool MapRef::supports_fast_array_resize() const {
- if (data_->should_access_heap()) {
- return SupportsFastArrayResize(broker()->isolate(), object());
- }
- return data()->AsMap()->supports_fast_array_resize();
-}
-
-int JSFunctionRef::InitialMapInstanceSizeWithMinSlack() const {
- if (data_->should_access_heap()) {
- return object()->ComputeInstanceSizeWithMinSlack(broker()->isolate());
- }
- return data()->AsJSFunction()->initial_map_instance_size_with_min_slack();
-}
-
-OddballType MapRef::oddball_type() const {
- if (instance_type() != ODDBALL_TYPE) {
- return OddballType::kNone;
- }
- Factory* f = broker()->isolate()->factory();
- if (equals(MapRef(broker(), f->undefined_map()))) {
- return OddballType::kUndefined;
- }
- if (equals(MapRef(broker(), f->null_map()))) {
- return OddballType::kNull;
- }
- if (equals(MapRef(broker(), f->boolean_map()))) {
- return OddballType::kBoolean;
- }
- if (equals(MapRef(broker(), f->the_hole_map()))) {
- return OddballType::kHole;
- }
- if (equals(MapRef(broker(), f->uninitialized_map()))) {
- return OddballType::kUninitialized;
- }
- DCHECK(equals(MapRef(broker(), f->termination_exception_map())) ||
- equals(MapRef(broker(), f->arguments_marker_map())) ||
- equals(MapRef(broker(), f->optimized_out_map())) ||
- equals(MapRef(broker(), f->stale_register_map())));
- return OddballType::kOther;
-}
-
-FeedbackCellRef FeedbackVectorRef::GetClosureFeedbackCell(int index) const {
- if (data_->should_access_heap()) {
- FeedbackCell cell = object()->closure_feedback_cell(index);
- Handle<FeedbackCell> cell_handle =
- broker()->CanonicalPersistentHandle(cell);
- // These should all be available because we request the cell for each
- // CreateClosure bytecode.
- ObjectData* cell_data = broker()->GetOrCreateData(cell_handle);
- return FeedbackCellRef(broker(), cell_data);
- }
-
- return FeedbackCellRef(
- broker(),
- data()->AsFeedbackVector()->GetClosureFeedbackCell(broker(), index));
-}
-
-ObjectRef JSObjectRef::RawFastPropertyAt(FieldIndex index) const {
- CHECK(index.is_inobject());
- if (data_->should_access_heap()) {
- return ObjectRef(broker(), broker()->CanonicalPersistentHandle(
- object()->RawFastPropertyAt(index)));
- }
- JSObjectData* object_data = data()->AsJSObject();
- return ObjectRef(broker(),
- object_data->GetInobjectField(index.property_index()));
-}
-
-bool AllocationSiteRef::IsFastLiteral() const {
- if (data_->should_access_heap()) {
- CHECK_NE(data_->kind(), ObjectDataKind::kNeverSerializedHeapObject);
- return IsInlinableFastLiteral(
- handle(object()->boilerplate(), broker()->isolate()));
- }
- return data()->AsAllocationSite()->IsFastLiteral();
-}
-
-void AllocationSiteRef::SerializeBoilerplate() {
- if (data_->should_access_heap()) return;
- CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
- data()->AsAllocationSite()->SerializeBoilerplate(broker());
-}
-
-void JSObjectRef::SerializeElements() {
- if (data_->should_access_heap()) return;
- CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
- data()->AsJSObject()->SerializeElements(broker());
-}
-
-void JSObjectRef::EnsureElementsTenured() {
- if (data_->should_access_heap()) {
- Handle<FixedArrayBase> object_elements = elements().value().object();
- if (ObjectInYoungGeneration(*object_elements)) {
- // If we would like to pretenure a fixed cow array, we must ensure that
- // the array is already in old space, otherwise we'll create too many
- // old-to-new-space pointers (overflowing the store buffer).
- object_elements =
- broker()->isolate()->factory()->CopyAndTenureFixedCOWArray(
- Handle<FixedArray>::cast(object_elements));
- object()->set_elements(*object_elements);
- }
- return;
- }
- CHECK(data()->AsJSObject()->cow_or_empty_elements_tenured());
-}
-
-FieldIndex MapRef::GetFieldIndexFor(InternalIndex descriptor_index) const {
- CHECK_LT(descriptor_index.as_int(), NumberOfOwnDescriptors());
- if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) {
- FieldIndex result = FieldIndex::ForDescriptor(*object(), descriptor_index);
- DCHECK(result.is_inobject());
- return result;
- }
- DescriptorArrayData* descriptors =
- data()->AsMap()->instance_descriptors()->AsDescriptorArray();
- FieldIndex result = descriptors->GetFieldIndexFor(descriptor_index);
- DCHECK(result.is_inobject());
- return result;
-}
-
-int MapRef::GetInObjectPropertyOffset(int i) const {
- if (data_->should_access_heap()) {
- return object()->GetInObjectPropertyOffset(i);
- }
- return (GetInObjectPropertiesStartInWords() + i) * kTaggedSize;
-}
-
-PropertyDetails MapRef::GetPropertyDetails(
- InternalIndex descriptor_index) const {
- CHECK_LT(descriptor_index.as_int(), NumberOfOwnDescriptors());
- return instance_descriptors().GetPropertyDetails(descriptor_index);
-}
-
-NameRef MapRef::GetPropertyKey(InternalIndex descriptor_index) const {
- CHECK_LT(descriptor_index.as_int(), NumberOfOwnDescriptors());
- return instance_descriptors().GetPropertyKey(descriptor_index);
-}
-
-bool MapRef::IsFixedCowArrayMap() const {
- Handle<Map> fixed_cow_array_map =
- ReadOnlyRoots(broker()->isolate()).fixed_cow_array_map_handle();
- return equals(MapRef(broker(), fixed_cow_array_map));
-}
-
-bool MapRef::IsPrimitiveMap() const {
- return instance_type() <= LAST_PRIMITIVE_HEAP_OBJECT_TYPE;
-}
-
-MapRef MapRef::FindFieldOwner(InternalIndex descriptor_index) const {
- CHECK_LT(descriptor_index.as_int(), NumberOfOwnDescriptors());
- if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) {
- // TODO(solanes, v8:7790): Consider caching the result of the field owner on
- // the descriptor array. It would be useful for the same map as well as any
- // other map sharing that descriptor array.
- return MapRef(broker(), broker()->GetOrCreateData(object()->FindFieldOwner(
- broker()->isolate(), descriptor_index)));
- }
- DescriptorArrayData* descriptors =
- data()->AsMap()->instance_descriptors()->AsDescriptorArray();
- return MapRef(broker(), descriptors->FindFieldOwner(descriptor_index));
-}
-
-ObjectRef MapRef::GetFieldType(InternalIndex descriptor_index) const {
- CHECK_LT(descriptor_index.as_int(), NumberOfOwnDescriptors());
- return instance_descriptors().GetFieldType(descriptor_index);
-}
-
-base::Optional<ObjectRef> StringRef::GetCharAsStringOrUndefined(
- uint32_t index, SerializationPolicy policy) const {
- if (data_->should_access_heap()) {
- // TODO(solanes, neis, v8:7790, v8:11012): Re-enable this optimization for
- // concurrent inlining when we have the infrastructure to safely do so.
- if (broker()->is_concurrent_inlining()) return base::nullopt;
- CHECK_EQ(data_->kind(), ObjectDataKind::kUnserializedHeapObject);
- return GetOwnElementFromHeap(broker(), object(), index, true);
- }
- ObjectData* element =
- data()->AsString()->GetCharAsStringOrUndefined(broker(), index, policy);
- if (element == nullptr) return base::nullopt;
- return ObjectRef(broker(), element);
-}
-
-base::Optional<int> StringRef::length() const {
- if (data_->should_access_heap()) {
- if (data_->kind() == kNeverSerializedHeapObject &&
- !this->IsInternalizedString()) {
- TRACE_BROKER_MISSING(
- broker(),
- "length for kNeverSerialized non-internalized string " << *this);
- return base::nullopt;
- } else {
- return object()->synchronized_length();
- }
- }
- return data()->AsString()->length();
-}
-
-base::Optional<uint16_t> StringRef::GetFirstChar() {
- if (data_->should_access_heap()) {
- if (data_->kind() == kNeverSerializedHeapObject &&
- !this->IsInternalizedString()) {
- TRACE_BROKER_MISSING(
- broker(),
- "first char for kNeverSerialized non-internalized string " << *this);
- return base::nullopt;
- }
-
- if (!broker()->IsMainThread()) {
- return object()->Get(0, broker()->local_isolate());
- } else {
- // TODO(solanes, v8:7790): Remove this case once the inlining phase is
- // done concurrently all the time.
- return object()->Get(0);
- }
- }
- return data()->AsString()->first_char();
-}
-
-base::Optional<double> StringRef::ToNumber() {
- if (data_->should_access_heap()) {
- if (data_->kind() == kNeverSerializedHeapObject &&
- !this->IsInternalizedString()) {
- TRACE_BROKER_MISSING(
- broker(),
- "number for kNeverSerialized non-internalized string " << *this);
- return base::nullopt;
- }
-
- return TryStringToDouble(broker()->local_isolate(), object());
- }
- return data()->AsString()->to_number();
-}
-
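The StringRef getters removed above share one shape: when a value cannot be read safely off the main thread (a never-serialized, non-internalized string), they record a broker miss and return nullopt rather than touching the object. A small sketch of that guarded optional-read pattern, using std::optional and a made-up safety predicate in place of the internalized-string check:

#include <cstddef>
#include <iostream>
#include <optional>
#include <string>

// Stand-in for the "safe to read off-thread" predicate (e.g. internalized string).
bool SafeToReadConcurrently(const std::string& s) { return !s.empty(); }

std::optional<std::size_t> LengthIfSafe(const std::string& s) {
  if (!SafeToReadConcurrently(s)) {
    std::cerr << "broker miss: length not available off the main thread\n";
    return std::nullopt;  // the caller falls back to a generic code path
  }
  return s.size();
}

int main() {
  std::cout << LengthIfSafe("hello").value_or(0) << "\n";  // prints 5
  std::cout << LengthIfSafe("").value_or(0) << "\n";       // prints 0 after a miss
  return 0;
}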
-int ArrayBoilerplateDescriptionRef::constants_elements_length() const {
- if (data_->should_access_heap()) {
- return object()->constant_elements().length();
- }
- return data()->AsArrayBoilerplateDescription()->constants_elements_length();
-}
-
-ObjectRef FixedArrayRef::get(int i) const {
- if (data_->should_access_heap()) {
- return ObjectRef(broker(),
- broker()->CanonicalPersistentHandle(object()->get(i)));
- }
- return ObjectRef(broker(), data()->AsFixedArray()->Get(i));
-}
-
-Float64 FixedDoubleArrayRef::get(int i) const {
- if (data_->should_access_heap()) {
- return Float64::FromBits(object()->get_representation(i));
- } else {
- return data()->AsFixedDoubleArray()->Get(i);
- }
-}
-
-Handle<ByteArray> BytecodeArrayRef::SourcePositionTable() const {
- return broker()->CanonicalPersistentHandle(object()->SourcePositionTable());
-}
-
-Address BytecodeArrayRef::handler_table_address() const {
- return reinterpret_cast<Address>(
- object()->handler_table().GetDataStartAddress());
-}
-
-int BytecodeArrayRef::handler_table_size() const {
- return object()->handler_table().length();
-}
-
-#define IF_ACCESS_FROM_HEAP_C(name) \
- if (data_->should_access_heap()) { \
- return object()->name(); \
- }
-
-#define IF_ACCESS_FROM_HEAP(result, name) \
- if (data_->should_access_heap()) { \
- return result##Ref(broker(), \
- broker()->CanonicalPersistentHandle(object()->name())); \
- }
-
-// Macros for defining a const getter that, depending on the data kind,
-// either looks into the heap or into the serialized data.
-#define BIMODAL_ACCESSOR(holder, result, name) \
- result##Ref holder##Ref::name() const { \
- IF_ACCESS_FROM_HEAP(result, name); \
- return result##Ref(broker(), ObjectRef::data()->As##holder()->name()); \
- }
-
-// Like above except that the result type is not an XYZRef.
-#define BIMODAL_ACCESSOR_C(holder, result, name) \
- result holder##Ref::name() const { \
- IF_ACCESS_FROM_HEAP_C(name); \
- return ObjectRef::data()->As##holder()->name(); \
- }
-
-// Like above but for BitFields.
-#define BIMODAL_ACCESSOR_B(holder, field, name, BitField) \
- typename BitField::FieldType holder##Ref::name() const { \
- IF_ACCESS_FROM_HEAP_C(name); \
- return BitField::decode(ObjectRef::data()->As##holder()->field()); \
- }
-
-// Like IF_ACCESS_FROM_HEAP[_C] but we also allow direct heap access for
-// kSerialized only for methods that we identified to be safe.
-#define IF_ACCESS_FROM_HEAP_WITH_FLAG(result, name) \
- if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) { \
- return result##Ref(broker(), \
- broker()->CanonicalPersistentHandle(object()->name())); \
- }
-#define IF_ACCESS_FROM_HEAP_WITH_FLAG_C(name) \
- if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) { \
- return object()->name(); \
- }
-
-// Like BIMODAL_ACCESSOR[_C] except that we force a direct heap access if
-// FLAG_turbo_direct_heap_access is true (even for kSerialized). This is because
-// we identified the method to be safe to use direct heap access, but the
-// holder##Data class still needs to be serialized.
-#define BIMODAL_ACCESSOR_WITH_FLAG(holder, result, name) \
- result##Ref holder##Ref::name() const { \
- IF_ACCESS_FROM_HEAP_WITH_FLAG(result, name); \
- return result##Ref(broker(), ObjectRef::data()->As##holder()->name()); \
- }
-#define BIMODAL_ACCESSOR_WITH_FLAG_C(holder, result, name) \
- result holder##Ref::name() const { \
- IF_ACCESS_FROM_HEAP_WITH_FLAG_C(name); \
- return ObjectRef::data()->As##holder()->name(); \
- }
-#define BIMODAL_ACCESSOR_WITH_FLAG_B(holder, field, name, BitField) \
- typename BitField::FieldType holder##Ref::name() const { \
- IF_ACCESS_FROM_HEAP_WITH_FLAG_C(name); \
- return BitField::decode(ObjectRef::data()->As##holder()->field()); \
- }
-
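The comments above describe the bimodal accessors: each generated getter either dereferences the live heap object or answers from the serialized copy, depending on the data kind. A stripped-down sketch of that dispatch, with dummy types standing in for the Ref and Data classes and a single boolean standing in for should_access_heap():

#include <cassert>

// Stand-ins for the live heap object and its serialized snapshot.
struct HeapThing   { int value() const { return 1; } };
struct CachedThing { int value() const { return 2; } };

struct ThingRef {
  bool should_access_heap;
  HeapThing heap;
  CachedThing cached;

// One macro defines the whole getter: read the heap when allowed,
// otherwise answer from the serialized data.
#define BIMODAL_ACCESSOR_C(name)                \
  int name() const {                            \
    if (should_access_heap) return heap.name(); \
    return cached.name();                       \
  }
  BIMODAL_ACCESSOR_C(value)
#undef BIMODAL_ACCESSOR_C
};

int main() {
  assert(ThingRef{true, {}, {}}.value() == 1);   // heap read
  assert(ThingRef{false, {}, {}}.value() == 2);  // serialized read
  return 0;
}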
-BIMODAL_ACCESSOR(AllocationSite, Object, nested_site)
-BIMODAL_ACCESSOR_C(AllocationSite, bool, CanInlineCall)
-BIMODAL_ACCESSOR_C(AllocationSite, bool, PointsToLiteral)
-BIMODAL_ACCESSOR_C(AllocationSite, ElementsKind, GetElementsKind)
-BIMODAL_ACCESSOR_C(AllocationSite, AllocationType, GetAllocationType)
-
-BIMODAL_ACCESSOR_C(BigInt, uint64_t, AsUint64)
-
-BIMODAL_ACCESSOR_C(BytecodeArray, int, register_count)
-BIMODAL_ACCESSOR_C(BytecodeArray, int, parameter_count)
-BIMODAL_ACCESSOR_C(BytecodeArray, interpreter::Register,
- incoming_new_target_or_generator_register)
-
-BIMODAL_ACCESSOR_C(FeedbackVector, double, invocation_count)
-
-BIMODAL_ACCESSOR(HeapObject, Map, map)
-
-BIMODAL_ACCESSOR_C(HeapNumber, double, value)
-
-// These JSBoundFunction fields are immutable after initialization. Moreover,
-// as long as JSObjects are still serialized on the main thread, all
-// JSBoundFunctionRefs are created at a time when the underlying objects are
-// guaranteed to be fully initialized.
-BIMODAL_ACCESSOR_WITH_FLAG(JSBoundFunction, JSReceiver, bound_target_function)
-BIMODAL_ACCESSOR_WITH_FLAG(JSBoundFunction, Object, bound_this)
-BIMODAL_ACCESSOR_WITH_FLAG(JSBoundFunction, FixedArray, bound_arguments)
-
-BIMODAL_ACCESSOR_C(JSDataView, size_t, byte_length)
-
-BIMODAL_ACCESSOR_C(JSFunction, bool, has_feedback_vector)
-BIMODAL_ACCESSOR_C(JSFunction, bool, has_initial_map)
-BIMODAL_ACCESSOR_C(JSFunction, bool, has_prototype)
-BIMODAL_ACCESSOR_C(JSFunction, bool, PrototypeRequiresRuntimeLookup)
-BIMODAL_ACCESSOR(JSFunction, Context, context)
-BIMODAL_ACCESSOR(JSFunction, NativeContext, native_context)
-BIMODAL_ACCESSOR(JSFunction, Map, initial_map)
-BIMODAL_ACCESSOR(JSFunction, Object, prototype)
-BIMODAL_ACCESSOR(JSFunction, SharedFunctionInfo, shared)
-BIMODAL_ACCESSOR(JSFunction, FeedbackCell, raw_feedback_cell)
-BIMODAL_ACCESSOR(JSFunction, FeedbackVector, feedback_vector)
-
-BIMODAL_ACCESSOR_C(JSGlobalObject, bool, IsDetached)
-
-BIMODAL_ACCESSOR_WITH_FLAG_B(Map, bit_field2, elements_kind,
- Map::Bits2::ElementsKindBits)
-BIMODAL_ACCESSOR_WITH_FLAG_B(Map, bit_field3, is_dictionary_map,
- Map::Bits3::IsDictionaryMapBit)
-BIMODAL_ACCESSOR_WITH_FLAG_B(Map, bit_field3, is_deprecated,
- Map::Bits3::IsDeprecatedBit)
-BIMODAL_ACCESSOR_WITH_FLAG_B(Map, bit_field3, NumberOfOwnDescriptors,
- Map::Bits3::NumberOfOwnDescriptorsBits)
-BIMODAL_ACCESSOR_WITH_FLAG_B(Map, bit_field3, is_migration_target,
- Map::Bits3::IsMigrationTargetBit)
-BIMODAL_ACCESSOR_WITH_FLAG_B(Map, bit_field, has_prototype_slot,
- Map::Bits1::HasPrototypeSlotBit)
-BIMODAL_ACCESSOR_WITH_FLAG_B(Map, bit_field, is_access_check_needed,
- Map::Bits1::IsAccessCheckNeededBit)
-BIMODAL_ACCESSOR_WITH_FLAG_B(Map, bit_field, is_callable,
- Map::Bits1::IsCallableBit)
-BIMODAL_ACCESSOR_WITH_FLAG_B(Map, bit_field, has_indexed_interceptor,
- Map::Bits1::HasIndexedInterceptorBit)
-BIMODAL_ACCESSOR_WITH_FLAG_B(Map, bit_field, is_constructor,
- Map::Bits1::IsConstructorBit)
-BIMODAL_ACCESSOR_WITH_FLAG_B(Map, bit_field, is_undetectable,
- Map::Bits1::IsUndetectableBit)
-BIMODAL_ACCESSOR_C(Map, int, instance_size)
-BIMODAL_ACCESSOR_C(Map, int, NextFreePropertyIndex)
-BIMODAL_ACCESSOR_C(Map, int, UnusedPropertyFields)
-BIMODAL_ACCESSOR_WITH_FLAG(Map, HeapObject, prototype)
-BIMODAL_ACCESSOR_WITH_FLAG_C(Map, InstanceType, instance_type)
-BIMODAL_ACCESSOR_WITH_FLAG(Map, Object, GetConstructor)
-BIMODAL_ACCESSOR_WITH_FLAG(Map, HeapObject, GetBackPointer)
-BIMODAL_ACCESSOR_C(Map, bool, is_abandoned_prototype_map)
-
-#define DEF_NATIVE_CONTEXT_ACCESSOR(type, name) \
- BIMODAL_ACCESSOR(NativeContext, type, name)
-BROKER_NATIVE_CONTEXT_FIELDS(DEF_NATIVE_CONTEXT_ACCESSOR)
-#undef DEF_NATIVE_CONTEXT_ACCESSOR
-
-BIMODAL_ACCESSOR_C(ObjectBoilerplateDescription, int, size)
-
-BIMODAL_ACCESSOR(PropertyCell, Object, value)
-BIMODAL_ACCESSOR_C(PropertyCell, PropertyDetails, property_details)
-
-BIMODAL_ACCESSOR(RegExpBoilerplateDescription, FixedArray, data)
-BIMODAL_ACCESSOR(RegExpBoilerplateDescription, String, source)
-BIMODAL_ACCESSOR_C(RegExpBoilerplateDescription, int, flags)
-
-base::Optional<CallHandlerInfoRef> FunctionTemplateInfoRef::call_code() const {
- if (data_->should_access_heap()) {
- HeapObject call_code = object()->call_code(kAcquireLoad);
- if (call_code.IsUndefined()) return base::nullopt;
- return CallHandlerInfoRef(broker(),
- broker()->CanonicalPersistentHandle(call_code));
- }
- ObjectData* call_code = data()->AsFunctionTemplateInfo()->call_code();
- if (!call_code) return base::nullopt;
- return CallHandlerInfoRef(broker(), call_code);
-}
-
-bool FunctionTemplateInfoRef::is_signature_undefined() const {
- if (data_->should_access_heap()) {
- return object()->signature().IsUndefined(broker()->isolate());
- }
- return data()->AsFunctionTemplateInfo()->is_signature_undefined();
-}
-
-bool FunctionTemplateInfoRef::has_call_code() const {
- if (data_->should_access_heap()) {
- HeapObject call_code = object()->call_code(kAcquireLoad);
- return !call_code.IsUndefined();
- }
- return data()->AsFunctionTemplateInfo()->has_call_code();
-}
-
-bool FunctionTemplateInfoRef::accept_any_receiver() const {
- if (data_->should_access_heap()) {
- return object()->accept_any_receiver();
- }
- return ObjectRef::data()->AsFunctionTemplateInfo()->accept_any_receiver();
-}
-
-HolderLookupResult FunctionTemplateInfoRef::LookupHolderOfExpectedType(
- MapRef receiver_map, SerializationPolicy policy) {
- const HolderLookupResult not_found;
-
- if (data_->should_access_heap()) {
- // There are currently two ways we can see a FunctionTemplateInfo on the
- // background thread: 1.) As part of a SharedFunctionInfo and 2.) in an
- // AccessorPair. In both cases, the FTI is fully constructed on the main
- // thread beforehand.
- // TODO(nicohartmann@, v8:7790): Once the above no longer holds, we might
- // have to use the GC predicate to check whether objects are fully
- // initialized and safe to read.
- if (!receiver_map.IsJSReceiverMap() ||
- (receiver_map.is_access_check_needed() &&
- !object()->accept_any_receiver())) {
- return not_found;
- }
-
- if (!receiver_map.IsJSObjectMap()) return not_found;
-
- DCHECK(has_call_code());
-
- DisallowGarbageCollection no_gc;
- HeapObject signature = object()->signature();
- if (signature.IsUndefined()) {
- return HolderLookupResult(CallOptimization::kHolderIsReceiver);
- }
- auto expected_receiver_type = FunctionTemplateInfo::cast(signature);
- if (expected_receiver_type.IsTemplateFor(*receiver_map.object())) {
- return HolderLookupResult(CallOptimization::kHolderIsReceiver);
- }
-
- if (!receiver_map.IsJSGlobalProxyMap()) return not_found;
- if (policy == SerializationPolicy::kSerializeIfNeeded) {
- receiver_map.SerializePrototype();
- }
- if (!receiver_map.serialized_prototype()) return not_found;
- if (receiver_map.prototype().IsNull()) return not_found;
-
- JSObject raw_prototype = JSObject::cast(*receiver_map.prototype().object());
- if (!expected_receiver_type.IsTemplateFor(raw_prototype.map())) {
- return not_found;
- }
- Handle<JSObject> prototype =
- broker()->CanonicalPersistentHandle(raw_prototype);
- if (ObjectData* data = broker()->TryGetOrCreateData(prototype)) {
- return HolderLookupResult(CallOptimization::kHolderFound,
- JSObjectRef(broker(), data));
- }
-
- TRACE_BROKER_MISSING(broker(),
- "holder for receiver with map " << receiver_map);
- return not_found;
- }
-
- FunctionTemplateInfoData* fti_data = data()->AsFunctionTemplateInfo();
- KnownReceiversMap::iterator lookup_it =
- fti_data->known_receivers().find(receiver_map.data());
- if (lookup_it != fti_data->known_receivers().cend()) {
- return lookup_it->second;
- }
- if (policy == SerializationPolicy::kAssumeSerialized) {
- TRACE_BROKER_MISSING(broker(),
- "holder for receiver with map " << receiver_map);
- return not_found;
- }
- if (!receiver_map.IsJSReceiverMap() ||
- (receiver_map.is_access_check_needed() && !accept_any_receiver())) {
- fti_data->known_receivers().insert({receiver_map.data(), not_found});
- return not_found;
- }
-
- HolderLookupResult result;
- CallOptimization call_optimization(broker()->isolate(), object());
- Handle<JSObject> holder = call_optimization.LookupHolderOfExpectedType(
- receiver_map.object(), &result.lookup);
-
- switch (result.lookup) {
- case CallOptimization::kHolderFound: {
- result.holder = JSObjectRef(broker(), holder);
- fti_data->known_receivers().insert({receiver_map.data(), result});
- break;
- }
- default: {
- DCHECK_EQ(result.holder, base::nullopt);
- fti_data->known_receivers().insert({receiver_map.data(), result});
- }
- }
- return result;
-}
-
-BIMODAL_ACCESSOR(CallHandlerInfo, Object, data)
-
-BIMODAL_ACCESSOR_C(ScopeInfo, int, ContextLength)
-BIMODAL_ACCESSOR_C(ScopeInfo, bool, HasContextExtensionSlot)
-BIMODAL_ACCESSOR_C(ScopeInfo, bool, HasOuterScopeInfo)
-BIMODAL_ACCESSOR(ScopeInfo, ScopeInfo, OuterScopeInfo)
-
-BIMODAL_ACCESSOR_C(SharedFunctionInfo, int, builtin_id)
-BytecodeArrayRef SharedFunctionInfoRef::GetBytecodeArray() const {
- if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) {
- BytecodeArray bytecode_array;
- if (!broker()->IsMainThread()) {
- bytecode_array = object()->GetBytecodeArray(broker()->local_isolate());
- } else {
- bytecode_array = object()->GetBytecodeArray(broker()->isolate());
- }
- return BytecodeArrayRef(
- broker(), broker()->CanonicalPersistentHandle(bytecode_array));
- }
- return BytecodeArrayRef(
- broker(), ObjectRef::data()->AsSharedFunctionInfo()->GetBytecodeArray());
-}
-#define DEF_SFI_ACCESSOR(type, name) \
- BIMODAL_ACCESSOR_WITH_FLAG_C(SharedFunctionInfo, type, name)
-BROKER_SFI_FIELDS(DEF_SFI_ACCESSOR)
-#undef DEF_SFI_ACCESSOR
-SharedFunctionInfo::Inlineability SharedFunctionInfoRef::GetInlineability()
- const {
- if (data_->should_access_heap()) {
- if (!broker()->IsMainThread()) {
- return object()->GetInlineability(broker()->local_isolate());
- } else {
- return object()->GetInlineability(broker()->isolate());
- }
- }
- return ObjectRef::data()->AsSharedFunctionInfo()->GetInlineability();
-}
-
-base::Optional<FeedbackVectorRef> FeedbackCellRef::value() const {
- if (data_->should_access_heap()) {
- // Note that we use the synchronized accessor.
- Object value = object()->value(kAcquireLoad);
- if (!value.IsFeedbackVector()) return base::nullopt;
- auto vector_handle = broker()->CanonicalPersistentHandle(value);
- ObjectData* vector = broker()->TryGetOrCreateData(vector_handle);
- if (vector) {
- return FeedbackVectorRef(broker(), vector);
- }
- TRACE_BROKER_MISSING(
- broker(),
- "Unable to retrieve FeedbackVector from FeedbackCellRef " << *this);
- return base::nullopt;
- }
- ObjectData* vector = ObjectRef::data()->AsFeedbackCell()->value();
- return FeedbackVectorRef(broker(), vector->AsFeedbackVector());
-}
-
-base::Optional<ObjectRef> MapRef::GetStrongValue(
- InternalIndex descriptor_index) const {
- CHECK_LT(descriptor_index.as_int(), NumberOfOwnDescriptors());
- return instance_descriptors().GetStrongValue(descriptor_index);
-}
-
-DescriptorArrayRef MapRef::instance_descriptors() const {
- if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) {
- return DescriptorArrayRef(
- broker(),
- broker()->CanonicalPersistentHandle(
- object()->instance_descriptors(broker()->isolate(), kRelaxedLoad)));
- }
-
- return DescriptorArrayRef(broker(), data()->AsMap()->instance_descriptors());
-}
-
-void MapRef::SerializeRootMap() {
- if (data_->should_access_heap()) return;
- CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
- data()->AsMap()->SerializeRootMap(broker());
-}
-
-// TODO(solanes, v8:7790): Remove base::Optional from the return type when
-// deleting serialization.
-base::Optional<MapRef> MapRef::FindRootMap() const {
- if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) {
- // TODO(solanes): Remove the TryGetOrCreateData part when Map is moved to
- // kNeverSerialized.
- ObjectData* root_map =
- broker()->TryGetOrCreateData(broker()->CanonicalPersistentHandle(
- object()->FindRootMap(broker()->isolate())));
- if (root_map) {
- // TODO(solanes, v8:7790): Consider caching the result of the root map.
- return MapRef(broker(), root_map);
- }
- TRACE_BROKER_MISSING(broker(), "root map for object " << *this);
- return base::nullopt;
- }
- ObjectData* map_data = data()->AsMap()->FindRootMap();
- if (map_data != nullptr) {
- return MapRef(broker(), map_data);
- }
- TRACE_BROKER_MISSING(broker(), "root map for object " << *this);
- return base::nullopt;
-}
-
-bool JSTypedArrayRef::is_on_heap() const {
- if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) {
- // Safe to read concurrently because:
- // - host object seen by serializer.
- // - underlying field written 1. during initialization or 2. with
- // release-store.
- return object()->is_on_heap(kAcquireLoad);
- }
- return data()->AsJSTypedArray()->data_ptr();
-}
-
-size_t JSTypedArrayRef::length() const {
- CHECK(!is_on_heap());
- if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) {
- // Safe to read concurrently because:
- // - immutable after initialization.
- // - host object seen by serializer.
- return object()->length();
- }
- return data()->AsJSTypedArray()->length();
-}
-
-HeapObjectRef JSTypedArrayRef::buffer() const {
- CHECK(!is_on_heap());
- if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) {
- // Safe to read concurrently because:
- // - immutable after initialization.
- // - host object seen by serializer.
- Handle<JSArrayBuffer> value =
- broker()->CanonicalPersistentHandle(object()->buffer());
- return JSObjectRef{broker(), value};
- }
- return HeapObjectRef{broker(), data()->AsJSTypedArray()->buffer()};
-}
-
-void* JSTypedArrayRef::data_ptr() const {
- CHECK(!is_on_heap());
- if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) {
- // Safe to read concurrently because:
- // - host object seen by serializer.
- // - underlying field written 1. during initialization or 2. protected by
- // the is_on_heap release/acquire semantics (external_pointer store
- // happens-before base_pointer store, and this external_pointer load
- // happens-after base_pointer load).
- STATIC_ASSERT(JSTypedArray::kOffHeapDataPtrEqualsExternalPointer);
- return object()->DataPtr();
- }
- return data()->AsJSTypedArray()->data_ptr();
-}
-
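The removed JSTypedArrayRef getters lean on a publication protocol: the external data pointer is written first, and is_on_heap is then release-stored, so an acquire-load of is_on_heap guarantees the pointer is visible. A generic sketch of that release/acquire handshake with std::atomic, unrelated to the actual V8 field layout:

#include <atomic>
#include <cassert>
#include <thread>

struct Buffer {
  void* data = nullptr;                // analogous to the external data pointer
  std::atomic<bool> published{false};  // analogous to the is_on_heap bit
};

void Producer(Buffer* b, void* payload) {
  b->data = payload;                                    // plain store first
  b->published.store(true, std::memory_order_release);  // then publish
}

void* Consumer(Buffer* b) {
  // The acquire-load pairs with the release-store: once published reads as
  // true, the earlier write to data is guaranteed to be visible as well.
  if (b->published.load(std::memory_order_acquire)) return b->data;
  return nullptr;
}

int main() {
  Buffer b;
  int payload = 7;
  std::thread producer(Producer, &b, static_cast<void*>(&payload));
  producer.join();
  assert(Consumer(&b) == &payload);
  return 0;
}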
-bool MapRef::IsInobjectSlackTrackingInProgress() const {
- IF_ACCESS_FROM_HEAP_WITH_FLAG_C(IsInobjectSlackTrackingInProgress);
- return Map::Bits3::ConstructionCounterBits::decode(
- data()->AsMap()->bit_field3()) != Map::kNoSlackTracking;
-}
-
-int MapRef::constructor_function_index() const {
- IF_ACCESS_FROM_HEAP_C(GetConstructorFunctionIndex);
- CHECK(IsPrimitiveMap());
- return data()->AsMap()->constructor_function_index();
-}
-
-bool MapRef::is_stable() const {
- IF_ACCESS_FROM_HEAP_C(is_stable);
- return !Map::Bits3::IsUnstableBit::decode(data()->AsMap()->bit_field3());
-}
-
-bool MapRef::CanBeDeprecated() const {
- IF_ACCESS_FROM_HEAP_C(CanBeDeprecated);
- CHECK_GT(NumberOfOwnDescriptors(), 0);
- return data()->AsMap()->can_be_deprecated();
-}
-
-bool MapRef::CanTransition() const {
- IF_ACCESS_FROM_HEAP_C(CanTransition);
- return data()->AsMap()->can_transition();
-}
-
-int MapRef::GetInObjectPropertiesStartInWords() const {
- IF_ACCESS_FROM_HEAP_C(GetInObjectPropertiesStartInWords);
- return data()->AsMap()->in_object_properties_start_in_words();
-}
-
-int MapRef::GetInObjectProperties() const {
- IF_ACCESS_FROM_HEAP_C(GetInObjectProperties);
- return data()->AsMap()->in_object_properties();
-}
-
-void ScopeInfoRef::SerializeScopeInfoChain() {
- if (data_->should_access_heap()) return;
- CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
- data()->AsScopeInfo()->SerializeScopeInfoChain(broker());
-}
-
-bool StringRef::IsExternalString() const {
- IF_ACCESS_FROM_HEAP_C(IsExternalString);
- return data()->AsString()->is_external_string();
-}
-
-Address CallHandlerInfoRef::callback() const {
- if (data_->should_access_heap()) {
- return v8::ToCData<Address>(object()->callback());
- }
- return HeapObjectRef::data()->AsCallHandlerInfo()->callback();
-}
-
-Address FunctionTemplateInfoRef::c_function() const {
- if (data_->should_access_heap()) {
- return v8::ToCData<Address>(object()->GetCFunction());
- }
- return HeapObjectRef::data()->AsFunctionTemplateInfo()->c_function();
-}
-
-const CFunctionInfo* FunctionTemplateInfoRef::c_signature() const {
- if (data_->should_access_heap()) {
- return v8::ToCData<CFunctionInfo*>(object()->GetCSignature());
- }
- return HeapObjectRef::data()->AsFunctionTemplateInfo()->c_signature();
-}
-
-bool StringRef::IsSeqString() const {
- IF_ACCESS_FROM_HEAP_C(IsSeqString);
- return data()->AsString()->is_seq_string();
-}
-
-ScopeInfoRef NativeContextRef::scope_info() const {
- if (data_->should_access_heap()) {
- return ScopeInfoRef(
- broker(), broker()->CanonicalPersistentHandle(object()->scope_info()));
- }
- return ScopeInfoRef(broker(), data()->AsNativeContext()->scope_info());
-}
-
-SharedFunctionInfoRef FeedbackVectorRef::shared_function_info() const {
- if (data_->should_access_heap()) {
- return SharedFunctionInfoRef(
- broker(),
- broker()->CanonicalPersistentHandle(object()->shared_function_info()));
- }
-
- return SharedFunctionInfoRef(
- broker(), data()->AsFeedbackVector()->shared_function_info());
-}
-
-MapRef NativeContextRef::GetFunctionMapFromIndex(int index) const {
- DCHECK_GE(index, Context::FIRST_FUNCTION_MAP_INDEX);
- DCHECK_LE(index, Context::LAST_FUNCTION_MAP_INDEX);
- if (data_->should_access_heap()) {
- return get(index).value().AsMap();
- }
- return MapRef(broker(), data()->AsNativeContext()->function_maps().at(
- index - Context::FIRST_FUNCTION_MAP_INDEX));
-}
-
-MapRef NativeContextRef::GetInitialJSArrayMap(ElementsKind kind) const {
- switch (kind) {
- case PACKED_SMI_ELEMENTS:
- return js_array_packed_smi_elements_map();
- case HOLEY_SMI_ELEMENTS:
- return js_array_holey_smi_elements_map();
- case PACKED_DOUBLE_ELEMENTS:
- return js_array_packed_double_elements_map();
- case HOLEY_DOUBLE_ELEMENTS:
- return js_array_holey_double_elements_map();
- case PACKED_ELEMENTS:
- return js_array_packed_elements_map();
- case HOLEY_ELEMENTS:
- return js_array_holey_elements_map();
- default:
- UNREACHABLE();
- }
-}
-
-base::Optional<JSFunctionRef> NativeContextRef::GetConstructorFunction(
- const MapRef& map) const {
- CHECK(map.IsPrimitiveMap());
- switch (map.constructor_function_index()) {
- case Map::kNoConstructorFunctionIndex:
- return base::nullopt;
- case Context::BIGINT_FUNCTION_INDEX:
- return bigint_function();
- case Context::BOOLEAN_FUNCTION_INDEX:
- return boolean_function();
- case Context::NUMBER_FUNCTION_INDEX:
- return number_function();
- case Context::STRING_FUNCTION_INDEX:
- return string_function();
- case Context::SYMBOL_FUNCTION_INDEX:
- return symbol_function();
- default:
- UNREACHABLE();
- }
-}
-
-bool ObjectRef::IsNull() const { return object()->IsNull(); }
-
-bool ObjectRef::IsNullOrUndefined() const {
- if (IsSmi()) return false;
- OddballType type = AsHeapObject().map().oddball_type();
- return type == OddballType::kNull || type == OddballType::kUndefined;
-}
-
-bool ObjectRef::IsTheHole() const {
- return IsHeapObject() &&
- AsHeapObject().map().oddball_type() == OddballType::kHole;
-}
-
-bool ObjectRef::BooleanValue() const {
- if (data_->should_access_heap()) {
- return object()->BooleanValue(broker()->isolate());
- }
- return IsSmi() ? (AsSmi() != 0) : data()->AsHeapObject()->boolean_value();
-}
-
-Maybe<double> ObjectRef::OddballToNumber() const {
- OddballType type = AsHeapObject().map().oddball_type();
-
- switch (type) {
- case OddballType::kBoolean: {
- ObjectRef true_ref(broker(),
- broker()->isolate()->factory()->true_value());
- return this->equals(true_ref) ? Just(1.0) : Just(0.0);
- break;
- }
- case OddballType::kUndefined: {
- return Just(std::numeric_limits<double>::quiet_NaN());
- break;
- }
- case OddballType::kNull: {
- return Just(0.0);
- break;
- }
- default: {
- return Nothing<double>();
- break;
- }
- }
-}
-
-base::Optional<ObjectRef> JSObjectRef::GetOwnConstantElement(
- uint32_t index, SerializationPolicy policy) const {
- if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) {
- // `elements` are currently still serialized as members of JSObjectRef.
- // TODO(jgruber,v8:7790): Once JSObject is no longer serialized, we must
- // guarantee consistency between `object`, `elements_kind` and `elements`
- // through other means (store/load order? locks? storing elements_kind in
- // elements.map?).
- STATIC_ASSERT(IsSerializedHeapObject<JSObject>());
-
- base::Optional<FixedArrayBaseRef> maybe_elements_ref = elements();
- if (!maybe_elements_ref.has_value()) {
- TRACE_BROKER_MISSING(broker(), "JSObject::elements" << *this);
- return {};
- }
-
- FixedArrayBaseRef elements_ref = maybe_elements_ref.value();
- ElementsKind elements_kind = GetElementsKind();
-
- DCHECK_LE(index, JSObject::kMaxElementIndex);
-
- // See also ElementsAccessorBase::GetMaxIndex.
- if (IsJSArray()) {
- // For JSArrays we additionally need to check against JSArray::length.
- // Length_unsafe is safe to use in this case since:
- // - GetOwnConstantElement only detects a constant for JSArray holders if
- // the array is frozen/sealed.
- // - Frozen/sealed arrays can't change length.
- // - We've already seen a map with frozen/sealed elements_kinds (above);
- // - The release-load of that map ensures we read the newest value
- // of `length` below.
- uint32_t array_length;
- if (!AsJSArray().length_unsafe().object()->ToArrayLength(&array_length)) {
- return {};
- }
- if (index >= array_length) return {};
- }
-
- Object maybe_element;
- auto result = ConcurrentLookupIterator::TryGetOwnConstantElement(
- &maybe_element, broker()->isolate(), broker()->local_isolate(),
- *object(), *elements_ref.object(), elements_kind, index);
-
- if (result == ConcurrentLookupIterator::kGaveUp) {
- TRACE_BROKER_MISSING(broker(), "JSObject::GetOwnConstantElement on "
- << *this << " at index " << index);
- return {};
- } else if (result == ConcurrentLookupIterator::kNotPresent) {
- return {};
- }
-
- DCHECK_EQ(result, ConcurrentLookupIterator::kPresent);
- return ObjectRef{broker(),
- broker()->CanonicalPersistentHandle(maybe_element)};
- } else {
- ObjectData* element =
- data()->AsJSObject()->GetOwnConstantElement(broker(), index, policy);
- if (element == nullptr) return base::nullopt;
- return ObjectRef(broker(), element);
- }
-}
-
-base::Optional<ObjectRef> JSObjectRef::GetOwnFastDataProperty(
- Representation field_representation, FieldIndex index,
- SerializationPolicy policy) const {
- if (data_->should_access_heap()) {
- return GetOwnFastDataPropertyFromHeap(broker(),
- Handle<JSObject>::cast(object()),
- field_representation, index);
- }
- ObjectData* property = data()->AsJSObject()->GetOwnFastDataProperty(
- broker(), field_representation, index, policy);
- if (property == nullptr) return base::nullopt;
- return ObjectRef(broker(), property);
-}
-
-ObjectRef JSObjectRef::GetOwnDictionaryProperty(
- InternalIndex index, SerializationPolicy policy) const {
- CHECK(index.is_found());
- if (data_->should_access_heap()) {
- return GetOwnDictionaryPropertyFromHeap(
- broker(), Handle<JSObject>::cast(object()), index);
- }
- ObjectData* property =
- data()->AsJSObject()->GetOwnDictionaryProperty(broker(), index, policy);
- CHECK_NE(property, nullptr);
- return ObjectRef(broker(), property);
-}
-
-ObjectRef JSArrayRef::GetBoilerplateLength() const {
- // Safe to read concurrently because:
- // - boilerplates are immutable after initialization.
- // - boilerplates are published into the feedback vector.
- return length_unsafe();
-}
-
-ObjectRef JSArrayRef::length_unsafe() const {
- if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) {
- Object o = object()->length(broker()->isolate(), kRelaxedLoad);
- return ObjectRef{broker(), broker()->CanonicalPersistentHandle(o)};
- } else {
- return ObjectRef{broker(), data()->AsJSArray()->length()};
- }
-}
-
-base::Optional<ObjectRef> JSArrayRef::GetOwnCowElement(
- FixedArrayBaseRef elements_ref, uint32_t index,
- SerializationPolicy policy) const {
- if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) {
- // `elements` are currently still serialized as members of JSObjectRef.
- // TODO(jgruber,v8:7790): Remove the elements equality DCHECK below once
- // JSObject is no longer serialized.
- static_assert(std::is_base_of<JSObject, JSArray>::value, "");
- STATIC_ASSERT(IsSerializedHeapObject<JSObject>());
-
- // The elements_ref is passed in by callers to make explicit that it is
- // also used outside of this function, and must match the `elements` used
- // inside this function.
- DCHECK(elements_ref.equals(elements().value()));
-
- // Due to concurrency, the kind read here may not be consistent with
- // `elements_ref`. But consistency is guaranteed at runtime due to the
- // `elements` equality check in the caller.
- ElementsKind elements_kind = GetElementsKind();
-
- // We only inspect fixed COW arrays, which may only occur for fast
- // smi/objects elements kinds.
- if (!IsSmiOrObjectElementsKind(elements_kind)) return {};
- DCHECK(IsFastElementsKind(elements_kind));
- if (!elements_ref.map().IsFixedCowArrayMap()) return {};
-
- // As the name says, the `length` read here is unsafe and may not match
- // `elements`. We rely on the invariant that any `length` change will
- // also result in an `elements` change to make this safe. The `elements`
- // equality check in the caller thus also guards the value of `length`.
- ObjectRef length_ref = length_unsafe();
-
- // Likewise we only deal with smi lengths.
- if (!length_ref.IsSmi()) return {};
-
- base::Optional<Object> result =
- ConcurrentLookupIterator::TryGetOwnCowElement(
- broker()->isolate(), *elements_ref.AsFixedArray().object(),
- elements_kind, length_ref.AsSmi(), index);
-
- if (!result.has_value()) return {};
-
- return ObjectRef{broker(),
- broker()->CanonicalPersistentHandle(result.value())};
- } else {
- DCHECK(!data_->should_access_heap());
- DCHECK(!FLAG_turbo_direct_heap_access);
-
- // Just to clarify that `elements_ref` is not used on this path.
- // GetOwnElement accesses the serialized `elements` field on its own.
- USE(elements_ref);
-
- if (!elements().value().map().IsFixedCowArrayMap()) return base::nullopt;
-
- ObjectData* element =
- data()->AsJSArray()->GetOwnElement(broker(), index, policy);
- if (element == nullptr) return base::nullopt;
- return ObjectRef(broker(), element);
- }
-}
-
-base::Optional<CellRef> SourceTextModuleRef::GetCell(int cell_index) const {
- if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) {
- return CellRef(broker(), broker()->CanonicalPersistentHandle(
- object()->GetCell(cell_index)));
- }
- ObjectData* cell =
- data()->AsSourceTextModule()->GetCell(broker(), cell_index);
- if (cell == nullptr) return base::nullopt;
- return CellRef(broker(), cell);
-}
-
-ObjectRef SourceTextModuleRef::import_meta() const {
- if (data_->should_access_heap()) {
- return ObjectRef(
- broker(), broker()->CanonicalPersistentHandle(object()->import_meta()));
- }
- return ObjectRef(broker(),
- data()->AsSourceTextModule()->GetImportMeta(broker()));
-}
-
-ObjectRef::ObjectRef(JSHeapBroker* broker, Handle<Object> object,
- BackgroundSerialization background_serialization,
- bool check_type)
- : broker_(broker) {
- CHECK_NE(broker->mode(), JSHeapBroker::kRetired);
-
- data_ = broker->GetOrCreateData(object, background_serialization);
- if (!data_) { // TODO(mslekova): Remove once we're on the background thread.
- object->Print();
- }
- CHECK_WITH_MSG(data_ != nullptr, "Object is not known to the heap broker");
-}
-
-namespace {
-OddballType GetOddballType(Isolate* isolate, Map map) {
- if (map.instance_type() != ODDBALL_TYPE) {
- return OddballType::kNone;
- }
- ReadOnlyRoots roots(isolate);
- if (map == roots.undefined_map()) {
- return OddballType::kUndefined;
- }
- if (map == roots.null_map()) {
- return OddballType::kNull;
- }
- if (map == roots.boolean_map()) {
- return OddballType::kBoolean;
- }
- if (map == roots.the_hole_map()) {
- return OddballType::kHole;
- }
- if (map == roots.uninitialized_map()) {
- return OddballType::kUninitialized;
- }
- DCHECK(map == roots.termination_exception_map() ||
- map == roots.arguments_marker_map() ||
- map == roots.optimized_out_map() || map == roots.stale_register_map());
- return OddballType::kOther;
-}
-} // namespace
-
-HeapObjectType HeapObjectRef::GetHeapObjectType() const {
- if (data_->should_access_heap()) {
- Map map = Handle<HeapObject>::cast(object())->map();
- HeapObjectType::Flags flags(0);
- if (map.is_undetectable()) flags |= HeapObjectType::kUndetectable;
- if (map.is_callable()) flags |= HeapObjectType::kCallable;
- return HeapObjectType(map.instance_type(), flags,
- GetOddballType(broker()->isolate(), map));
- }
- HeapObjectType::Flags flags(0);
- if (map().is_undetectable()) flags |= HeapObjectType::kUndetectable;
- if (map().is_callable()) flags |= HeapObjectType::kCallable;
- return HeapObjectType(map().instance_type(), flags, map().oddball_type());
-}
-base::Optional<JSObjectRef> AllocationSiteRef::boilerplate() const {
- if (data_->should_access_heap()) {
- return JSObjectRef(
- broker(), broker()->CanonicalPersistentHandle(object()->boilerplate()));
- }
- ObjectData* boilerplate = data()->AsAllocationSite()->boilerplate();
- if (boilerplate) {
- return JSObjectRef(broker(), boilerplate);
- } else {
- return base::nullopt;
- }
-}
-
-ElementsKind JSObjectRef::GetElementsKind() const {
- return map().elements_kind();
-}
-
-base::Optional<FixedArrayBaseRef> JSObjectRef::elements() const {
- if (data_->should_access_heap()) {
- return FixedArrayBaseRef(
- broker(), broker()->CanonicalPersistentHandle(object()->elements()));
- }
- const JSObjectData* d = data()->AsJSObject();
- if (!d->serialized_elements()) {
- TRACE(broker(), "'elements' on " << this);
- return base::nullopt;
- }
- return FixedArrayBaseRef(broker(), d->elements());
-}
-
-int FixedArrayBaseRef::length() const {
- IF_ACCESS_FROM_HEAP_C(length);
- return data()->AsFixedArrayBase()->length();
-}
-
-ObjectData* FixedArrayData::Get(int i) const {
- CHECK_LT(i, static_cast<int>(contents_.size()));
- CHECK_NOT_NULL(contents_[i]);
- return contents_[i];
-}
-
-Float64 FixedDoubleArrayData::Get(int i) const {
- CHECK_LT(i, static_cast<int>(contents_.size()));
- return contents_[i];
-}
-
-PropertyDetails DescriptorArrayRef::GetPropertyDetails(
- InternalIndex descriptor_index) const {
- if (data_->should_access_heap()) {
- return object()->GetDetails(descriptor_index);
- }
- return data()->AsDescriptorArray()->GetPropertyDetails(descriptor_index);
-}
-
-NameRef DescriptorArrayRef::GetPropertyKey(
- InternalIndex descriptor_index) const {
- if (data_->should_access_heap()) {
- NameRef result(broker(), broker()->CanonicalPersistentHandle(
- object()->GetKey(descriptor_index)));
- CHECK(result.IsUniqueName());
- return result;
- }
- return NameRef(broker(),
- data()->AsDescriptorArray()->GetPropertyKey(descriptor_index));
-}
-
-ObjectRef DescriptorArrayRef::GetFieldType(
- InternalIndex descriptor_index) const {
- if (data_->should_access_heap()) {
- // This method only gets called for the creation of FieldTypeDependencies.
- // These calls happen when the broker is either disabled or serializing,
- // which means that GetOrCreateData would be able to successfully create the
- // ObjectRef for the cases where we haven't seen the FieldType before.
- DCHECK(broker()->mode() == JSHeapBroker::kDisabled ||
- broker()->mode() == JSHeapBroker::kSerializing);
- return ObjectRef(broker(), broker()->CanonicalPersistentHandle(
- object()->GetFieldType(descriptor_index)));
- }
- return ObjectRef(broker(),
- data()->AsDescriptorArray()->GetFieldType(descriptor_index));
-}
-
-base::Optional<ObjectRef> DescriptorArrayRef::GetStrongValue(
- InternalIndex descriptor_index) const {
- if (data_->should_access_heap()) {
- HeapObject heap_object;
- if (object()
- ->GetValue(descriptor_index)
- .GetHeapObjectIfStrong(&heap_object)) {
- // Since the descriptors in the descriptor array can be changed in-place
- // via DescriptorArray::Replace, we might get a value that we haven't seen
- // before.
- ObjectData* data = broker()->TryGetOrCreateData(
- broker()->CanonicalPersistentHandle(heap_object));
- if (data) return ObjectRef(broker(), data);
-
- TRACE_BROKER_MISSING(broker(), "strong value for descriptor array "
- << *this << " at index "
- << descriptor_index.as_int());
- // Fall through to the base::nullopt below.
- }
- return base::nullopt;
- }
- ObjectData* value =
- data()->AsDescriptorArray()->GetStrongValue(descriptor_index);
- if (!value) return base::nullopt;
- return ObjectRef(broker(), value);
-}
-
-base::Optional<SharedFunctionInfoRef> FeedbackCellRef::shared_function_info()
- const {
- if (value()) {
- FeedbackVectorRef vector = *value();
- if (vector.serialized()) {
- return vector.shared_function_info();
- }
- }
- return base::nullopt;
-}
-
-void FeedbackVectorRef::Serialize() {
- if (data_->should_access_heap()) return;
- CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
- data()->AsFeedbackVector()->Serialize(broker());
-}
-
-bool FeedbackVectorRef::serialized() const {
- if (data_->should_access_heap()) return true;
- return data()->AsFeedbackVector()->serialized();
-}
-
-bool NameRef::IsUniqueName() const {
- // Must match Name::IsUniqueName.
- return IsInternalizedString() || IsSymbol();
-}
-
-void RegExpBoilerplateDescriptionRef::Serialize() {
- if (data_->should_access_heap()) {
- // Even if the regexp boilerplate object itself is no longer serialized,
- // the `data` still is and thus we need to make sure to visit it.
- // TODO(jgruber,v8:7790): Remove once it is no longer a serialized type.
- STATIC_ASSERT(IsSerializedHeapObject<FixedArray>());
- FixedArrayRef data_ref{
- broker(), broker()->CanonicalPersistentHandle(object()->data())};
- } else {
- CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
- HeapObjectRef::data()->AsRegExpBoilerplateDescription()->Serialize(
- broker());
- }
-}
-
-Handle<Object> ObjectRef::object() const {
-#ifdef DEBUG
- if (broker()->mode() == JSHeapBroker::kSerialized &&
- data_->used_status == ObjectData::Usage::kUnused) {
- data_->used_status = ObjectData::Usage::kOnlyIdentityUsed;
- }
-#endif // DEBUG
- return data_->object();
-}
-
-#ifdef DEBUG
-#define DEF_OBJECT_GETTER(T) \
- Handle<T> T##Ref::object() const { \
- if (broker()->mode() == JSHeapBroker::kSerialized && \
- data_->used_status == ObjectData::Usage::kUnused) { \
- data_->used_status = ObjectData::Usage::kOnlyIdentityUsed; \
- } \
- return Handle<T>(reinterpret_cast<Address*>(data_->object().address())); \
- }
-#else
-#define DEF_OBJECT_GETTER(T) \
- Handle<T> T##Ref::object() const { \
- return Handle<T>(reinterpret_cast<Address*>(data_->object().address())); \
- }
-#endif // DEBUG
-
-HEAP_BROKER_SERIALIZED_OBJECT_LIST(DEF_OBJECT_GETTER)
-HEAP_BROKER_POSSIBLY_BACKGROUND_SERIALIZED_OBJECT_LIST(DEF_OBJECT_GETTER)
-HEAP_BROKER_BACKGROUND_SERIALIZED_OBJECT_LIST(DEF_OBJECT_GETTER)
-HEAP_BROKER_NEVER_SERIALIZED_OBJECT_LIST(DEF_OBJECT_GETTER)
-#undef DEF_OBJECT_GETTER
-
-JSHeapBroker* ObjectRef::broker() const { return broker_; }
-
-ObjectData* ObjectRef::data() const {
- switch (broker()->mode()) {
- case JSHeapBroker::kDisabled:
- CHECK_NE(data_->kind(), kSerializedHeapObject);
- return data_;
- case JSHeapBroker::kSerializing:
- CHECK_NE(data_->kind(), kUnserializedHeapObject);
- return data_;
- case JSHeapBroker::kSerialized:
-#ifdef DEBUG
- data_->used_status = ObjectData::Usage::kDataUsed;
-#endif // DEBUG
- CHECK_NE(data_->kind(), kUnserializedHeapObject);
- return data_;
- case JSHeapBroker::kRetired:
- UNREACHABLE();
- }
-}
-
-Reduction NoChangeBecauseOfMissingData(JSHeapBroker* broker,
- const char* function, int line) {
- TRACE_MISSING(broker, "data in function " << function << " at line " << line);
- return AdvancedReducer::NoChange();
-}
-
-NativeContextData::NativeContextData(JSHeapBroker* broker, ObjectData** storage,
- Handle<NativeContext> object)
- : ContextData(broker, storage, object),
- state_(State::kUnserialized),
- function_maps_(broker->zone()) {}
-
-void NativeContextData::Serialize(JSHeapBroker* broker) {
- if (state_ != State::kUnserialized) return;
- state_ = State::kSerializedOnMainThread;
-
- TraceScope tracer(broker, this, "NativeContextData::Serialize");
- Handle<NativeContext> context = Handle<NativeContext>::cast(object());
-
-#define SERIALIZE_MEMBER(type, name) \
- DCHECK_NULL(name##_); \
- name##_ = broker->GetOrCreateData(context->name()); \
- if (!name##_->should_access_heap()) { \
- if (name##_->IsMap() && \
- !InstanceTypeChecker::IsContext(name##_->AsMap()->instance_type())) { \
- name##_->AsMap()->SerializeConstructor(broker); \
- } \
- if (name##_->IsJSFunction()) { \
- name##_->AsJSFunction()->Serialize(broker); \
- } \
- }
- BROKER_COMPULSORY_NATIVE_CONTEXT_FIELDS(SERIALIZE_MEMBER)
- if (!broker->is_isolate_bootstrapping()) {
- BROKER_OPTIONAL_NATIVE_CONTEXT_FIELDS(SERIALIZE_MEMBER)
- }
-#undef SERIALIZE_MEMBER
-
- if (!bound_function_with_constructor_map_->should_access_heap()) {
- bound_function_with_constructor_map_->AsMap()->SerializePrototype(broker);
- }
- if (!bound_function_without_constructor_map_->should_access_heap()) {
- bound_function_without_constructor_map_->AsMap()->SerializePrototype(
- broker);
- }
-
- scope_info_ = broker->GetOrCreateData(context->scope_info());
-}
-
-void NativeContextData::SerializeOnBackground(JSHeapBroker* broker) {
- if (state_ == State::kFullySerialized) return;
- DCHECK_EQ(state_, State::kSerializedOnMainThread);
- state_ = State::kSerializedOnMainThread;
-
- UnparkedScopeIfNeeded unparked_scope(broker);
- TraceScope tracer(broker, this, "NativeContextData::SerializeOnBackground");
- Handle<NativeContext> context = Handle<NativeContext>::cast(object());
-
- constexpr auto kAllowed = ObjectRef::BackgroundSerialization::kAllowed;
-#define SERIALIZE_MEMBER(type, name) \
- DCHECK_NULL(name##_); \
- name##_ = broker->GetOrCreateData(context->name(), kAllowed); \
- if (!name##_->should_access_heap()) { \
- DCHECK(!name##_->IsJSFunction()); \
- }
- BROKER_COMPULSORY_BACKGROUND_NATIVE_CONTEXT_FIELDS(SERIALIZE_MEMBER)
- if (!broker->is_isolate_bootstrapping()) {
- BROKER_OPTIONAL_BACKGROUND_NATIVE_CONTEXT_FIELDS(SERIALIZE_MEMBER)
- }
-#undef SERIALIZE_MEMBER
-
- DCHECK(function_maps_.empty());
- int const first = Context::FIRST_FUNCTION_MAP_INDEX;
- int const last = Context::LAST_FUNCTION_MAP_INDEX;
- function_maps_.reserve(last + 1 - first);
- for (int i = first; i <= last; ++i) {
- function_maps_.push_back(
- broker->GetOrCreateData(context->get(i), kAllowed));
- }
-}
-
-void JSFunctionRef::Serialize() {
- if (data_->should_access_heap()) return;
- CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
- data()->AsJSFunction()->Serialize(broker());
-}
-
-void JSFunctionRef::SerializeCodeAndFeedback() {
- if (data_->should_access_heap()) return;
- CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
- data()->AsJSFunction()->SerializeCodeAndFeedback(broker());
-}
-
-bool JSBoundFunctionRef::serialized() const {
- if (data_->should_access_heap()) return true;
- return data()->AsJSBoundFunction()->serialized();
-}
-
-bool JSFunctionRef::serialized() const {
- if (data_->should_access_heap()) return true;
- return data()->AsJSFunction()->serialized();
-}
-
-bool JSFunctionRef::serialized_code_and_feedback() const {
- if (data_->should_access_heap()) return true;
- return data()->AsJSFunction()->serialized_code_and_feedback();
-}
-
-CodeRef JSFunctionRef::code() const {
- if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) {
- return CodeRef(broker(), broker()->CanonicalPersistentHandle(
- object()->code(kAcquireLoad)));
- }
-
- return CodeRef(broker(), ObjectRef::data()->AsJSFunction()->code());
-}
-
-void SharedFunctionInfoRef::SerializeFunctionTemplateInfo() {
- if (data_->should_access_heap()) return;
- CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
- data()->AsSharedFunctionInfo()->SerializeFunctionTemplateInfo(broker());
-}
-
-void SharedFunctionInfoRef::SerializeScopeInfoChain() {
- if (data_->should_access_heap()) return;
- CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
- data()->AsSharedFunctionInfo()->SerializeScopeInfoChain(broker());
-}
-
-base::Optional<FunctionTemplateInfoRef>
-SharedFunctionInfoRef::function_template_info() const {
- if (data_->should_access_heap()) {
- if (object()->IsApiFunction()) {
- ObjectData* data =
- broker()->TryGetOrCreateData(broker()->CanonicalPersistentHandle(
- object()->function_data(kAcquireLoad)));
- if (data == nullptr) return base::nullopt;
- return FunctionTemplateInfoRef(broker(), data, true);
- }
- return base::nullopt;
- }
- ObjectData* function_template_info =
- data()->AsSharedFunctionInfo()->function_template_info();
- if (!function_template_info) return base::nullopt;
- return FunctionTemplateInfoRef(broker(), function_template_info);
-}
-
-int SharedFunctionInfoRef::context_header_size() const {
- IF_ACCESS_FROM_HEAP_C(scope_info().ContextHeaderLength);
- return data()->AsSharedFunctionInfo()->context_header_size();
-}
-
-ScopeInfoRef SharedFunctionInfoRef::scope_info() const {
- if (data_->should_access_heap()) {
- return ScopeInfoRef(
- broker(), broker()->CanonicalPersistentHandle(object()->scope_info()));
- }
- return ScopeInfoRef(broker(), data()->AsSharedFunctionInfo()->scope_info());
-}
-
-void JSObjectRef::SerializeObjectCreateMap() {
- if (data_->should_access_heap()) return;
- CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
- data()->AsJSObject()->SerializeObjectCreateMap(broker());
-}
-
-void MapRef::SerializeOwnDescriptor(InternalIndex descriptor_index) {
- CHECK_LT(descriptor_index.as_int(), NumberOfOwnDescriptors());
- if (data_->should_access_heap()) return;
- CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
- data()->AsMap()->SerializeOwnDescriptor(broker(), descriptor_index);
-}
-
-bool MapRef::serialized_own_descriptor(InternalIndex descriptor_index) const {
- CHECK_LT(descriptor_index.as_int(), NumberOfOwnDescriptors());
- if (data_->should_access_heap()) return true;
- ObjectData* maybe_desc_array_data = data()->AsMap()->instance_descriptors();
- if (!maybe_desc_array_data) return false;
- if (maybe_desc_array_data->should_access_heap()) return true;
- DescriptorArrayData* desc_array_data =
- maybe_desc_array_data->AsDescriptorArray();
- return desc_array_data->serialized_descriptor(descriptor_index);
-}
-
-void MapRef::SerializeBackPointer() {
- if (data_->should_access_heap()) return;
- CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
- data()->AsMap()->SerializeBackPointer(broker());
-}
-
-void MapRef::SerializePrototype() {
- if (data_->should_access_heap()) return;
- CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
- data()->AsMap()->SerializePrototype(broker());
-}
-
-bool MapRef::serialized_prototype() const {
- if (data_->should_access_heap()) return true;
- CHECK_NE(broker()->mode(), JSHeapBroker::kDisabled);
- return data()->AsMap()->serialized_prototype();
-}
-
-void SourceTextModuleRef::Serialize() {
- if (data_->should_access_heap()) return;
- CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
- data()->AsSourceTextModule()->Serialize(broker());
-}
-
-void NativeContextRef::Serialize() {
- if (data_->should_access_heap()) return;
- CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
- data()->AsNativeContext()->Serialize(broker());
-}
-
-void NativeContextRef::SerializeOnBackground() {
- if (data_->should_access_heap()) return;
- CHECK(broker()->mode() == JSHeapBroker::kSerializing ||
- broker()->mode() == JSHeapBroker::kSerialized);
- data()->AsNativeContext()->SerializeOnBackground(broker());
-}
-
-void JSTypedArrayRef::Serialize() {
- if (data_->should_access_heap() || FLAG_turbo_direct_heap_access) {
- // Even if the typed array object itself is no longer serialized (besides
- // the JSObject parts), the `buffer` field still is and thus we need to
- // make sure to visit it.
- // TODO(jgruber,v8:7790): Remove once JSObject is no longer serialized.
- static_assert(
- std::is_base_of<JSObject, decltype(object()->buffer())>::value, "");
- STATIC_ASSERT(IsSerializedHeapObject<JSObject>());
- JSObjectRef data_ref{
- broker(), broker()->CanonicalPersistentHandle(object()->buffer())};
- } else {
- CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
- data()->AsJSTypedArray()->Serialize(broker());
- }
-}
-
-bool JSTypedArrayRef::serialized() const {
- CHECK_NE(broker()->mode(), JSHeapBroker::kDisabled);
- return data()->AsJSTypedArray()->serialized();
-}
-
-bool JSTypedArrayRef::ShouldHaveBeenSerialized() const {
- if (FLAG_turbo_direct_heap_access) return false;
- return ObjectRef::ShouldHaveBeenSerialized();
-}
-
-bool JSBoundFunctionRef::Serialize() {
- if (data_->should_access_heap()) return true;
- CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
- return data()->AsJSBoundFunction()->Serialize(broker());
-}
-
-bool PropertyCellRef::Serialize() const {
- if (data_->should_access_heap()) return true;
- CHECK(broker()->mode() == JSHeapBroker::kSerializing ||
- broker()->mode() == JSHeapBroker::kSerialized);
- return data()->AsPropertyCell()->Serialize(broker());
-}
-
-void FunctionTemplateInfoRef::SerializeCallCode() {
- if (data_->should_access_heap()) {
- CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
- // CallHandlerInfo::data may still hold a serialized heap object, so we
- // have to make the broker aware of it.
- // TODO(v8:7790): Remove this case once ObjectRef is never serialized.
- Handle<HeapObject> call_code(object()->call_code(kAcquireLoad),
- broker()->isolate());
- if (call_code->IsCallHandlerInfo()) {
- broker()->GetOrCreateData(
- Handle<CallHandlerInfo>::cast(call_code)->data());
- }
- return;
- }
- CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
- data()->AsFunctionTemplateInfo()->SerializeCallCode(broker());
-}
-
-base::Optional<PropertyCellRef> JSGlobalObjectRef::GetPropertyCell(
- NameRef const& name, SerializationPolicy policy) const {
- if (data_->should_access_heap()) {
- return GetPropertyCellFromHeap(broker(), name.object());
- }
- ObjectData* property_cell_data = data()->AsJSGlobalObject()->GetPropertyCell(
- broker(), name.data(), policy);
- if (property_cell_data == nullptr) return base::nullopt;
- return PropertyCellRef(broker(), property_cell_data);
+bool JSHeapBroker::ObjectMayBeUninitialized(HeapObject object) const {
+ return !IsMainThread() && isolate()->heap()->IsPendingAllocation(object);
}
bool CanInlineElementAccess(MapRef const& map) {
@@ -4942,7 +459,7 @@ ElementAccessFeedback::ElementAccessFeedback(Zone* zone,
bool ElementAccessFeedback::HasOnlyStringMaps(JSHeapBroker* broker) const {
for (auto const& group : transition_groups()) {
for (Handle<Map> map : group) {
- if (!MapRef(broker, map).IsStringMap()) return false;
+ if (!MakeRef(broker, map).IsStringMap()) return false;
}
}
return true;
@@ -5008,6 +525,7 @@ bool JSHeapBroker::FeedbackIsInsufficient(FeedbackSource const& source) const {
}
namespace {
+
// Remove unupdatable and abandoned prototype maps in-place.
void FilterRelevantReceiverMaps(Isolate* isolate, MapHandles* maps) {
auto in = maps->begin();
@@ -5074,6 +592,7 @@ bool HasMigrationTargets(const MapHandles& maps) {
}
return false;
}
+
} // namespace
bool JSHeapBroker::CanUseFeedback(const FeedbackNexus& nexus) const {
@@ -5166,12 +685,12 @@ ProcessedFeedback const& JSHeapBroker::ReadFeedbackForGlobalAccess(
isolate(), target_native_context().script_context_table().object(),
script_context_index);
{
- ObjectRef contents(this,
- handle(context->get(context_slot_index), isolate()));
+ ObjectRef contents =
+ MakeRef(this, handle(context->get(context_slot_index), isolate()));
CHECK(!contents.equals(
- ObjectRef(this, isolate()->factory()->the_hole_value())));
+ MakeRef<Object>(this, isolate()->factory()->the_hole_value())));
}
- ContextRef context_ref(this, context);
+ ContextRef context_ref = MakeRef(this, context);
if (immutable) {
context_ref.get(context_slot_index,
SerializationPolicy::kSerializeIfNeeded);
@@ -5183,11 +702,10 @@ ProcessedFeedback const& JSHeapBroker::ReadFeedbackForGlobalAccess(
CHECK(feedback_value->IsPropertyCell());
// The wanted name belongs (or did belong) to a property on the global
// object and the feedback is the cell holding its value.
- PropertyCellRef cell(this, Handle<PropertyCell>::cast(feedback_value));
- ObjectRef(
- this,
- CanonicalPersistentHandle(
- Handle<PropertyCell>::cast(feedback_value)->value(kAcquireLoad)));
+ PropertyCellRef cell =
+ MakeRef(this, Handle<PropertyCell>::cast(feedback_value));
+ MakeRef(this,
+ Handle<PropertyCell>::cast(feedback_value)->value(kAcquireLoad));
return *zone()->New<GlobalAccessFeedback>(cell, nexus.kind());
}
@@ -5228,7 +746,7 @@ ProcessedFeedback const& JSHeapBroker::ReadFeedbackForInstanceOf(
MaybeHandle<JSObject> maybe_constructor = nexus.GetConstructorFeedback();
Handle<JSObject> constructor;
if (maybe_constructor.ToHandle(&constructor)) {
- optional_constructor = JSObjectRef(this, constructor);
+ optional_constructor = MakeRef(this, constructor);
}
}
return *zone()->New<InstanceOfFeedback>(optional_constructor, nexus.kind());
@@ -5244,7 +762,8 @@ ProcessedFeedback const& JSHeapBroker::ReadFeedbackForArrayOrObjectLiteral(
return NewInsufficientFeedback(nexus.kind());
}
- AllocationSiteRef site(this, handle(object, isolate()));
+ AllocationSiteRef site =
+ MakeRef(this, handle(AllocationSite::cast(object), isolate()));
if (site.IsFastLiteral()) {
site.SerializeBoilerplate();
}
@@ -5262,7 +781,8 @@ ProcessedFeedback const& JSHeapBroker::ReadFeedbackForRegExpLiteral(
return NewInsufficientFeedback(nexus.kind());
}
- RegExpBoilerplateDescriptionRef boilerplate(this, handle(object, isolate()));
+ RegExpBoilerplateDescriptionRef boilerplate = MakeRef(
+ this, handle(RegExpBoilerplateDescription::cast(object), isolate()));
boilerplate.Serialize();
return *zone()->New<RegExpLiteralFeedback>(boilerplate, nexus.kind());
}
@@ -5277,7 +797,7 @@ ProcessedFeedback const& JSHeapBroker::ReadFeedbackForTemplateObject(
return NewInsufficientFeedback(nexus.kind());
}
- JSArrayRef array(this, handle(object, isolate()));
+ JSArrayRef array = MakeRef(this, handle(JSArray::cast(object), isolate()));
return *zone()->New<TemplateObjectFeedback>(array, nexus.kind());
}
@@ -5297,12 +817,14 @@ ProcessedFeedback const& JSHeapBroker::ReadFeedbackForCall(
MaybeObject maybe_target = nexus.GetFeedback();
HeapObject target_object;
if (maybe_target->GetHeapObject(&target_object)) {
- target_ref = HeapObjectRef(this, handle(target_object, isolate()));
+ target_ref = MakeRef(this, handle(target_object, isolate()));
}
}
float frequency = nexus.ComputeCallFrequency();
SpeculationMode mode = nexus.GetSpeculationMode();
- return *zone()->New<CallFeedback>(target_ref, frequency, mode, nexus.kind());
+ CallFeedbackContent content = nexus.GetCallFeedbackContent();
+ return *zone()->New<CallFeedback>(target_ref, frequency, mode, content,
+ nexus.kind());
}
BinaryOperationHint JSHeapBroker::GetFeedbackForBinaryOperation(
@@ -5468,7 +990,7 @@ ElementAccessFeedback const& JSHeapBroker::ProcessFeedbackMapsForElementAccess(
MapHandles possible_transition_targets;
possible_transition_targets.reserve(maps.size());
for (Handle<Map> map : maps) {
- MapRef map_ref(this, map);
+ MapRef map_ref = MakeRef(this, map);
map_ref.SerializeRootMap();
if (CanInlineElementAccess(map_ref) &&
@@ -5531,23 +1053,11 @@ void ElementAccessFeedback::AddGroup(TransitionGroup&& group) {
#endif
}
-std::ostream& operator<<(std::ostream& os, const ObjectRef& ref) {
- if (!FLAG_concurrent_recompilation) {
- // We cannot be in a background thread so it's safe to read the heap.
- AllowHandleDereference allow_handle_dereference;
- return os << ref.data() << " {" << ref.object() << "}";
- } else if (ref.data_->should_access_heap()) {
- return os << ref.data() << " {" << ref.object() << "}";
- } else {
- return os << ref.data();
- }
-}
-
base::Optional<NameRef> JSHeapBroker::GetNameFeedback(
FeedbackNexus const& nexus) {
Name raw_name = nexus.GetName();
if (raw_name.is_null()) return base::nullopt;
- return NameRef(this, handle(raw_name, isolate()));
+ return MakeRef(this, handle(raw_name, isolate()));
}
PropertyAccessInfo JSHeapBroker::GetPropertyAccessInfo(
@@ -5557,7 +1067,8 @@ PropertyAccessInfo JSHeapBroker::GetPropertyAccessInfo(
auto it = property_access_infos_.find(target);
if (it != property_access_infos_.end()) return it->second;
- if (policy == SerializationPolicy::kAssumeSerialized) {
+ if (policy == SerializationPolicy::kAssumeSerialized &&
+ !FLAG_turbo_concurrent_get_property_access_info) {
TRACE_BROKER_MISSING(this, "PropertyAccessInfo for "
<< access_mode << " of property " << name
<< " on map " << map);
@@ -5569,7 +1080,8 @@ PropertyAccessInfo JSHeapBroker::GetPropertyAccessInfo(
PropertyAccessInfo access_info = factory.ComputePropertyAccessInfo(
map.object(), name.object(), access_mode);
if (is_concurrent_inlining_) {
- CHECK_EQ(mode(), kSerializing);
+ CHECK_IMPLIES(!FLAG_turbo_concurrent_get_property_access_info,
+ mode() == kSerializing);
TRACE(this, "Storing PropertyAccessInfo for "
<< access_mode << " of property " << name << " on map "
<< map);
@@ -5664,27 +1176,7 @@ TemplateObjectFeedback const& ProcessedFeedback::AsTemplateObject() const {
return *static_cast<TemplateObjectFeedback const*>(this);
}
-unsigned CodeRef::GetInlinedBytecodeSize() const {
- if (data_->should_access_heap()) {
- unsigned value = object()->inlined_bytecode_size();
- if (value > 0) {
- // Don't report inlined bytecode size if the code object was already
- // deoptimized.
- value = object()->marked_for_deoptimization() ? 0 : value;
- }
- return value;
- }
-
- return ObjectRef::data()->AsCode()->inlined_bytecode_size();
-}
-
-#undef BIMODAL_ACCESSOR
-#undef BIMODAL_ACCESSOR_B
-#undef BIMODAL_ACCESSOR_C
-#undef IF_ACCESS_FROM_HEAP
-#undef IF_ACCESS_FROM_HEAP_C
#undef TRACE
-#undef TRACE_MISSING
} // namespace compiler
} // namespace internal
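
Editorial note: the bulk of the js-heap-broker.cc changes above replace direct Ref constructor calls (for example MapRef(broker, map)) with the MakeRef/TryMakeRef factories declared later in js-heap-broker.h, which route through JSHeapBroker::TryGetOrCreateData. A minimal sketch of that calling convention, hedged as illustration only — map_handle and the surrounding reducer are hypothetical, all other names appear in this patch:

    // Hypothetical reducer fragment, not part of this patch.
    base::Optional<MapRef> map_ref = TryMakeRef(broker(), map_handle);
    if (!map_ref.has_value()) return NoChange();   // ObjectData could not be created; bail out.
    // MakeRef is the infallible variant: it asserts success via value().
    MapRef map_ref_checked = MakeRef(broker(), map_handle);
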
diff --git a/chromium/v8/src/compiler/js-heap-broker.h b/chromium/v8/src/compiler/js-heap-broker.h
index 32b09dddbc8..cc842ca8e6d 100644
--- a/chromium/v8/src/compiler/js-heap-broker.h
+++ b/chromium/v8/src/compiler/js-heap-broker.h
@@ -7,10 +7,12 @@
#include "src/base/compiler-specific.h"
#include "src/base/optional.h"
+#include "src/base/platform/mutex.h"
#include "src/common/globals.h"
#include "src/compiler/access-info.h"
#include "src/compiler/feedback-source.h"
#include "src/compiler/globals.h"
+#include "src/compiler/heap-refs.h"
#include "src/compiler/processed-feedback.h"
#include "src/compiler/refs-map.h"
#include "src/compiler/serializer-hints.h"
@@ -78,6 +80,18 @@ struct PropertyAccessTarget {
};
};
+enum GetOrCreateDataFlag {
+ // If set, a failure to create the data object results in a crash.
+ kCrashOnError = 1 << 0,
+ // If set, data construction assumes that the given object is protected by
+ // a memory fence (e.g. acquire-release) and thus fields required for
+ // construction (like Object::map) are safe to read. The protection can
+ // extend to some other situations as well.
+ kAssumeMemoryFence = 1 << 1,
+};
+using GetOrCreateDataFlags = base::Flags<GetOrCreateDataFlag>;
+DEFINE_OPERATORS_FOR_FLAGS(GetOrCreateDataFlags)
+
class V8_EXPORT_PRIVATE JSHeapBroker {
public:
JSHeapBroker(Isolate* isolate, Zone* broker_zone, bool tracing_enabled,
@@ -98,7 +112,7 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
}
void SetTargetNativeContextRef(Handle<NativeContext> native_context);
- void InitializeAndStartSerializing(Handle<NativeContext> native_context);
+ void InitializeAndStartSerializing();
Isolate* isolate() const { return isolate_; }
Zone* zone() const { return zone_; }
@@ -106,7 +120,8 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
bool is_concurrent_inlining() const { return is_concurrent_inlining_; }
bool is_isolate_bootstrapping() const { return is_isolate_bootstrapping_; }
bool is_native_context_independent() const {
- return code_kind_ == CodeKind::NATIVE_CONTEXT_INDEPENDENT;
+ // TODO(jgruber,v8:8888): Remove dependent code.
+ return false;
}
bool generate_full_feedback_collection() const {
// NCI code currently collects full feedback.
@@ -145,29 +160,25 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
void PrintRefsAnalysis() const;
#endif // DEBUG
- // Retruns the handle from root index table for read only heap objects.
+ // Returns the handle from root index table for read only heap objects.
Handle<Object> GetRootHandle(Object object);
// Never returns nullptr.
- ObjectData* GetOrCreateData(
- Handle<Object>,
- ObjectRef::BackgroundSerialization background_serialization =
- ObjectRef::BackgroundSerialization::kDisallowed);
- // Like the previous but wraps argument in handle first (for convenience).
- ObjectData* GetOrCreateData(
- Object, ObjectRef::BackgroundSerialization background_serialization =
- ObjectRef::BackgroundSerialization::kDisallowed);
+ ObjectData* GetOrCreateData(Handle<Object> object,
+ GetOrCreateDataFlags flags = {});
+ ObjectData* GetOrCreateData(Object object, GetOrCreateDataFlags flags = {});
// Gets data only if we have it. However, thin wrappers will be created for
// smis, read-only objects and never-serialized objects.
- ObjectData* TryGetOrCreateData(
- Handle<Object>, bool crash_on_error = false,
- ObjectRef::BackgroundSerialization background_serialization =
- ObjectRef::BackgroundSerialization::kDisallowed);
+ ObjectData* TryGetOrCreateData(Handle<Object> object,
+ GetOrCreateDataFlags flags = {});
+ ObjectData* TryGetOrCreateData(Object object,
+ GetOrCreateDataFlags flags = {});
// Check if {object} is any native context's %ArrayPrototype% or
// %ObjectPrototype%.
bool IsArrayOrObjectPrototype(const JSObjectRef& object) const;
+ bool IsArrayOrObjectPrototype(Handle<JSObject> object) const;
bool HasFeedback(FeedbackSource const& source) const;
void SetFeedback(FeedbackSource const& source,
@@ -240,6 +251,25 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
FeedbackSource const& source,
SerializationPolicy policy = SerializationPolicy::kAssumeSerialized);
+ // Used to separate the problem of a concurrent GetPropertyAccessInfo (GPAI)
+ // from serialization. GPAI is currently called both during the serialization
+ // phase, and on the background thread. While some crucial objects (like
+ // JSObject) still must be serialized, we do the following:
+ // - Run GPAI during serialization to discover and serialize required objects.
+ // - After the serialization phase, clear cached property access infos.
+ // - On the background thread, rerun GPAI in a concurrent setting. The cache
+ // has been cleared, thus the actual logic runs again.
+ // Once all required object kinds no longer require serialization, this
+ // should be removed together with all GPAI calls during serialization.
+ void ClearCachedPropertyAccessInfos() {
+ CHECK(FLAG_turbo_concurrent_get_property_access_info);
+ property_access_infos_.clear();
+ }
+
+ // As above, clear cached ObjectData that can be reconstructed, i.e. is
+ // either never-serialized or background-serialized.
+ void ClearReconstructibleData();
+
StringRef GetTypedArrayStringTag(ElementsKind kind);
bool ShouldBeSerializedForCompilation(const SharedFunctionInfoRef& shared,
@@ -257,6 +287,14 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
LocalIsolate* local_isolate() const { return local_isolate_; }
+ // TODO(jgruber): Consider always having local_isolate_ set to a real value.
+ // This seems not entirely trivial since we currently reset local_isolate_ to
+ // nullptr at some point in the JSHeapBroker lifecycle.
+ LocalIsolate* local_isolate_or_isolate() const {
+ return local_isolate() != nullptr ? local_isolate()
+ : isolate()->AsLocalIsolate();
+ }
+
// Return the corresponding canonical persistent handle for {object}. Create
// one if it does not exist.
// If we have the canonical map, we can create the canonical & persistent
@@ -291,6 +329,7 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
template <typename T>
Handle<T> CanonicalPersistentHandle(Handle<T> object) {
+ if (object.is_null()) return object; // Can't deref a null handle.
return CanonicalPersistentHandle<T>(*object);
}
@@ -313,6 +352,34 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
RootIndexMap const& root_index_map() { return root_index_map_; }
+ // Locks {mutex} through the duration of this scope iff it is the first
+ // occurrence. This is done to have a recursive shared lock on {mutex}.
+ class V8_NODISCARD MapUpdaterGuardIfNeeded final {
+ public:
+ explicit MapUpdaterGuardIfNeeded(JSHeapBroker* ptr,
+ base::SharedMutex* mutex)
+ : ptr_(ptr),
+ initial_map_updater_mutex_depth_(ptr->map_updater_mutex_depth_),
+ shared_mutex(mutex, should_lock()) {
+ ptr_->map_updater_mutex_depth_++;
+ }
+
+ ~MapUpdaterGuardIfNeeded() {
+ ptr_->map_updater_mutex_depth_--;
+ DCHECK_EQ(initial_map_updater_mutex_depth_,
+ ptr_->map_updater_mutex_depth_);
+ }
+
+ // Whether the MapUpdater mutex should be physically locked (if not, we
+ // already hold the lock).
+ bool should_lock() const { return initial_map_updater_mutex_depth_ == 0; }
+
+ private:
+ JSHeapBroker* const ptr_;
+ const int initial_map_updater_mutex_depth_;
+ base::SharedMutexGuardIf<base::kShared> shared_mutex;
+ };
+
private:
friend class HeapObjectRef;
friend class ObjectRef;
@@ -323,6 +390,7 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
// thus safe to read from a memory safety perspective. The converse does not
// necessarily hold.
bool ObjectMayBeUninitialized(Handle<Object> object) const;
+ bool ObjectMayBeUninitialized(HeapObject object) const;
bool CanUseFeedback(const FeedbackNexus& nexus) const;
const ProcessedFeedback& NewInsufficientFeedback(FeedbackSlotKind kind) const;
@@ -427,9 +495,19 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
};
ZoneMultimap<SerializedFunction, HintsVector> serialized_functions_;
- static const size_t kMaxSerializedFunctionsCacheSize = 200;
- static const uint32_t kMinimalRefsBucketCount = 8; // must be power of 2
- static const uint32_t kInitialRefsBucketCount = 1024; // must be power of 2
+ // The MapUpdater mutex is used in recursive patterns; for example,
+ // ComputePropertyAccessInfo may call itself recursively. Thus we need to
+ // emulate a recursive mutex, which we do by checking if this heap broker
+ // instance already holds the mutex when a lock is requested. This field
+ // holds the locking depth, i.e. how many times the mutex has been
+ // recursively locked. Only the outermost locker actually locks underneath.
+ int map_updater_mutex_depth_ = 0;
+
+ static constexpr size_t kMaxSerializedFunctionsCacheSize = 200;
+ static constexpr uint32_t kMinimalRefsBucketCount = 8;
+ STATIC_ASSERT(base::bits::IsPowerOfTwo(kMinimalRefsBucketCount));
+ static constexpr uint32_t kInitialRefsBucketCount = 1024;
+ STATIC_ASSERT(base::bits::IsPowerOfTwo(kInitialRefsBucketCount));
};
class V8_NODISCARD TraceScope {
@@ -490,6 +568,65 @@ class V8_NODISCARD UnparkedScopeIfNeeded {
base::Optional<UnparkedScope> unparked_scope;
};
+// Usage:
+//
+// base::Optional<FooRef> ref = TryMakeRef(broker, o);
+// if (!ref.has_value()) return {}; // bailout
+//
+// or
+//
+// FooRef ref = MakeRef(broker, o);
+template <class T,
+ typename = std::enable_if_t<std::is_convertible<T*, Object*>::value>>
+base::Optional<typename ref_traits<T>::ref_type> TryMakeRef(
+ JSHeapBroker* broker, T object, GetOrCreateDataFlags flags = {}) {
+ ObjectData* data = broker->TryGetOrCreateData(object, flags);
+ if (data == nullptr) {
+ TRACE_BROKER_MISSING(broker, "ObjectData for " << Brief(object));
+ return {};
+ }
+ return {typename ref_traits<T>::ref_type(broker, data)};
+}
+
+template <class T,
+ typename = std::enable_if_t<std::is_convertible<T*, Object*>::value>>
+base::Optional<typename ref_traits<T>::ref_type> TryMakeRef(
+ JSHeapBroker* broker, Handle<T> object, GetOrCreateDataFlags flags = {}) {
+ ObjectData* data = broker->TryGetOrCreateData(object, flags);
+ if (data == nullptr) {
+ TRACE_BROKER_MISSING(broker, "ObjectData for " << Brief(*object));
+ return {};
+ }
+ return {typename ref_traits<T>::ref_type(broker, data)};
+}
+
+template <class T,
+ typename = std::enable_if_t<std::is_convertible<T*, Object*>::value>>
+typename ref_traits<T>::ref_type MakeRef(JSHeapBroker* broker, T object) {
+ return TryMakeRef(broker, object).value();
+}
+
+template <class T,
+ typename = std::enable_if_t<std::is_convertible<T*, Object*>::value>>
+typename ref_traits<T>::ref_type MakeRef(JSHeapBroker* broker,
+ Handle<T> object) {
+ return TryMakeRef(broker, object).value();
+}
+
+template <class T,
+ typename = std::enable_if_t<std::is_convertible<T*, Object*>::value>>
+typename ref_traits<T>::ref_type MakeRefAssumeMemoryFence(JSHeapBroker* broker,
+ T object) {
+ return TryMakeRef(broker, object, kAssumeMemoryFence).value();
+}
+
+template <class T,
+ typename = std::enable_if_t<std::is_convertible<T*, Object*>::value>>
+typename ref_traits<T>::ref_type MakeRefAssumeMemoryFence(JSHeapBroker* broker,
+ Handle<T> object) {
+ return TryMakeRef(broker, object, kAssumeMemoryFence).value();
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
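
Editorial note: the MapUpdaterGuardIfNeeded helper added to js-heap-broker.h above emulates a recursive shared lock with a per-broker depth counter — only the outermost guard physically locks the SharedMutex, nested guards just bump the counter. Below is a standalone C++ analogue of that depth-counter technique, assuming std::shared_mutex; it is a sketch, not the V8 class (which uses base::SharedMutexGuardIf and the broker's map_updater_mutex_depth_ field):

    #include <cassert>
    #include <shared_mutex>

    // Takes the shared (reader) lock only at the outermost nesting level,
    // tracked by a caller-owned depth counter, so re-entrant code paths do
    // not try to re-acquire the same shared mutex.
    class RecursiveSharedGuard {
     public:
      RecursiveSharedGuard(int* depth, std::shared_mutex* mutex)
          : depth_(depth), mutex_(mutex), locked_(*depth == 0) {
        if (locked_) mutex_->lock_shared();  // Only the outermost guard locks.
        ++*depth_;
      }
      ~RecursiveSharedGuard() {
        --*depth_;
        if (locked_) mutex_->unlock_shared();
        assert(*depth_ >= 0);
      }

     private:
      int* const depth_;
      std::shared_mutex* const mutex_;
      const bool locked_;  // Whether this instance performed the physical lock.
    };

    // Usage: nested scopes sharing one depth counter lock the mutex only once.
    //   int depth = 0; std::shared_mutex mu;
    //   { RecursiveSharedGuard outer(&depth, &mu);    // locks
    //     { RecursiveSharedGuard inner(&depth, &mu);  // does not lock again
    //     }
    //   }                                             // unlocks
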
diff --git a/chromium/v8/src/compiler/js-heap-copy-reducer.cc b/chromium/v8/src/compiler/js-heap-copy-reducer.cc
index 3b45b9d82b4..b8f6274741f 100644
--- a/chromium/v8/src/compiler/js-heap-copy-reducer.cc
+++ b/chromium/v8/src/compiler/js-heap-copy-reducer.cc
@@ -28,7 +28,7 @@ JSHeapBroker* JSHeapCopyReducer::broker() { return broker_; }
Reduction JSHeapCopyReducer::Reduce(Node* node) {
switch (node->opcode()) {
case IrOpcode::kCheckClosure: {
- FeedbackCellRef cell(broker(), FeedbackCellOf(node->op()));
+ FeedbackCellRef cell = MakeRef(broker(), FeedbackCellOf(node->op()));
base::Optional<FeedbackVectorRef> feedback_vector = cell.value();
if (feedback_vector.has_value()) {
feedback_vector->Serialize();
@@ -36,7 +36,7 @@ Reduction JSHeapCopyReducer::Reduce(Node* node) {
break;
}
case IrOpcode::kHeapConstant: {
- ObjectRef object(broker(), HeapConstantOf(node->op()));
+ ObjectRef object = MakeRef(broker(), HeapConstantOf(node->op()));
if (object.IsJSFunction()) object.AsJSFunction().Serialize();
if (object.IsJSObject()) {
object.AsJSObject().SerializeObjectCreateMap();
@@ -49,34 +49,33 @@ Reduction JSHeapCopyReducer::Reduce(Node* node) {
case IrOpcode::kJSCreateArray: {
CreateArrayParameters const& p = CreateArrayParametersOf(node->op());
Handle<AllocationSite> site;
- if (p.site().ToHandle(&site)) AllocationSiteRef(broker(), site);
+ if (p.site().ToHandle(&site)) MakeRef(broker(), site);
break;
}
case IrOpcode::kJSCreateArguments: {
Node* const frame_state = NodeProperties::GetFrameStateInput(node);
FrameStateInfo state_info = FrameStateInfoOf(frame_state->op());
- SharedFunctionInfoRef shared(broker(),
- state_info.shared_info().ToHandleChecked());
+ MakeRef(broker(), state_info.shared_info().ToHandleChecked());
break;
}
case IrOpcode::kJSCreateBlockContext: {
- ScopeInfoRef(broker(), ScopeInfoOf(node->op()));
+ MakeRef(broker(), ScopeInfoOf(node->op()));
break;
}
case IrOpcode::kJSCreateBoundFunction: {
CreateBoundFunctionParameters const& p =
CreateBoundFunctionParametersOf(node->op());
- MapRef(broker(), p.map());
+ MakeRef(broker(), p.map());
break;
}
case IrOpcode::kJSCreateCatchContext: {
- ScopeInfoRef(broker(), ScopeInfoOf(node->op()));
+ MakeRef(broker(), ScopeInfoOf(node->op()));
break;
}
case IrOpcode::kJSCreateClosure: {
CreateClosureParameters const& p = CreateClosureParametersOf(node->op());
- SharedFunctionInfoRef(broker(), p.shared_info());
- HeapObjectRef(broker(), p.code());
+ MakeRef(broker(), p.shared_info());
+ MakeRef(broker(), p.code());
break;
}
case IrOpcode::kJSCreateEmptyLiteralArray: {
@@ -133,7 +132,7 @@ Reduction JSHeapCopyReducer::Reduce(Node* node) {
case IrOpcode::kJSCreateFunctionContext: {
CreateFunctionContextParameters const& p =
CreateFunctionContextParametersOf(node->op());
- ScopeInfoRef(broker(), p.scope_info());
+ MakeRef(broker(), p.scope_info());
break;
}
case IrOpcode::kJSCreateLiteralArray:
@@ -154,18 +153,18 @@ Reduction JSHeapCopyReducer::Reduce(Node* node) {
case IrOpcode::kJSGetTemplateObject: {
GetTemplateObjectParameters const& p =
GetTemplateObjectParametersOf(node->op());
- SharedFunctionInfoRef(broker(), p.shared());
- TemplateObjectDescriptionRef(broker(), p.description());
+ MakeRef(broker(), p.shared());
+ MakeRef(broker(), p.description());
broker()->ProcessFeedbackForTemplateObject(p.feedback());
break;
}
case IrOpcode::kJSCreateWithContext: {
- ScopeInfoRef(broker(), ScopeInfoOf(node->op()));
+ MakeRef(broker(), ScopeInfoOf(node->op()));
break;
}
case IrOpcode::kJSLoadNamed: {
NamedAccess const& p = NamedAccessOf(node->op());
- NameRef name(broker(), p.name());
+ NameRef name = MakeRef(broker(), p.name());
if (p.feedback().IsValid()) {
broker()->ProcessFeedbackForPropertyAccess(p.feedback(),
AccessMode::kLoad, name);
@@ -174,7 +173,7 @@ Reduction JSHeapCopyReducer::Reduce(Node* node) {
}
case IrOpcode::kJSLoadNamedFromSuper: {
NamedAccess const& p = NamedAccessOf(node->op());
- NameRef name(broker(), p.name());
+ NameRef name = MakeRef(broker(), p.name());
if (p.feedback().IsValid()) {
broker()->ProcessFeedbackForPropertyAccess(p.feedback(),
AccessMode::kLoad, name);
@@ -183,7 +182,7 @@ Reduction JSHeapCopyReducer::Reduce(Node* node) {
}
case IrOpcode::kJSStoreNamed: {
NamedAccess const& p = NamedAccessOf(node->op());
- NameRef name(broker(), p.name());
+ MakeRef(broker(), p.name());
break;
}
case IrOpcode::kStoreField:
@@ -191,32 +190,32 @@ Reduction JSHeapCopyReducer::Reduce(Node* node) {
FieldAccess access = FieldAccessOf(node->op());
Handle<Map> map_handle;
if (access.map.ToHandle(&map_handle)) {
- MapRef(broker(), map_handle);
+ MakeRef(broker(), map_handle);
}
Handle<Name> name_handle;
if (access.name.ToHandle(&name_handle)) {
- NameRef(broker(), name_handle);
+ MakeRef(broker(), name_handle);
}
break;
}
case IrOpcode::kMapGuard: {
ZoneHandleSet<Map> const& maps = MapGuardMapsOf(node->op());
for (Handle<Map> map : maps) {
- MapRef(broker(), map);
+ MakeRef(broker(), map);
}
break;
}
case IrOpcode::kCheckMaps: {
ZoneHandleSet<Map> const& maps = CheckMapsParametersOf(node->op()).maps();
for (Handle<Map> map : maps) {
- MapRef(broker(), map);
+ MakeRef(broker(), map);
}
break;
}
case IrOpcode::kCompareMaps: {
ZoneHandleSet<Map> const& maps = CompareMapsParametersOf(node->op());
for (Handle<Map> map : maps) {
- MapRef(broker(), map);
+ MakeRef(broker(), map);
}
break;
}
diff --git a/chromium/v8/src/compiler/js-inlining-heuristic.cc b/chromium/v8/src/compiler/js-inlining-heuristic.cc
index 5777719107c..8115f8e565a 100644
--- a/chromium/v8/src/compiler/js-inlining-heuristic.cc
+++ b/chromium/v8/src/compiler/js-inlining-heuristic.cc
@@ -110,7 +110,7 @@ JSInliningHeuristic::Candidate JSInliningHeuristic::CollectFunctions(
}
if (m.IsCheckClosure()) {
DCHECK(!out.functions[0].has_value());
- FeedbackCellRef feedback_cell(broker(), FeedbackCellOf(m.op()));
+ FeedbackCellRef feedback_cell = MakeRef(broker(), FeedbackCellOf(m.op()));
SharedFunctionInfoRef shared_info = *feedback_cell.shared_function_info();
out.shared_info = shared_info;
if (CanConsiderForInlining(broker(), shared_info, *feedback_cell.value())) {
@@ -124,7 +124,7 @@ JSInliningHeuristic::Candidate JSInliningHeuristic::CollectFunctions(
JSCreateClosureNode n(callee);
CreateClosureParameters const& p = n.Parameters();
FeedbackCellRef feedback_cell = n.GetFeedbackCellRefChecked(broker());
- SharedFunctionInfoRef shared_info(broker(), p.shared_info());
+ SharedFunctionInfoRef shared_info = MakeRef(broker(), p.shared_info());
out.shared_info = shared_info;
if (feedback_cell.value().has_value() &&
CanConsiderForInlining(broker(), shared_info, *feedback_cell.value())) {
diff --git a/chromium/v8/src/compiler/js-inlining.cc b/chromium/v8/src/compiler/js-inlining.cc
index 6e64f2b6777..e55efde5d87 100644
--- a/chromium/v8/src/compiler/js-inlining.cc
+++ b/chromium/v8/src/compiler/js-inlining.cc
@@ -335,7 +335,7 @@ base::Optional<SharedFunctionInfoRef> JSInliner::DetermineCallTarget(
FeedbackCellRef cell = n.GetFeedbackCellRefChecked(broker());
return cell.shared_function_info();
} else if (match.IsCheckClosure()) {
- FeedbackCellRef cell(broker(), FeedbackCellOf(match.op()));
+ FeedbackCellRef cell = MakeRef(broker(), FeedbackCellOf(match.op()));
return cell.shared_function_info();
}
@@ -373,7 +373,7 @@ FeedbackCellRef JSInliner::DetermineCallContext(Node* node,
*context_out = NodeProperties::GetContextInput(match.node());
return cell;
} else if (match.IsCheckClosure()) {
- FeedbackCellRef cell(broker(), FeedbackCellOf(match.op()));
+ FeedbackCellRef cell = MakeRef(broker(), FeedbackCellOf(match.op()));
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
@@ -475,7 +475,8 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
if (!shared_info.has_value()) return NoChange();
DCHECK(shared_info->IsInlineable());
- SharedFunctionInfoRef outer_shared_info(broker(), info_->shared_info());
+ SharedFunctionInfoRef outer_shared_info =
+ MakeRef(broker(), info_->shared_info());
// Constructor must be constructable.
if (node->opcode() == IrOpcode::kJSConstruct &&
diff --git a/chromium/v8/src/compiler/js-intrinsic-lowering.cc b/chromium/v8/src/compiler/js-intrinsic-lowering.cc
index 449136051bc..803d55cc271 100644
--- a/chromium/v8/src/compiler/js-intrinsic-lowering.cc
+++ b/chromium/v8/src/compiler/js-intrinsic-lowering.cc
@@ -30,11 +30,15 @@ Reduction JSIntrinsicLowering::Reduce(Node* node) {
if (node->opcode() != IrOpcode::kJSCallRuntime) return NoChange();
const Runtime::Function* const f =
Runtime::FunctionForId(CallRuntimeParametersOf(node->op()).id());
- if (f->function_id == Runtime::kTurbofanStaticAssert) {
- return ReduceTurbofanStaticAssert(node);
- }
- if (f->function_id == Runtime::kIsBeingInterpreted) {
- return ReduceIsBeingInterpreted(node);
+ switch (f->function_id) {
+ case Runtime::kIsBeingInterpreted:
+ return ReduceIsBeingInterpreted(node);
+ case Runtime::kTurbofanStaticAssert:
+ return ReduceTurbofanStaticAssert(node);
+ case Runtime::kVerifyType:
+ return ReduceVerifyType(node);
+ default:
+ break;
}
if (f->intrinsic_type != Runtime::IntrinsicType::INLINE) return NoChange();
switch (f->function_id) {
@@ -80,8 +84,6 @@ Reduction JSIntrinsicLowering::Reduce(Node* node) {
return ReduceToLength(node);
case Runtime::kInlineToObject:
return ReduceToObject(node);
- case Runtime::kInlineToString:
- return ReduceToString(node);
case Runtime::kInlineCall:
return ReduceCall(node);
case Runtime::kInlineIncBlockCounter:
@@ -290,6 +292,10 @@ Reduction JSIntrinsicLowering::ReduceTurbofanStaticAssert(Node* node) {
return Changed(jsgraph_->UndefinedConstant());
}
+Reduction JSIntrinsicLowering::ReduceVerifyType(Node* node) {
+ return Change(node, simplified()->VerifyType());
+}
+
Reduction JSIntrinsicLowering::ReduceIsBeingInterpreted(Node* node) {
RelaxEffectsAndControls(node);
return Changed(jsgraph_->FalseConstant());
diff --git a/chromium/v8/src/compiler/js-intrinsic-lowering.h b/chromium/v8/src/compiler/js-intrinsic-lowering.h
index 3deeb5685e7..f2d4dbccf2c 100644
--- a/chromium/v8/src/compiler/js-intrinsic-lowering.h
+++ b/chromium/v8/src/compiler/js-intrinsic-lowering.h
@@ -60,6 +60,7 @@ class V8_EXPORT_PRIVATE JSIntrinsicLowering final
Reduction ReduceIsSmi(Node* node);
Reduction ReduceIsBeingInterpreted(Node* node);
Reduction ReduceTurbofanStaticAssert(Node* node);
+ Reduction ReduceVerifyType(Node* node);
Reduction ReduceToLength(Node* node);
Reduction ReduceToObject(Node* node);
Reduction ReduceToString(Node* node);
diff --git a/chromium/v8/src/compiler/js-native-context-specialization.cc b/chromium/v8/src/compiler/js-native-context-specialization.cc
index 3d9290a0bf9..8c372d34534 100644
--- a/chromium/v8/src/compiler/js-native-context-specialization.cc
+++ b/chromium/v8/src/compiler/js-native-context-specialization.cc
@@ -10,6 +10,7 @@
#include "src/codegen/string-constants.h"
#include "src/compiler/access-builder.h"
#include "src/compiler/access-info.h"
+#include "src/compiler/allocation-builder-inl.h"
#include "src/compiler/allocation-builder.h"
#include "src/compiler/compilation-dependencies.h"
#include "src/compiler/js-graph.h"
@@ -20,7 +21,6 @@
#include "src/compiler/property-access-builder.h"
#include "src/compiler/type-cache.h"
#include "src/execution/isolate-inl.h"
-#include "src/numbers/dtoa.h"
#include "src/objects/feedback-vector.h"
#include "src/objects/field-index-inl.h"
#include "src/objects/heap-number.h"
@@ -36,7 +36,7 @@ namespace {
bool HasNumberMaps(JSHeapBroker* broker, ZoneVector<Handle<Map>> const& maps) {
for (auto map : maps) {
- MapRef map_ref(broker, map);
+ MapRef map_ref = MakeRef(broker, map);
if (map_ref.IsHeapNumberMap()) return true;
}
return false;
@@ -45,7 +45,7 @@ bool HasNumberMaps(JSHeapBroker* broker, ZoneVector<Handle<Map>> const& maps) {
bool HasOnlyJSArrayMaps(JSHeapBroker* broker,
ZoneVector<Handle<Map>> const& maps) {
for (auto map : maps) {
- MapRef map_ref(broker, map);
+ MapRef map_ref = MakeRef(broker, map);
if (!map_ref.IsJSArrayMap()) return false;
}
return true;
@@ -139,7 +139,7 @@ base::Optional<size_t> JSNativeContextSpecialization::GetMaxStringLength(
NumberMatcher number_matcher(node);
if (number_matcher.HasResolvedValue()) {
- return kBase10MaximalLength + 1;
+ return kMaxDoubleStringLength;
}
// We don't support objects with possibly monkey-patched prototype.toString
@@ -228,12 +228,17 @@ Reduction JSNativeContextSpecialization::ReduceJSAsyncFunctionEnter(
// Create the JSAsyncFunctionObject based on the SharedFunctionInfo
// extracted from the top-most frame in {frame_state}.
- SharedFunctionInfoRef shared(
+ SharedFunctionInfoRef shared = MakeRef(
broker(),
FrameStateInfoOf(frame_state->op()).shared_info().ToHandleChecked());
DCHECK(shared.is_compiled());
int register_count = shared.internal_formal_parameter_count() +
shared.GetBytecodeArray().register_count();
+ MapRef fixed_array_map = MakeRef(broker(), factory()->fixed_array_map());
+ AllocationBuilder ab(jsgraph(), effect, control);
+ if (!ab.CanAllocateArray(register_count, fixed_array_map)) {
+ return NoChange();
+ }
Node* value = effect =
graph()->NewNode(javascript()->CreateAsyncFunctionObject(register_count),
closure, receiver, promise, context, effect, control);
@@ -359,19 +364,15 @@ Reduction JSNativeContextSpecialization::ReduceJSGetSuperConstructor(
}
JSFunctionRef function = m.Ref(broker()).AsJSFunction();
MapRef function_map = function.map();
- if (function_map.ShouldHaveBeenSerialized() &&
- !function_map.serialized_prototype()) {
- TRACE_BROKER_MISSING(broker(), "data for map " << function_map);
- return NoChange();
- }
- HeapObjectRef function_prototype = function_map.prototype();
+ base::Optional<HeapObjectRef> function_prototype = function_map.prototype();
+ if (!function_prototype.has_value()) return NoChange();
// We can constant-fold the super constructor access if the
// {function}s map is stable, i.e. we can use a code dependency
// to guard against [[Prototype]] changes of {function}.
if (function_map.is_stable()) {
dependencies()->DependOnStableMap(function_map);
- Node* value = jsgraph()->Constant(function_prototype);
+ Node* value = jsgraph()->Constant(*function_prototype);
ReplaceWithValue(node, value);
return Replace(value);
}
@@ -407,15 +408,15 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
return NoChange();
}
- JSObjectRef receiver_ref(broker(), receiver);
+ JSObjectRef receiver_ref = MakeRef(broker(), receiver);
MapRef receiver_map = receiver_ref.map();
PropertyAccessInfo access_info = PropertyAccessInfo::Invalid(graph()->zone());
if (broker()->is_concurrent_inlining()) {
access_info = broker()->GetPropertyAccessInfo(
receiver_map,
- NameRef(broker(), isolate()->factory()->has_instance_symbol()),
- AccessMode::kLoad);
+ MakeRef(broker(), isolate()->factory()->has_instance_symbol()),
+ AccessMode::kLoad, dependencies());
} else {
AccessInfoFactory access_info_factory(broker(), dependencies(),
graph()->zone());
@@ -457,7 +458,7 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
Handle<JSObject> holder;
bool found_on_proto = access_info.holder().ToHandle(&holder);
JSObjectRef holder_ref =
- found_on_proto ? JSObjectRef(broker(), holder) : receiver_ref;
+ found_on_proto ? MakeRef(broker(), holder) : receiver_ref;
base::Optional<ObjectRef> constant = holder_ref.GetOwnFastDataProperty(
access_info.field_representation(), access_info.field_index());
if (!constant.has_value() || !constant->IsHeapObject() ||
@@ -467,7 +468,7 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
if (found_on_proto) {
dependencies()->DependOnStablePrototypeChains(
access_info.lookup_start_object_maps(), kStartAtPrototype,
- JSObjectRef(broker(), holder));
+ MakeRef(broker(), holder));
}
// Check that {constructor} is actually {receiver}.
@@ -535,7 +536,7 @@ JSNativeContextSpecialization::InferHasInPrototypeChain(
bool all = true;
bool none = true;
for (size_t i = 0; i < receiver_maps.size(); ++i) {
- MapRef map(broker(), receiver_maps[i]);
+ MapRef map = MakeRef(broker(), receiver_maps[i]);
if (result == NodeProperties::kUnreliableMaps && !map.is_stable()) {
return kMayBeInPrototypeChain;
}
@@ -547,15 +548,13 @@ JSNativeContextSpecialization::InferHasInPrototypeChain(
all = false;
break;
}
- if (map.ShouldHaveBeenSerialized() && !map.serialized_prototype()) {
- TRACE_BROKER_MISSING(broker(), "prototype data for map " << map);
- return kMayBeInPrototypeChain;
- }
- if (map.prototype().equals(prototype)) {
+ base::Optional<HeapObjectRef> map_prototype = map.prototype();
+ if (!map_prototype.has_value()) return kMayBeInPrototypeChain;
+ if (map_prototype->equals(prototype)) {
none = false;
break;
}
- map = map.prototype().map();
+ map = map_prototype->map();
// TODO(v8:11457) Support dictionary mode protoypes here.
if (!map.is_stable() || map.is_dictionary_map())
return kMayBeInPrototypeChain;
@@ -628,10 +627,7 @@ Reduction JSNativeContextSpecialization::ReduceJSOrdinaryHasInstance(
// OrdinaryHasInstance on bound functions turns into a recursive invocation
// of the instanceof operator again.
JSBoundFunctionRef function = m.Ref(broker()).AsJSBoundFunction();
- if (function.ShouldHaveBeenSerialized() && !function.serialized()) {
- TRACE_BROKER_MISSING(broker(), "data for JSBoundFunction " << function);
- return NoChange();
- }
+ if (!function.serialized()) return NoChange();
JSReceiverRef bound_target_function = function.bound_target_function();
@@ -651,10 +647,7 @@ Reduction JSNativeContextSpecialization::ReduceJSOrdinaryHasInstance(
// Optimize if we currently know the "prototype" property.
JSFunctionRef function = m.Ref(broker()).AsJSFunction();
- if (function.ShouldHaveBeenSerialized() && !function.serialized()) {
- TRACE_BROKER_MISSING(broker(), "data for JSFunction " << function);
- return NoChange();
- }
+ if (!function.serialized()) return NoChange();
// TODO(neis): Remove the has_prototype_slot condition once the broker is
// always enabled.
@@ -736,10 +729,10 @@ Reduction JSNativeContextSpecialization::ReduceJSResolvePromise(Node* node) {
} else {
// Obtain pre-computed access infos from the broker.
for (auto map : resolution_maps) {
- MapRef map_ref(broker(), map);
+ MapRef map_ref = MakeRef(broker(), map);
access_infos.push_back(broker()->GetPropertyAccessInfo(
- map_ref, NameRef(broker(), isolate()->factory()->then_string()),
- AccessMode::kLoad));
+ map_ref, MakeRef(broker(), isolate()->factory()->then_string()),
+ AccessMode::kLoad, dependencies()));
}
}
PropertyAccessInfo access_info =
@@ -865,7 +858,7 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
simplified()->CheckMaps(
CheckMapsFlag::kNone,
ZoneHandleSet<Map>(
- HeapObjectRef(broker(), global_proxy()).map().object())),
+ MakeRef(broker(), global_proxy()).map().object())),
lookup_start_object, effect, control);
}
@@ -1035,7 +1028,7 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadGlobal(Node* node) {
return Replace(value);
} else if (feedback.IsPropertyCell()) {
return ReduceGlobalAccess(node, nullptr, nullptr, nullptr,
- NameRef(broker(), p.name()), AccessMode::kLoad,
+ MakeRef(broker(), p.name()), AccessMode::kLoad,
nullptr, feedback.property_cell());
} else {
DCHECK(feedback.IsMegamorphic());
@@ -1066,7 +1059,7 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreGlobal(Node* node) {
return Replace(value);
} else if (feedback.IsPropertyCell()) {
return ReduceGlobalAccess(node, nullptr, nullptr, value,
- NameRef(broker(), p.name()), AccessMode::kStore,
+ MakeRef(broker(), p.name()), AccessMode::kStore,
nullptr, feedback.property_cell());
} else {
DCHECK(feedback.IsMegamorphic());
@@ -1188,7 +1181,8 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
// contexts' global proxy, and turn that into a direct access to the
// corresponding global object instead.
if (lookup_start_object_maps.size() == 1) {
- MapRef lookup_start_object_map(broker(), lookup_start_object_maps[0]);
+ MapRef lookup_start_object_map =
+ MakeRef(broker(), lookup_start_object_maps[0]);
if (lookup_start_object_map.equals(
broker()->target_native_context().global_proxy_object().map()) &&
!broker()->target_native_context().global_object().IsDetached()) {
@@ -1201,7 +1195,7 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
{
ZoneVector<PropertyAccessInfo> access_infos_for_feedback(zone());
for (Handle<Map> map_handle : lookup_start_object_maps) {
- MapRef map(broker(), map_handle);
+ MapRef map = MakeRef(broker(), map_handle);
if (map.is_deprecated()) continue;
PropertyAccessInfo access_info = broker()->GetPropertyAccessInfo(
map, feedback.name(), access_mode, dependencies(),
@@ -1468,7 +1462,7 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadNamed(Node* node) {
JSLoadNamedNode n(node);
NamedAccess const& p = n.Parameters();
Node* const receiver = n.object();
- NameRef name(broker(), p.name());
+ NameRef name = MakeRef(broker(), p.name());
// Check if we have a constant receiver.
HeapObjectMatcher m(receiver);
@@ -1478,10 +1472,7 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadNamed(Node* node) {
name.equals(ObjectRef(broker(), factory()->prototype_string()))) {
// Optimize "prototype" property of functions.
JSFunctionRef function = object.AsJSFunction();
- if (function.ShouldHaveBeenSerialized() && !function.serialized()) {
- TRACE_BROKER_MISSING(broker(), "data for function " << function);
- return NoChange();
- }
+ if (!function.serialized()) return NoChange();
// TODO(neis): Remove the has_prototype_slot condition once the broker is
// always enabled.
if (!function.map().has_prototype_slot() || !function.has_prototype() ||
@@ -1511,7 +1502,7 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadNamedFromSuper(
Node* node) {
JSLoadNamedFromSuperNode n(node);
NamedAccess const& p = n.Parameters();
- NameRef name(broker(), p.name());
+ NameRef name = MakeRef(broker(), p.name());
if (!p.feedback().IsValid()) return NoChange();
return ReducePropertyAccess(node, nullptr, name, jsgraph()->Dead(),
@@ -1594,7 +1585,7 @@ Reduction JSNativeContextSpecialization::ReduceJSGetIterator(Node* node) {
const Operator* call_op = javascript()->Call(
JSCallNode::ArityForArgc(0), CallFrequency(), p.callFeedback(),
ConvertReceiverMode::kNotNullOrUndefined, mode,
- CallFeedbackRelation::kRelated);
+ CallFeedbackRelation::kTarget);
Node* call_property =
graph()->NewNode(call_op, load_property, receiver, n.feedback_vector(),
context, frame_state, effect, control);
@@ -1606,7 +1597,7 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreNamed(Node* node) {
JSStoreNamedNode n(node);
NamedAccess const& p = n.Parameters();
if (!p.feedback().IsValid()) return NoChange();
- return ReducePropertyAccess(node, nullptr, NameRef(broker(), p.name()),
+ return ReducePropertyAccess(node, nullptr, MakeRef(broker(), p.name()),
n.value(), FeedbackSource(p.feedback()),
AccessMode::kStore);
}
@@ -1615,7 +1606,7 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreNamedOwn(Node* node) {
JSStoreNamedOwnNode n(node);
StoreNamedOwnParameters const& p = n.Parameters();
if (!p.feedback().IsValid()) return NoChange();
- return ReducePropertyAccess(node, nullptr, NameRef(broker(), p.name()),
+ return ReducePropertyAccess(node, nullptr, MakeRef(broker(), p.name()),
n.value(), FeedbackSource(p.feedback()),
AccessMode::kStoreInLiteral);
}
@@ -1669,7 +1660,7 @@ void JSNativeContextSpecialization::RemoveImpossibleMaps(
maps->erase(
std::remove_if(maps->begin(), maps->end(),
[root_map, this](Handle<Map> map) {
- MapRef map_ref(broker(), map);
+ MapRef map_ref = MakeRef(broker(), map);
return map_ref.is_abandoned_prototype_map() ||
(map_ref.FindRootMap().has_value() &&
!map_ref.FindRootMap()->equals(*root_map));
@@ -1758,7 +1749,7 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
ZoneVector<MapRef> prototype_maps(zone());
for (ElementAccessInfo const& access_info : access_infos) {
for (Handle<Map> map : access_info.lookup_start_object_maps()) {
- MapRef receiver_map(broker(), map);
+ MapRef receiver_map = MakeRef(broker(), map);
// If the {receiver_map} has a prototype and its elements backing
// store is either holey, or we have a potentially growing store,
// then we need to check that all prototypes have stable maps with
@@ -1790,12 +1781,8 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
if (!IsTypedArrayElementsKind(access_info.elements_kind())) continue;
base::Optional<JSTypedArrayRef> typed_array =
GetTypedArrayConstant(broker(), receiver);
- if (typed_array.has_value()) {
- if (typed_array->ShouldHaveBeenSerialized() &&
- !typed_array->serialized()) {
- TRACE_BROKER_MISSING(broker(), "data for typed array " << *typed_array);
- return NoChange();
- }
+ if (typed_array.has_value() && !typed_array->serialized()) {
+ return NoChange();
}
}
@@ -1805,11 +1792,11 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
ElementAccessInfo access_info = access_infos.front();
// Perform possible elements kind transitions.
- MapRef transition_target(broker(),
- access_info.lookup_start_object_maps().front());
+ MapRef transition_target =
+ MakeRef(broker(), access_info.lookup_start_object_maps().front());
for (auto source : access_info.transition_sources()) {
DCHECK_EQ(access_info.lookup_start_object_maps().size(), 1);
- MapRef transition_source(broker(), source);
+ MapRef transition_source = MakeRef(broker(), source);
effect = graph()->NewNode(
simplified()->TransitionElementsKind(ElementsTransition(
IsSimpleMapChangeTransition(transition_source.elements_kind(),
@@ -1857,10 +1844,10 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
Node* this_control = fallthrough_control;
// Perform possible elements kind transitions.
- MapRef transition_target(broker(),
- access_info.lookup_start_object_maps().front());
+ MapRef transition_target =
+ MakeRef(broker(), access_info.lookup_start_object_maps().front());
for (auto source : access_info.transition_sources()) {
- MapRef transition_source(broker(), source);
+ MapRef transition_source = MakeRef(broker(), source);
DCHECK_EQ(access_info.lookup_start_object_maps().size(), 1);
this_effect = graph()->NewNode(
simplified()->TransitionElementsKind(ElementsTransition(
@@ -2229,7 +2216,7 @@ Node* JSNativeContextSpecialization::InlinePropertyGetterCall(
// For fast mode holders we recorded dependencies in BuildPropertyLoad.
for (const Handle<Map> map : access_info.lookup_start_object_maps()) {
dependencies()->DependOnConstantInDictionaryPrototypeChain(
- MapRef{broker(), map}, NameRef{broker(), access_info.name()},
+ MakeRef(broker(), map), MakeRef(broker(), access_info.name()),
constant, PropertyKind::kAccessor);
}
}
@@ -2362,7 +2349,7 @@ JSNativeContextSpecialization::BuildPropertyLoad(
!access_info.HasDictionaryHolder()) {
dependencies()->DependOnStablePrototypeChains(
access_info.lookup_start_object_maps(), kStartAtPrototype,
- JSObjectRef(broker(), holder));
+ MakeRef(broker(), holder));
}
// Generate the actual property access.
@@ -2413,7 +2400,7 @@ JSNativeContextSpecialization::BuildPropertyTest(
if (access_info.holder().ToHandle(&holder)) {
dependencies()->DependOnStablePrototypeChains(
access_info.lookup_start_object_maps(), kStartAtPrototype,
- JSObjectRef(broker(), holder));
+ MakeRef(broker(), holder));
}
Node* value = access_info.IsNotFound() ? jsgraph()->FalseConstant()
@@ -2457,7 +2444,7 @@ JSNativeContextSpecialization::BuildPropertyStore(
DCHECK_NE(AccessMode::kStoreInLiteral, access_mode);
dependencies()->DependOnStablePrototypeChains(
access_info.lookup_start_object_maps(), kStartAtPrototype,
- JSObjectRef(broker(), holder));
+ MakeRef(broker(), holder));
}
DCHECK(!access_info.IsNotFound());
@@ -2508,7 +2495,7 @@ JSNativeContextSpecialization::BuildPropertyStore(
a.Allocate(HeapNumber::kSize, AllocationType::kYoung,
Type::OtherInternal());
a.Store(AccessBuilder::ForMap(),
- MapRef(broker(), factory()->heap_number_map()));
+ MakeRef(broker(), factory()->heap_number_map()));
FieldAccess value_field_access = AccessBuilder::ForHeapNumberValue();
value_field_access.const_field_info = field_access.const_field_info;
a.Store(value_field_access, value);
@@ -2606,6 +2593,7 @@ JSNativeContextSpecialization::BuildPropertyStore(
case MachineRepresentation::kWord64:
case MachineRepresentation::kFloat32:
case MachineRepresentation::kSimd128:
+ case MachineRepresentation::kMapWord:
UNREACHABLE();
break;
}
@@ -2614,7 +2602,7 @@ JSNativeContextSpecialization::BuildPropertyStore(
if (access_info.transition_map().ToHandle(&transition_map)) {
// Check if we need to grow the properties backing store
// with this transitioning store.
- MapRef transition_map_ref(broker(), transition_map);
+ MapRef transition_map_ref = MakeRef(broker(), transition_map);
MapRef original_map = transition_map_ref.GetBackPointer().AsMap();
if (original_map.UnusedPropertyFields() == 0) {
DCHECK(!field_index.is_inobject());
@@ -3464,8 +3452,8 @@ bool JSNativeContextSpecialization::CanTreatHoleAsUndefined(
// or Object.prototype objects as their prototype (in any of the current
// native contexts, as the global Array protector works isolate-wide).
for (Handle<Map> map : receiver_maps) {
- MapRef receiver_map(broker(), map);
- ObjectRef receiver_prototype = receiver_map.prototype();
+ MapRef receiver_map = MakeRef(broker(), map);
+ ObjectRef receiver_prototype = receiver_map.prototype().value();
if (!receiver_prototype.IsJSObject() ||
!broker()->IsArrayOrObjectPrototype(receiver_prototype.AsJSObject())) {
return false;
@@ -3490,7 +3478,7 @@ bool JSNativeContextSpecialization::InferMaps(
// For untrusted maps, we can still use the information
// if the maps are stable.
for (size_t i = 0; i < map_set.size(); ++i) {
- MapRef map(broker(), map_set[i]);
+ MapRef map = MakeRef(broker(), map_set[i]);
if (!map.is_stable()) return false;
}
for (size_t i = 0; i < map_set.size(); ++i) {
diff --git a/chromium/v8/src/compiler/js-operator.cc b/chromium/v8/src/compiler/js-operator.cc
index 4f491429f48..18380f4abd0 100644
--- a/chromium/v8/src/compiler/js-operator.cc
+++ b/chromium/v8/src/compiler/js-operator.cc
@@ -8,6 +8,7 @@
#include "src/base/lazy-instance.h"
#include "src/compiler/js-graph.h"
+#include "src/compiler/js-heap-broker.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/operator.h"
#include "src/handles/handles-inl.h"
@@ -22,7 +23,7 @@ namespace {
// Returns properties for the given binary op.
constexpr Operator::Properties BinopProperties(Operator::Opcode opcode) {
- CONSTEXPR_DCHECK(JSOperator::IsBinaryWithFeedback(opcode));
+ DCHECK(JSOperator::IsBinaryWithFeedback(opcode));
return opcode == IrOpcode::kJSStrictEqual ? Operator::kPure
: Operator::kNoProperties;
}
@@ -41,7 +42,7 @@ FeedbackCellRef JSCreateClosureNode::GetFeedbackCellRefChecked(
JSHeapBroker* broker) const {
HeapObjectMatcher m(feedback_cell());
CHECK(m.HasResolvedValue());
- return FeedbackCellRef(broker, m.ResolvedValue());
+ return MakeRef(broker, Handle<FeedbackCell>::cast(m.ResolvedValue()));
}
std::ostream& operator<<(std::ostream& os, CallFrequency const& f) {
@@ -338,12 +339,13 @@ bool operator!=(LoadGlobalParameters const& lhs,
size_t hash_value(LoadGlobalParameters const& p) {
- return base::hash_combine(p.name().location(), p.typeof_mode());
+ return base::hash_combine(p.name().location(),
+ static_cast<int>(p.typeof_mode()));
}
std::ostream& operator<<(std::ostream& os, LoadGlobalParameters const& p) {
- return os << Brief(*p.name()) << ", " << p.typeof_mode();
+ return os << Brief(*p.name()) << ", " << static_cast<int>(p.typeof_mode());
}
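
The static_cast<int> added to hash_value and operator<< above goes hand in hand with TypeofMode becoming a scoped enum (the LoadGlobal default below changes to TypeofMode::kNotInside): scoped enums no longer convert implicitly to integral types, so hashing and streaming need an explicit cast. A minimal standalone sketch of the same pattern, with made-up names:

#include <functional>
#include <iostream>

enum class TypeofMode { kNotInside, kInside };  // scoped enum: no implicit int conversion

// Stand-in for base::hash_combine, reduced to a single int argument.
size_t HashCombine(size_t seed, int value) {
  return seed ^ (std::hash<int>{}(value) + 0x9e3779b9 + (seed << 6) + (seed >> 2));
}

int main() {
  TypeofMode mode = TypeofMode::kNotInside;
  // HashCombine(0, mode);  // would not compile: a scoped enum needs an explicit cast
  size_t hash = HashCombine(0, static_cast<int>(mode));
  std::cout << static_cast<int>(mode) << " " << hash << "\n";  // streaming needs the cast too
  return 0;
}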
diff --git a/chromium/v8/src/compiler/js-operator.h b/chromium/v8/src/compiler/js-operator.h
index 4e447149bc6..8080d4caefe 100644
--- a/chromium/v8/src/compiler/js-operator.h
+++ b/chromium/v8/src/compiler/js-operator.h
@@ -290,9 +290,9 @@ class CallParameters final {
}
using ArityField = base::BitField<size_t, 0, 27>;
- using CallFeedbackRelationField = base::BitField<CallFeedbackRelation, 27, 1>;
- using SpeculationModeField = base::BitField<SpeculationMode, 28, 1>;
- using ConvertReceiverModeField = base::BitField<ConvertReceiverMode, 29, 2>;
+ using CallFeedbackRelationField = base::BitField<CallFeedbackRelation, 27, 2>;
+ using SpeculationModeField = base::BitField<SpeculationMode, 29, 1>;
+ using ConvertReceiverModeField = base::BitField<ConvertReceiverMode, 30, 2>;
uint32_t const bit_field_;
CallFrequency const frequency_;
@@ -308,7 +308,7 @@ const CallParameters& CallParametersOf(const Operator* op);
// Defines the arity and the ID for a runtime function call. This is used as a
// parameter by JSCallRuntime operators.
-class CallRuntimeParameters final {
+class V8_EXPORT_PRIVATE CallRuntimeParameters final {
public:
CallRuntimeParameters(Runtime::FunctionId id, size_t arity)
: id_(id), arity_(arity) {}
@@ -328,8 +328,8 @@ size_t hash_value(CallRuntimeParameters const&);
std::ostream& operator<<(std::ostream&, CallRuntimeParameters const&);
-const CallRuntimeParameters& CallRuntimeParametersOf(const Operator* op);
-
+V8_EXPORT_PRIVATE const CallRuntimeParameters& CallRuntimeParametersOf(
+ const Operator* op);
// Defines the location of a context slot relative to a specific scope. This is
// used as a parameter by JSLoadContext and JSStoreContext operators and allows
@@ -951,12 +951,12 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
CallFrequency const& frequency,
const FeedbackSource& feedback = FeedbackSource{},
SpeculationMode speculation_mode = SpeculationMode::kDisallowSpeculation,
- CallFeedbackRelation feedback_relation = CallFeedbackRelation::kRelated);
+ CallFeedbackRelation feedback_relation = CallFeedbackRelation::kTarget);
const Operator* CallWithSpread(
uint32_t arity, CallFrequency const& frequency = CallFrequency(),
FeedbackSource const& feedback = FeedbackSource(),
SpeculationMode speculation_mode = SpeculationMode::kDisallowSpeculation,
- CallFeedbackRelation feedback_relation = CallFeedbackRelation::kRelated);
+ CallFeedbackRelation feedback_relation = CallFeedbackRelation::kTarget);
const Operator* CallRuntime(Runtime::FunctionId id);
const Operator* CallRuntime(Runtime::FunctionId id, size_t arity);
const Operator* CallRuntime(const Runtime::Function* function, size_t arity);
@@ -1002,7 +1002,7 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
const Operator* LoadGlobal(const Handle<Name>& name,
const FeedbackSource& feedback,
- TypeofMode typeof_mode = NOT_INSIDE_TYPEOF);
+ TypeofMode typeof_mode = TypeofMode::kNotInside);
const Operator* StoreGlobal(LanguageMode language_mode,
const Handle<Name>& name,
const FeedbackSource& feedback);
@@ -1112,7 +1112,7 @@ class JSNodeWrapperBase : public NodeWrapper {
class JSUnaryOpNode final : public JSNodeWrapperBase {
public:
explicit constexpr JSUnaryOpNode(Node* node) : JSNodeWrapperBase(node) {
- CONSTEXPR_DCHECK(JSOperator::IsUnaryWithFeedback(node->opcode()));
+ DCHECK(JSOperator::IsUnaryWithFeedback(node->opcode()));
}
#define INPUTS(V) \
@@ -1129,7 +1129,7 @@ JS_UNOP_WITH_FEEDBACK(V)
class JSBinaryOpNode final : public JSNodeWrapperBase {
public:
explicit constexpr JSBinaryOpNode(Node* node) : JSNodeWrapperBase(node) {
- CONSTEXPR_DCHECK(JSOperator::IsBinaryWithFeedback(node->opcode()));
+ DCHECK(JSOperator::IsBinaryWithFeedback(node->opcode()));
}
const FeedbackParameter& Parameters() const {
@@ -1151,7 +1151,7 @@ JS_BINOP_WITH_FEEDBACK(V)
class JSGetIteratorNode final : public JSNodeWrapperBase {
public:
explicit constexpr JSGetIteratorNode(Node* node) : JSNodeWrapperBase(node) {
- CONSTEXPR_DCHECK(node->opcode() == IrOpcode::kJSGetIterator);
+ DCHECK_EQ(IrOpcode::kJSGetIterator, node->opcode());
}
const GetIteratorParameters& Parameters() const {
@@ -1168,7 +1168,7 @@ class JSGetIteratorNode final : public JSNodeWrapperBase {
class JSCloneObjectNode final : public JSNodeWrapperBase {
public:
explicit constexpr JSCloneObjectNode(Node* node) : JSNodeWrapperBase(node) {
- CONSTEXPR_DCHECK(node->opcode() == IrOpcode::kJSCloneObject);
+ DCHECK_EQ(IrOpcode::kJSCloneObject, node->opcode());
}
const CloneObjectParameters& Parameters() const {
@@ -1186,7 +1186,7 @@ class JSGetTemplateObjectNode final : public JSNodeWrapperBase {
public:
explicit constexpr JSGetTemplateObjectNode(Node* node)
: JSNodeWrapperBase(node) {
- CONSTEXPR_DCHECK(node->opcode() == IrOpcode::kJSGetTemplateObject);
+ DCHECK_EQ(IrOpcode::kJSGetTemplateObject, node->opcode());
}
const GetTemplateObjectParameters& Parameters() const {
@@ -1202,9 +1202,9 @@ class JSCreateLiteralOpNode final : public JSNodeWrapperBase {
public:
explicit constexpr JSCreateLiteralOpNode(Node* node)
: JSNodeWrapperBase(node) {
- CONSTEXPR_DCHECK(node->opcode() == IrOpcode::kJSCreateLiteralArray ||
- node->opcode() == IrOpcode::kJSCreateLiteralObject ||
- node->opcode() == IrOpcode::kJSCreateLiteralRegExp);
+ DCHECK(node->opcode() == IrOpcode::kJSCreateLiteralArray ||
+ node->opcode() == IrOpcode::kJSCreateLiteralObject ||
+ node->opcode() == IrOpcode::kJSCreateLiteralRegExp);
}
const CreateLiteralParameters& Parameters() const {
@@ -1223,7 +1223,7 @@ using JSCreateLiteralRegExpNode = JSCreateLiteralOpNode;
class JSHasPropertyNode final : public JSNodeWrapperBase {
public:
explicit constexpr JSHasPropertyNode(Node* node) : JSNodeWrapperBase(node) {
- CONSTEXPR_DCHECK(node->opcode() == IrOpcode::kJSHasProperty);
+ DCHECK_EQ(IrOpcode::kJSHasProperty, node->opcode());
}
const PropertyAccess& Parameters() const {
@@ -1241,7 +1241,7 @@ class JSHasPropertyNode final : public JSNodeWrapperBase {
class JSLoadPropertyNode final : public JSNodeWrapperBase {
public:
explicit constexpr JSLoadPropertyNode(Node* node) : JSNodeWrapperBase(node) {
- CONSTEXPR_DCHECK(node->opcode() == IrOpcode::kJSLoadProperty);
+ DCHECK_EQ(IrOpcode::kJSLoadProperty, node->opcode());
}
const PropertyAccess& Parameters() const {
@@ -1259,7 +1259,7 @@ class JSLoadPropertyNode final : public JSNodeWrapperBase {
class JSStorePropertyNode final : public JSNodeWrapperBase {
public:
explicit constexpr JSStorePropertyNode(Node* node) : JSNodeWrapperBase(node) {
- CONSTEXPR_DCHECK(node->opcode() == IrOpcode::kJSStoreProperty);
+ DCHECK_EQ(IrOpcode::kJSStoreProperty, node->opcode());
}
const PropertyAccess& Parameters() const {
@@ -1284,14 +1284,14 @@ class JSCallOrConstructNode : public JSNodeWrapperBase {
public:
explicit constexpr JSCallOrConstructNode(Node* node)
: JSNodeWrapperBase(node) {
- CONSTEXPR_DCHECK(node->opcode() == IrOpcode::kJSCall ||
- node->opcode() == IrOpcode::kJSCallWithArrayLike ||
- node->opcode() == IrOpcode::kJSCallWithSpread ||
- node->opcode() == IrOpcode::kJSConstruct ||
- node->opcode() == IrOpcode::kJSConstructWithArrayLike ||
- node->opcode() == IrOpcode::kJSConstructWithSpread
+ DCHECK(node->opcode() == IrOpcode::kJSCall ||
+ node->opcode() == IrOpcode::kJSCallWithArrayLike ||
+ node->opcode() == IrOpcode::kJSCallWithSpread ||
+ node->opcode() == IrOpcode::kJSConstruct ||
+ node->opcode() == IrOpcode::kJSConstructWithArrayLike ||
+ node->opcode() == IrOpcode::kJSConstructWithSpread
#if V8_ENABLE_WEBASSEMBLY
- || node->opcode() == IrOpcode::kJSWasmCall
+ || node->opcode() == IrOpcode::kJSWasmCall
#endif // V8_ENABLE_WEBASSEMBLY
); // NOLINT(whitespace/parens)
}
@@ -1373,7 +1373,7 @@ template <int kOpcode>
class JSCallNodeBase final : public JSCallOrConstructNode {
public:
explicit constexpr JSCallNodeBase(Node* node) : JSCallOrConstructNode(node) {
- CONSTEXPR_DCHECK(node->opcode() == kOpcode);
+ DCHECK_EQ(kOpcode, node->opcode());
}
const CallParameters& Parameters() const {
@@ -1405,7 +1405,7 @@ using JSCallWithArrayLikeNode = JSCallNodeBase<IrOpcode::kJSCallWithArrayLike>;
class JSWasmCallNode final : public JSCallOrConstructNode {
public:
explicit constexpr JSWasmCallNode(Node* node) : JSCallOrConstructNode(node) {
- CONSTEXPR_DCHECK(node->opcode() == IrOpcode::kJSWasmCall);
+ DCHECK_EQ(IrOpcode::kJSWasmCall, node->opcode());
}
const JSWasmCallParameters& Parameters() const {
@@ -1437,7 +1437,7 @@ class JSConstructNodeBase final : public JSCallOrConstructNode {
public:
explicit constexpr JSConstructNodeBase(Node* node)
: JSCallOrConstructNode(node) {
- CONSTEXPR_DCHECK(node->opcode() == kOpcode);
+ DCHECK_EQ(kOpcode, node->opcode());
}
const ConstructParameters& Parameters() const {
@@ -1470,7 +1470,7 @@ using JSConstructWithArrayLikeNode =
class JSLoadNamedNode final : public JSNodeWrapperBase {
public:
explicit constexpr JSLoadNamedNode(Node* node) : JSNodeWrapperBase(node) {
- CONSTEXPR_DCHECK(node->opcode() == IrOpcode::kJSLoadNamed);
+ DCHECK_EQ(IrOpcode::kJSLoadNamed, node->opcode());
}
const NamedAccess& Parameters() const { return NamedAccessOf(node()->op()); }
@@ -1486,7 +1486,7 @@ class JSLoadNamedFromSuperNode final : public JSNodeWrapperBase {
public:
explicit constexpr JSLoadNamedFromSuperNode(Node* node)
: JSNodeWrapperBase(node) {
- CONSTEXPR_DCHECK(node->opcode() == IrOpcode::kJSLoadNamedFromSuper);
+ DCHECK_EQ(IrOpcode::kJSLoadNamedFromSuper, node->opcode());
}
const NamedAccess& Parameters() const { return NamedAccessOf(node()->op()); }
@@ -1502,7 +1502,7 @@ class JSLoadNamedFromSuperNode final : public JSNodeWrapperBase {
class JSStoreNamedNode final : public JSNodeWrapperBase {
public:
explicit constexpr JSStoreNamedNode(Node* node) : JSNodeWrapperBase(node) {
- CONSTEXPR_DCHECK(node->opcode() == IrOpcode::kJSStoreNamed);
+ DCHECK_EQ(IrOpcode::kJSStoreNamed, node->opcode());
}
const NamedAccess& Parameters() const { return NamedAccessOf(node()->op()); }
@@ -1518,7 +1518,7 @@ class JSStoreNamedNode final : public JSNodeWrapperBase {
class JSStoreNamedOwnNode final : public JSNodeWrapperBase {
public:
explicit constexpr JSStoreNamedOwnNode(Node* node) : JSNodeWrapperBase(node) {
- CONSTEXPR_DCHECK(node->opcode() == IrOpcode::kJSStoreNamedOwn);
+ DCHECK_EQ(IrOpcode::kJSStoreNamedOwn, node->opcode());
}
const StoreNamedOwnParameters& Parameters() const {
@@ -1536,7 +1536,7 @@ class JSStoreNamedOwnNode final : public JSNodeWrapperBase {
class JSStoreGlobalNode final : public JSNodeWrapperBase {
public:
explicit constexpr JSStoreGlobalNode(Node* node) : JSNodeWrapperBase(node) {
- CONSTEXPR_DCHECK(node->opcode() == IrOpcode::kJSStoreGlobal);
+ DCHECK_EQ(IrOpcode::kJSStoreGlobal, node->opcode());
}
const StoreGlobalParameters& Parameters() const {
@@ -1553,7 +1553,7 @@ class JSStoreGlobalNode final : public JSNodeWrapperBase {
class JSLoadGlobalNode final : public JSNodeWrapperBase {
public:
explicit constexpr JSLoadGlobalNode(Node* node) : JSNodeWrapperBase(node) {
- CONSTEXPR_DCHECK(node->opcode() == IrOpcode::kJSLoadGlobal);
+ DCHECK_EQ(IrOpcode::kJSLoadGlobal, node->opcode());
}
const LoadGlobalParameters& Parameters() const {
@@ -1569,7 +1569,7 @@ class JSCreateEmptyLiteralArrayNode final : public JSNodeWrapperBase {
public:
explicit constexpr JSCreateEmptyLiteralArrayNode(Node* node)
: JSNodeWrapperBase(node) {
- CONSTEXPR_DCHECK(node->opcode() == IrOpcode::kJSCreateEmptyLiteralArray);
+ DCHECK_EQ(IrOpcode::kJSCreateEmptyLiteralArray, node->opcode());
}
const FeedbackParameter& Parameters() const {
@@ -1585,7 +1585,7 @@ class JSStoreDataPropertyInLiteralNode final : public JSNodeWrapperBase {
public:
explicit constexpr JSStoreDataPropertyInLiteralNode(Node* node)
: JSNodeWrapperBase(node) {
- CONSTEXPR_DCHECK(node->opcode() == IrOpcode::kJSStoreDataPropertyInLiteral);
+ DCHECK_EQ(IrOpcode::kJSStoreDataPropertyInLiteral, node->opcode());
}
const FeedbackParameter& Parameters() const {
@@ -1606,7 +1606,7 @@ class JSStoreInArrayLiteralNode final : public JSNodeWrapperBase {
public:
explicit constexpr JSStoreInArrayLiteralNode(Node* node)
: JSNodeWrapperBase(node) {
- CONSTEXPR_DCHECK(node->opcode() == IrOpcode::kJSStoreInArrayLiteral);
+ DCHECK_EQ(IrOpcode::kJSStoreInArrayLiteral, node->opcode());
}
const FeedbackParameter& Parameters() const {
@@ -1625,7 +1625,7 @@ class JSStoreInArrayLiteralNode final : public JSNodeWrapperBase {
class JSCreateClosureNode final : public JSNodeWrapperBase {
public:
explicit constexpr JSCreateClosureNode(Node* node) : JSNodeWrapperBase(node) {
- CONSTEXPR_DCHECK(node->opcode() == IrOpcode::kJSCreateClosure);
+ DCHECK_EQ(IrOpcode::kJSCreateClosure, node->opcode());
}
const CreateClosureParameters& Parameters() const {
@@ -1642,7 +1642,7 @@ class JSCreateClosureNode final : public JSNodeWrapperBase {
class JSForInPrepareNode final : public JSNodeWrapperBase {
public:
explicit constexpr JSForInPrepareNode(Node* node) : JSNodeWrapperBase(node) {
- CONSTEXPR_DCHECK(node->opcode() == IrOpcode::kJSForInPrepare);
+ DCHECK_EQ(IrOpcode::kJSForInPrepare, node->opcode());
}
const ForInParameters& Parameters() const {
@@ -1659,7 +1659,7 @@ class JSForInPrepareNode final : public JSNodeWrapperBase {
class JSForInNextNode final : public JSNodeWrapperBase {
public:
explicit constexpr JSForInNextNode(Node* node) : JSNodeWrapperBase(node) {
- CONSTEXPR_DCHECK(node->opcode() == IrOpcode::kJSForInNext);
+ DCHECK_EQ(IrOpcode::kJSForInNext, node->opcode());
}
const ForInParameters& Parameters() const {
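
The CallParameters layout change near the top of this header widens CallFeedbackRelationField from one bit to two and shifts SpeculationModeField and ConvertReceiverModeField up, keeping the packed word at 27 + 2 + 1 + 2 = 32 bits. A rough sketch of that encode/decode arithmetic with plain shifts and masks (the helpers below are illustrative, not V8's base::BitField):

#include <cassert>
#include <cstdint>

// New layout from the diff: bits [0,27) arity, [27,29) call feedback relation,
// [29,30) speculation mode, [30,32) convert receiver mode.
constexpr uint32_t Encode(uint32_t arity, uint32_t feedback_relation,
                          uint32_t speculation_mode, uint32_t receiver_mode) {
  return (arity & ((1u << 27) - 1)) | (feedback_relation << 27) |
         (speculation_mode << 29) | (receiver_mode << 30);
}

constexpr uint32_t DecodeFeedbackRelation(uint32_t bits) {
  return (bits >> 27) & 0x3;  // two bits, so a third enum value now fits
}

int main() {
  uint32_t packed = Encode(/*arity=*/5, /*feedback_relation=*/2,
                           /*speculation_mode=*/1, /*receiver_mode=*/3);
  assert(DecodeFeedbackRelation(packed) == 2);
  return 0;
}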
diff --git a/chromium/v8/src/compiler/js-typed-lowering.cc b/chromium/v8/src/compiler/js-typed-lowering.cc
index 008aacdb392..ebd839acabd 100644
--- a/chromium/v8/src/compiler/js-typed-lowering.cc
+++ b/chromium/v8/src/compiler/js-typed-lowering.cc
@@ -7,6 +7,7 @@
#include "src/ast/modules.h"
#include "src/builtins/builtins-utils.h"
#include "src/codegen/code-factory.h"
+#include "src/codegen/interface-descriptors-inl.h"
#include "src/compiler/access-builder.h"
#include "src/compiler/allocation-builder.h"
#include "src/compiler/graph-assembler.h"
@@ -592,8 +593,8 @@ Reduction JSTypedLowering::ReduceJSAdd(Node* node) {
Node* length =
graph()->NewNode(simplified()->NumberAdd(), left_length, right_length);
- PropertyCellRef string_length_protector(
- broker(), factory()->string_length_protector());
+ PropertyCellRef string_length_protector =
+ MakeRef(broker(), factory()->string_length_protector());
string_length_protector.SerializeAsProtector();
if (string_length_protector.value().AsSmi() ==
@@ -1171,8 +1172,8 @@ Reduction JSTypedLowering::ReduceJSLoadNamed(Node* node) {
JSLoadNamedNode n(node);
Node* receiver = n.object();
Type receiver_type = NodeProperties::GetType(receiver);
- NameRef name(broker(), NamedAccessOf(node->op()).name());
- NameRef length_str(broker(), factory()->length_string());
+ NameRef name = MakeRef(broker(), NamedAccessOf(node->op()).name());
+ NameRef length_str = MakeRef(broker(), factory()->length_string());
// Optimize "length" property of strings.
if (name.equals(length_str) && receiver_type.Is(Type::String())) {
Node* value = graph()->NewNode(simplified()->StringLength(), receiver);
@@ -1541,8 +1542,8 @@ void ReduceBuiltin(JSGraph* jsgraph, Node* node, int builtin_index, int arity,
DCHECK(Builtins::IsCpp(builtin_index));
const bool has_builtin_exit_frame = true;
- Node* stub = jsgraph->CEntryStubConstant(1, kDontSaveFPRegs, kArgvOnStack,
- has_builtin_exit_frame);
+ Node* stub = jsgraph->CEntryStubConstant(
+ 1, SaveFPRegsMode::kIgnore, ArgvMode::kStack, has_builtin_exit_frame);
node->ReplaceInput(0, stub);
const int argc = arity + BuiltinArguments::kNumExtraArgsWithReceiver;
@@ -1628,10 +1629,10 @@ Reduction JSTypedLowering::ReduceJSConstruct(Node* node) {
// Patch {node} to an indirect call via the {function}s construct stub.
bool use_builtin_construct_stub = function.shared().construct_as_builtin();
- CodeRef code(broker(),
- use_builtin_construct_stub
- ? BUILTIN_CODE(isolate(), JSBuiltinsConstructStub)
- : BUILTIN_CODE(isolate(), JSConstructStubGeneric));
+ CodeRef code = MakeRef(
+ broker(), use_builtin_construct_stub
+ ? BUILTIN_CODE(isolate(), JSBuiltinsConstructStub)
+ : BUILTIN_CODE(isolate(), JSConstructStubGeneric));
STATIC_ASSERT(JSConstructNode::TargetIndex() == 0);
STATIC_ASSERT(JSConstructNode::NewTargetIndex() == 1);
node->RemoveInput(n.FeedbackVectorIndex());
@@ -1712,9 +1713,9 @@ Reduction JSTypedLowering::ReduceJSCall(Node* node) {
} else if (target->opcode() == IrOpcode::kJSCreateClosure) {
CreateClosureParameters const& ccp =
JSCreateClosureNode{target}.Parameters();
- shared = SharedFunctionInfoRef(broker(), ccp.shared_info());
+ shared = MakeRef(broker(), ccp.shared_info());
} else if (target->opcode() == IrOpcode::kCheckClosure) {
- FeedbackCellRef cell(broker(), FeedbackCellOf(target->op()));
+ FeedbackCellRef cell = MakeRef(broker(), FeedbackCellOf(target->op()));
base::Optional<FeedbackVectorRef> feedback_vector = cell.value();
if (feedback_vector.has_value()) {
shared = feedback_vector->shared_function_info();
@@ -1723,6 +1724,9 @@ Reduction JSTypedLowering::ReduceJSCall(Node* node) {
if (shared.has_value()) {
// Do not inline the call if we need to check whether to break at entry.
+ // If this state changes during background compilation, the compilation
+ // job will be aborted from the main thread (see
+ // Debug::PrepareFunctionForDebugExecution()).
if (shared->HasBreakInfo()) return NoChange();
// Class constructors are callable, but [[Call]] will raise an exception.
diff --git a/chromium/v8/src/compiler/load-elimination.cc b/chromium/v8/src/compiler/load-elimination.cc
index 2a0189ae125..377654e4215 100644
--- a/chromium/v8/src/compiler/load-elimination.cc
+++ b/chromium/v8/src/compiler/load-elimination.cc
@@ -1076,6 +1076,7 @@ Reduction LoadElimination::ReduceLoadElement(Node* node) {
case MachineRepresentation::kTaggedSigned:
case MachineRepresentation::kTaggedPointer:
case MachineRepresentation::kTagged:
+ case MachineRepresentation::kMapWord:
if (Node* replacement = state->LookupElement(
object, index, access.machine_type.representation())) {
// Make sure we don't resurrect dead {replacement} nodes.
@@ -1131,6 +1132,7 @@ Reduction LoadElimination::ReduceStoreElement(Node* node) {
case MachineRepresentation::kTaggedSigned:
case MachineRepresentation::kTaggedPointer:
case MachineRepresentation::kTagged:
+ case MachineRepresentation::kMapWord:
state = state->AddElement(object, index, new_value,
access.machine_type.representation(), zone());
break;
@@ -1424,6 +1426,7 @@ LoadElimination::IndexRange LoadElimination::FieldIndexOf(
case MachineRepresentation::kTaggedSigned:
case MachineRepresentation::kTaggedPointer:
case MachineRepresentation::kTagged:
+ case MachineRepresentation::kMapWord:
case MachineRepresentation::kCompressedPointer:
case MachineRepresentation::kCompressed:
break;
diff --git a/chromium/v8/src/compiler/loop-analysis.cc b/chromium/v8/src/compiler/loop-analysis.cc
index 0d52e780049..ee56a665db7 100644
--- a/chromium/v8/src/compiler/loop-analysis.cc
+++ b/chromium/v8/src/compiler/loop-analysis.cc
@@ -547,7 +547,6 @@ LoopTree* LoopFinder::BuildLoopTree(Graph* graph, TickCounter* tick_counter,
ZoneUnorderedSet<Node*>* LoopFinder::FindUnnestedLoopFromHeader(
Node* loop_header, Zone* zone, size_t max_size) {
auto* visited = zone->New<ZoneUnorderedSet<Node*>>(zone);
-
std::vector<Node*> queue;
DCHECK(loop_header->opcode() == IrOpcode::kLoop);
@@ -589,6 +588,30 @@ ZoneUnorderedSet<Node*>* LoopFinder::FindUnnestedLoopFromHeader(
}
}
+ // Check that there is no floating control other than direct nodes to start().
+ // We do this by checking that all non-start control inputs of loop nodes are
+ // also in the loop.
+ // TODO(manoskouk): This is a safety check. Consider making it DEBUG-only when
+ // we are confident there is no incompatible floating control generated in
+ // wasm.
+ for (Node* node : *visited) {
+ // The loop header is allowed to point outside the loop.
+ if (node == loop_header) continue;
+
+ for (Edge edge : node->input_edges()) {
+ Node* input = edge.to();
+ if (NodeProperties::IsControlEdge(edge) && visited->count(input) == 0 &&
+ input->opcode() != IrOpcode::kStart) {
+ FATAL(
+ "Floating control detected in wasm turbofan graph: Node #%d:%s is "
+ "inside loop headed by #%d, but its control dependency #%d:%s is "
+ "outside",
+ node->id(), node->op()->mnemonic(), loop_header->id(), input->id(),
+ input->op()->mnemonic());
+ }
+ }
+ }
+
return visited;
}
diff --git a/chromium/v8/src/compiler/machine-graph-verifier.cc b/chromium/v8/src/compiler/machine-graph-verifier.cc
index cabf23f4d55..3aee3a8d35c 100644
--- a/chromium/v8/src/compiler/machine-graph-verifier.cc
+++ b/chromium/v8/src/compiler/machine-graph-verifier.cc
@@ -999,6 +999,7 @@ class MachineRepresentationChecker {
return IsAnyTagged(actual);
case MachineRepresentation::kCompressed:
return IsAnyCompressed(actual);
+ case MachineRepresentation::kMapWord:
case MachineRepresentation::kTaggedSigned:
case MachineRepresentation::kTaggedPointer:
// TODO(turbofan): At the moment, the machine graph doesn't contain
diff --git a/chromium/v8/src/compiler/machine-operator-reducer.cc b/chromium/v8/src/compiler/machine-operator-reducer.cc
index 5d61dfac6ab..3e2bacf90a6 100644
--- a/chromium/v8/src/compiler/machine-operator-reducer.cc
+++ b/chromium/v8/src/compiler/machine-operator-reducer.cc
@@ -1718,11 +1718,21 @@ Reduction MachineOperatorReducer::ReduceWordNAnd(Node* node) {
namespace {
// Represents an operation of the form `(source & mask) == masked_value`.
+// where each bit set in masked_value also has to be set in mask.
struct BitfieldCheck {
- Node* source;
- uint32_t mask;
- uint32_t masked_value;
- bool truncate_from_64_bit;
+ Node* const source;
+ uint32_t const mask;
+ uint32_t const masked_value;
+ bool const truncate_from_64_bit;
+
+ BitfieldCheck(Node* source, uint32_t mask, uint32_t masked_value,
+ bool truncate_from_64_bit)
+ : source(source),
+ mask(mask),
+ masked_value(masked_value),
+ truncate_from_64_bit(truncate_from_64_bit) {
+ CHECK_EQ(masked_value & ~mask, 0);
+ }
static base::Optional<BitfieldCheck> Detect(Node* node) {
// There are two patterns to check for here:
@@ -1737,14 +1747,16 @@ struct BitfieldCheck {
if (eq.left().IsWord32And()) {
Uint32BinopMatcher mand(eq.left().node());
if (mand.right().HasResolvedValue() && eq.right().HasResolvedValue()) {
- BitfieldCheck result{mand.left().node(), mand.right().ResolvedValue(),
- eq.right().ResolvedValue(), false};
+ uint32_t mask = mand.right().ResolvedValue();
+ uint32_t masked_value = eq.right().ResolvedValue();
+ if ((masked_value & ~mask) != 0) return {};
if (mand.left().IsTruncateInt64ToInt32()) {
- result.truncate_from_64_bit = true;
- result.source =
- NodeProperties::GetValueInput(mand.left().node(), 0);
+ return BitfieldCheck(
+ NodeProperties::GetValueInput(mand.left().node(), 0), mask,
+ masked_value, true);
+ } else {
+ return BitfieldCheck(mand.left().node(), mask, masked_value, false);
}
- return result;
}
}
} else {
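
The rewritten Detect above only constructs a BitfieldCheck when the constant masked_value has no bits outside mask, matching the invariant the new constructor CHECKs: since (source & mask) can only contain bits from mask, a comparison against a value with extra bits can never hold. A small standalone illustration (names are not from V8):

#include <cassert>
#include <cstdint>

// (source & mask) == masked_value is unsatisfiable whenever masked_value
// has a bit set that mask does not.
bool BitfieldCheckHolds(uint32_t source, uint32_t mask, uint32_t masked_value) {
  return (source & mask) == masked_value;
}

int main() {
  // masked_value 0b100 lies outside mask 0b011, so no source can satisfy the check.
  for (uint32_t source = 0; source < 1024; ++source) {
    assert(!BitfieldCheckHolds(source, /*mask=*/0b011, /*masked_value=*/0b100));
  }
  return 0;
}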
@@ -1836,17 +1848,20 @@ Reduction MachineOperatorReducer::ReduceWord64And(Node* node) {
}
Reduction MachineOperatorReducer::TryMatchWord32Ror(Node* node) {
+ // Recognize rotation, we are matching and transforming as follows:
+ // x << y | x >>> (32 - y) => x ror (32 - y)
+ // x << (32 - y) | x >>> y => x ror y
+ // x << y ^ x >>> (32 - y) => x ror (32 - y) if y & 31 != 0
+ // x << (32 - y) ^ x >>> y => x ror y if y & 31 != 0
+ // (As well as the commuted forms.)
+ // Note the side condition for XOR: the optimization doesn't hold for
+ // multiples of 32.
+
DCHECK(IrOpcode::kWord32Or == node->opcode() ||
IrOpcode::kWord32Xor == node->opcode());
Int32BinopMatcher m(node);
Node* shl = nullptr;
Node* shr = nullptr;
- // Recognize rotation, we are matching:
- // * x << y | x >>> (32 - y) => x ror (32 - y), i.e x rol y
- // * x << (32 - y) | x >>> y => x ror y
- // * x << y ^ x >>> (32 - y) => x ror (32 - y), i.e. x rol y
- // * x << (32 - y) ^ x >>> y => x ror y
- // as well as their commuted form.
if (m.left().IsWord32Shl() && m.right().IsWord32Shr()) {
shl = m.left().node();
shr = m.right().node();
@@ -1863,8 +1878,13 @@ Reduction MachineOperatorReducer::TryMatchWord32Ror(Node* node) {
if (mshl.right().HasResolvedValue() && mshr.right().HasResolvedValue()) {
// Case where y is a constant.
- if (mshl.right().ResolvedValue() + mshr.right().ResolvedValue() != 32)
+ if (mshl.right().ResolvedValue() + mshr.right().ResolvedValue() != 32) {
return NoChange();
+ }
+ if (node->opcode() == IrOpcode::kWord32Xor &&
+ (mshl.right().ResolvedValue() & 31) == 0) {
+ return NoChange();
+ }
} else {
Node* sub = nullptr;
Node* y = nullptr;
@@ -1880,6 +1900,9 @@ Reduction MachineOperatorReducer::TryMatchWord32Ror(Node* node) {
Int32BinopMatcher msub(sub);
if (!msub.left().Is(32) || msub.right().node() != y) return NoChange();
+ if (node->opcode() == IrOpcode::kWord32Xor) {
+ return NoChange(); // Can't guarantee y & 31 != 0.
+ }
}
node->ReplaceInput(0, mshl.left().node());
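
The comment moved to the top of TryMatchWord32Ror above lists the shift patterns that fold into a rotate and the extra side condition for the XOR form. A standalone check of the underlying identity (illustrative only, names are not from V8):

#include <cassert>
#include <cstdint>

// Rotate right by an amount in [0, 31].
uint32_t Ror(uint32_t x, uint32_t y) {
  return y == 0 ? x : (x >> y) | (x << (32 - y));
}

int main() {
  uint32_t x = 0x12345678u;
  for (uint32_t y = 1; y < 32; ++y) {
    uint32_t shl = x << y;
    uint32_t shr = x >> (32 - y);
    // The two shifted halves occupy disjoint bits, so both OR and XOR
    // reconstruct the rotation as long as y is not a multiple of 32.
    assert((shl | shr) == Ror(x, 32 - y));
    assert((shl ^ shr) == Ror(x, 32 - y));
  }
  // For y a multiple of 32 a machine-level shift degenerates to the value
  // itself, so x | x == x still equals Ror(x, 0) but x ^ x == 0 does not.
  // That is the side condition the reducer now checks before matching XOR.
  assert((x | x) == Ror(x, 0));
  assert((x ^ x) != Ror(x, 0));
  return 0;
}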
diff --git a/chromium/v8/src/compiler/machine-operator.cc b/chromium/v8/src/compiler/machine-operator.cc
index 1a897a32032..3d61d70b022 100644
--- a/chromium/v8/src/compiler/machine-operator.cc
+++ b/chromium/v8/src/compiler/machine-operator.cc
@@ -252,6 +252,7 @@ std::ostream& operator<<(std::ostream& os, TruncateKind kind) {
V(Word64Shl, Operator::kNoProperties, 2, 0, 1) \
V(Word64Shr, Operator::kNoProperties, 2, 0, 1) \
V(Word64Ror, Operator::kNoProperties, 2, 0, 1) \
+ V(Word64RorLowerable, Operator::kNoProperties, 2, 1, 1) \
V(Word64Equal, Operator::kCommutative, 2, 0, 1) \
V(Int64Add, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
V(Int64Sub, Operator::kNoProperties, 2, 0, 1) \
@@ -272,6 +273,7 @@ std::ostream& operator<<(std::ostream& os, TruncateKind kind) {
PURE_BINARY_OP_LIST_64(V) \
V(Word32Clz, Operator::kNoProperties, 1, 0, 1) \
V(Word64Clz, Operator::kNoProperties, 1, 0, 1) \
+ V(Word64ClzLowerable, Operator::kNoProperties, 1, 1, 1) \
V(Word32ReverseBytes, Operator::kNoProperties, 1, 0, 1) \
V(Word64ReverseBytes, Operator::kNoProperties, 1, 0, 1) \
V(Simd128ReverseBytes, Operator::kNoProperties, 1, 0, 1) \
@@ -566,8 +568,10 @@ std::ostream& operator<<(std::ostream& os, TruncateKind kind) {
#define PURE_OPTIONAL_OP_LIST(V) \
V(Word32Ctz, Operator::kNoProperties, 1, 0, 1) \
V(Word64Ctz, Operator::kNoProperties, 1, 0, 1) \
+ V(Word64CtzLowerable, Operator::kNoProperties, 1, 1, 1) \
V(Word32Rol, Operator::kNoProperties, 2, 0, 1) \
V(Word64Rol, Operator::kNoProperties, 2, 0, 1) \
+ V(Word64RolLowerable, Operator::kNoProperties, 2, 1, 1) \
V(Word32ReverseBits, Operator::kNoProperties, 1, 0, 1) \
V(Word64ReverseBits, Operator::kNoProperties, 1, 0, 1) \
V(Int32AbsWithOverflow, Operator::kNoProperties, 1, 0, 2) \
@@ -583,6 +587,8 @@ std::ostream& operator<<(std::ostream& os, TruncateKind kind) {
V(Float64RoundTiesAway, Operator::kNoProperties, 1, 0, 1) \
V(Float32RoundTiesEven, Operator::kNoProperties, 1, 0, 1) \
V(Float64RoundTiesEven, Operator::kNoProperties, 1, 0, 1) \
+ V(Word32Select, Operator::kNoProperties, 3, 0, 1) \
+ V(Word64Select, Operator::kNoProperties, 3, 0, 1) \
V(Float32Select, Operator::kNoProperties, 3, 0, 1) \
V(Float64Select, Operator::kNoProperties, 3, 0, 1)
@@ -610,6 +616,7 @@ std::ostream& operator<<(std::ostream& os, TruncateKind kind) {
V(Pointer) \
V(TaggedSigned) \
V(TaggedPointer) \
+ V(MapInHeader) \
V(AnyTagged) \
V(CompressedPointer) \
V(AnyCompressed)
@@ -622,6 +629,7 @@ std::ostream& operator<<(std::ostream& os, TruncateKind kind) {
V(kWord16) \
V(kWord32) \
V(kWord64) \
+ V(kMapWord) \
V(kTaggedSigned) \
V(kTaggedPointer) \
V(kTagged) \
@@ -1332,6 +1340,7 @@ OVERFLOW_OP_LIST(OVERFLOW_OP)
#undef OVERFLOW_OP
const Operator* MachineOperatorBuilder::Load(LoadRepresentation rep) {
+ DCHECK(!rep.IsMapWord());
#define LOAD(Type) \
if (rep == MachineType::Type()) { \
return &cache_.kLoad##Type; \
@@ -1491,6 +1500,7 @@ const Operator* MachineOperatorBuilder::StackSlot(MachineRepresentation rep,
}
const Operator* MachineOperatorBuilder::Store(StoreRepresentation store_rep) {
+ DCHECK_NE(store_rep.representation(), MachineRepresentation::kMapWord);
switch (store_rep.representation()) {
#define STORE(kRep) \
case MachineRepresentation::kRep: \
diff --git a/chromium/v8/src/compiler/machine-operator.h b/chromium/v8/src/compiler/machine-operator.h
index 87a2eb891f1..0ee3649ad0c 100644
--- a/chromium/v8/src/compiler/machine-operator.h
+++ b/chromium/v8/src/compiler/machine-operator.h
@@ -104,7 +104,8 @@ V8_EXPORT_PRIVATE LoadLaneParameters const& LoadLaneParametersOf(
Operator const*) V8_WARN_UNUSED_RESULT;
// A Store needs a MachineType and a WriteBarrierKind in order to emit the
-// correct write barrier.
+// correct write barrier, and needs to state whether it is storing into the
+// header word, so that the value can be packed, if necessary.
class StoreRepresentation final {
public:
StoreRepresentation(MachineRepresentation representation,
@@ -248,25 +249,30 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
kWord32ShiftIsSafe = 1u << 11,
kWord32Ctz = 1u << 12,
kWord64Ctz = 1u << 13,
- kWord32Popcnt = 1u << 14,
- kWord64Popcnt = 1u << 15,
- kWord32ReverseBits = 1u << 16,
- kWord64ReverseBits = 1u << 17,
- kFloat32Select = 1u << 18,
- kFloat64Select = 1u << 19,
- kInt32AbsWithOverflow = 1u << 20,
- kInt64AbsWithOverflow = 1u << 21,
- kWord32Rol = 1u << 22,
- kWord64Rol = 1u << 23,
- kSatConversionIsSafe = 1u << 24,
+ kWord64CtzLowerable = 1u << 14,
+ kWord32Popcnt = 1u << 15,
+ kWord64Popcnt = 1u << 16,
+ kWord32ReverseBits = 1u << 17,
+ kWord64ReverseBits = 1u << 18,
+ kFloat32Select = 1u << 19,
+ kFloat64Select = 1u << 20,
+ kInt32AbsWithOverflow = 1u << 21,
+ kInt64AbsWithOverflow = 1u << 22,
+ kWord32Rol = 1u << 23,
+ kWord64Rol = 1u << 24,
+ kWord64RolLowerable = 1u << 25,
+ kSatConversionIsSafe = 1u << 26,
+ kWord32Select = 1u << 27,
+ kWord64Select = 1u << 28,
kAllOptionalOps =
kFloat32RoundDown | kFloat64RoundDown | kFloat32RoundUp |
kFloat64RoundUp | kFloat32RoundTruncate | kFloat64RoundTruncate |
kFloat64RoundTiesAway | kFloat32RoundTiesEven | kFloat64RoundTiesEven |
- kWord32Ctz | kWord64Ctz | kWord32Popcnt | kWord64Popcnt |
- kWord32ReverseBits | kWord64ReverseBits | kInt32AbsWithOverflow |
- kInt64AbsWithOverflow | kWord32Rol | kWord64Rol | kSatConversionIsSafe |
- kFloat32Select | kFloat64Select
+ kWord32Ctz | kWord64Ctz | kWord64CtzLowerable | kWord32Popcnt |
+ kWord64Popcnt | kWord32ReverseBits | kWord64ReverseBits |
+ kInt32AbsWithOverflow | kInt64AbsWithOverflow | kWord32Rol |
+ kWord64Rol | kWord64RolLowerable | kSatConversionIsSafe |
+ kFloat32Select | kFloat64Select | kWord32Select | kWord64Select
};
using Flags = base::Flags<Flag, unsigned>;
@@ -386,10 +392,21 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* Word64SarShiftOutZeros() {
return Word64Sar(ShiftKind::kShiftOutZeros);
}
+
+ // 64-bit rol, ror, clz and ctz operators have two versions: the non-suffixed
+ // ones are meant to be used in 64-bit systems and have no control input. The
+ // "Lowerable"-suffixed ones are meant to be temporary operators in 32-bit
+ // systems and will be lowered to 32-bit operators. They have a control input
+ // to enable the lowering.
const OptionalOperator Word64Rol();
const Operator* Word64Ror();
const Operator* Word64Clz();
const OptionalOperator Word64Ctz();
+ const OptionalOperator Word64RolLowerable();
+ const Operator* Word64RorLowerable();
+ const Operator* Word64ClzLowerable();
+ const OptionalOperator Word64CtzLowerable();
+
const Operator* Word64Equal();
const Operator* Int32PairAdd();
@@ -562,7 +579,11 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const OptionalOperator Float32RoundTiesEven();
const OptionalOperator Float64RoundTiesEven();
- // Floating point conditional selects.
+ // Conditional selects. Input 1 is the condition, Input 2 is the result value
+ // if the condition is {true}, Input 3 is the result value if the condition is
+ // false.
+ const OptionalOperator Word32Select();
+ const OptionalOperator Word64Select();
const OptionalOperator Float32Select();
const OptionalOperator Float64Select();
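
The comment above describes the "Lowerable" variants as temporary 64-bit operators that 32-bit targets later split into 32-bit operations. A rough sketch of what such a lowering computes for clz, in plain C++ rather than the actual int64-lowering pass:

#include <cassert>
#include <cstdint>

// 32-bit count-leading-zeros, standing in for the Word32Clz machine operator.
uint32_t Clz32(uint32_t x) {
  if (x == 0) return 32;
  uint32_t n = 0;
  while ((x & 0x80000000u) == 0) { x <<= 1; ++n; }
  return n;
}

// A 64-bit clz expressed over the two 32-bit halves: if the high word is
// non-zero its clz is the answer, otherwise add 32 to the clz of the low word.
uint32_t Clz64Lowered(uint64_t x) {
  uint32_t hi = static_cast<uint32_t>(x >> 32);
  uint32_t lo = static_cast<uint32_t>(x);
  return hi != 0 ? Clz32(hi) : 32 + Clz32(lo);
}

int main() {
  assert(Clz64Lowered(0) == 64);
  assert(Clz64Lowered(1) == 63);
  assert(Clz64Lowered(0x0000000100000000ull) == 31);
  assert(Clz64Lowered(0x8000000000000000ull) == 0);
  return 0;
}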
diff --git a/chromium/v8/src/compiler/map-inference.cc b/chromium/v8/src/compiler/map-inference.cc
index 1e1a59d7843..b6c96163c68 100644
--- a/chromium/v8/src/compiler/map-inference.cc
+++ b/chromium/v8/src/compiler/map-inference.cc
@@ -68,7 +68,7 @@ bool MapInference::AllOfInstanceTypesUnsafe(
CHECK(HaveMaps());
auto instance_type = [this, f](Handle<Map> map) {
- MapRef map_ref(broker_, map);
+ MapRef map_ref = MakeRef(broker_, map);
return f(map_ref.instance_type());
};
return std::all_of(maps_.begin(), maps_.end(), instance_type);
@@ -79,7 +79,7 @@ bool MapInference::AnyOfInstanceTypesUnsafe(
CHECK(HaveMaps());
auto instance_type = [this, f](Handle<Map> map) {
- MapRef map_ref(broker_, map);
+ MapRef map_ref = MakeRef(broker_, map);
return f(map_ref.instance_type());
};
@@ -134,13 +134,13 @@ bool MapInference::RelyOnMapsHelper(CompilationDependencies* dependencies,
if (Safe()) return true;
auto is_stable = [this](Handle<Map> map) {
- MapRef map_ref(broker_, map);
+ MapRef map_ref = MakeRef(broker_, map);
return map_ref.is_stable();
};
if (dependencies != nullptr &&
std::all_of(maps_.cbegin(), maps_.cend(), is_stable)) {
for (Handle<Map> map : maps_) {
- dependencies->DependOnStableMap(MapRef(broker_, map));
+ dependencies->DependOnStableMap(MakeRef(broker_, map));
}
SetGuarded();
return true;
diff --git a/chromium/v8/src/compiler/memory-lowering.cc b/chromium/v8/src/compiler/memory-lowering.cc
index b8a75ee9502..ac113ddd70e 100644
--- a/chromium/v8/src/compiler/memory-lowering.cc
+++ b/chromium/v8/src/compiler/memory-lowering.cc
@@ -4,8 +4,9 @@
#include "src/compiler/memory-lowering.h"
-#include "src/codegen/interface-descriptors.h"
+#include "src/codegen/interface-descriptors-inl.h"
#include "src/common/external-pointer.h"
+#include "src/compiler/access-builder.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/linkage.h"
#include "src/compiler/node-matchers.h"
@@ -41,6 +42,15 @@ class MemoryLowering::AllocationGroup final : public ZoneObject {
AllocationType const allocation_;
Node* const size_;
+ static inline AllocationType CheckAllocationType(AllocationType allocation) {
+ // For non-generational heap, all young allocations are redirected to old
+ // space.
+ if (FLAG_single_generation && allocation == AllocationType::kYoung) {
+ return AllocationType::kOld;
+ }
+ return allocation;
+ }
+
DISALLOW_IMPLICIT_CONSTRUCTORS(AllocationGroup);
};
@@ -98,6 +108,9 @@ Reduction MemoryLowering::ReduceAllocateRaw(
DCHECK_EQ(IrOpcode::kAllocateRaw, node->opcode());
DCHECK_IMPLIES(allocation_folding_ == AllocationFolding::kDoAllocationFolding,
state_ptr != nullptr);
+ if (FLAG_single_generation && allocation_type == AllocationType::kYoung) {
+ allocation_type = AllocationType::kOld;
+ }
// Code objects may have a maximum size smaller than kMaxHeapObjectSize due to
// guard pages. If we need to support allocating code here we would need to
// call MemoryChunkLayout::MaxRegularCodeObjectSize() at runtime.
@@ -292,12 +305,20 @@ Reduction MemoryLowering::ReduceAllocateRaw(
Reduction MemoryLowering::ReduceLoadFromObject(Node* node) {
DCHECK_EQ(IrOpcode::kLoadFromObject, node->opcode());
ObjectAccess const& access = ObjectAccessOf(node->op());
- MachineRepresentation rep = access.machine_type.representation();
- const Operator* load_op = ElementSizeInBytes(rep) > kTaggedSize &&
- !machine()->UnalignedLoadSupported(
- access.machine_type.representation())
- ? machine()->UnalignedLoad(access.machine_type)
- : machine()->Load(access.machine_type);
+
+ MachineType machine_type = access.machine_type;
+
+ if (machine_type.IsMapWord()) {
+ CHECK_EQ(machine_type.semantic(), MachineSemantic::kAny);
+ return ReduceLoadMap(node);
+ }
+
+ MachineRepresentation rep = machine_type.representation();
+ const Operator* load_op =
+ ElementSizeInBytes(rep) > kTaggedSize &&
+ !machine()->UnalignedLoadSupported(machine_type.representation())
+ ? machine()->UnalignedLoad(machine_type)
+ : machine()->Load(machine_type);
NodeProperties::ChangeOp(node, load_op);
return Changed(node);
}
@@ -308,6 +329,7 @@ Reduction MemoryLowering::ReduceLoadElement(Node* node) {
Node* index = node->InputAt(1);
node->ReplaceInput(1, ComputeIndex(access, index));
MachineType type = access.machine_type;
+ DCHECK(!type.IsMapWord());
if (NeedsPoisoning(access.load_sensitivity)) {
NodeProperties::ChangeOp(node, machine()->PoisonedLoad(type));
} else {
@@ -345,8 +367,8 @@ Node* MemoryLowering::DecodeExternalPointer(
Node* decoded_ptr =
__ Load(MachineType::Pointer(), table, __ ChangeUint32ToUint64(offset));
if (external_pointer_tag != 0) {
- Node* tag = __ IntPtrConstant(external_pointer_tag);
- decoded_ptr = __ WordXor(decoded_ptr, tag);
+ Node* tag = __ IntPtrConstant(~external_pointer_tag);
+ decoded_ptr = __ WordAnd(decoded_ptr, tag);
}
return decoded_ptr;
#else
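
The DecodeExternalPointer change above replaces XOR-ing the tag out with AND-ing against its complement: the tag bits are now cleared unconditionally instead of being flipped, so decoding no longer depends on exactly the expected tag pattern being present. A small illustration of the arithmetic difference (the tag value and names are made up for the example):

#include <cassert>
#include <cstdint>

int main() {
  const uint64_t kTag = 0xABCD000000000000ull;  // hypothetical tag in the high bits
  const uint64_t kPtr = 0x0000123456789ABCull;  // payload bits of the pointer

  uint64_t tagged = kPtr | kTag;
  assert((tagged & ~kTag) == kPtr);  // new scheme: clear every tag bit
  assert((tagged ^ kTag) == kPtr);   // old scheme also works when the full tag is set

  // With only part of the tag present, AND with the complement still strips it,
  // while XOR with the full tag leaves stray bits behind.
  uint64_t partially_tagged = kPtr | 0x0BCD000000000000ull;
  assert((partially_tagged & ~kTag) == kPtr);
  assert((partially_tagged ^ kTag) != kPtr);
  return 0;
}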
@@ -354,6 +376,22 @@ Node* MemoryLowering::DecodeExternalPointer(
#endif // V8_HEAP_SANDBOX
}
+Reduction MemoryLowering::ReduceLoadMap(Node* node) {
+#ifdef V8_MAP_PACKING
+ NodeProperties::ChangeOp(node, machine()->Load(MachineType::AnyTagged()));
+
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ __ InitializeEffectControl(effect, control);
+
+ node = __ AddNode(graph()->CloneNode(node));
+ return Replace(__ UnpackMapWord(node));
+#else
+ NodeProperties::ChangeOp(node, machine()->Load(MachineType::TaggedPointer()));
+ return Changed(node);
+#endif
+}
+
Reduction MemoryLowering::ReduceLoadField(Node* node) {
DCHECK_EQ(IrOpcode::kLoadField, node->opcode());
FieldAccess const& access = FieldAccessOf(node->op());
@@ -365,11 +403,19 @@ Reduction MemoryLowering::ReduceLoadField(Node* node) {
// External pointer table indices are 32bit numbers
type = MachineType::Uint32();
}
+
+ if (type.IsMapWord()) {
+ DCHECK(!NeedsPoisoning(access.load_sensitivity));
+ DCHECK(!access.type.Is(Type::SandboxedExternalPointer()));
+ return ReduceLoadMap(node);
+ }
+
if (NeedsPoisoning(access.load_sensitivity)) {
NodeProperties::ChangeOp(node, machine()->PoisonedLoad(type));
} else {
NodeProperties::ChangeOp(node, machine()->Load(type));
}
+
if (V8_HEAP_SANDBOX_BOOL &&
access.type.Is(Type::SandboxedExternalPointer())) {
#ifdef V8_HEAP_SANDBOX
@@ -391,8 +437,10 @@ Reduction MemoryLowering::ReduceStoreToObject(Node* node,
ObjectAccess const& access = ObjectAccessOf(node->op());
Node* object = node->InputAt(0);
Node* value = node->InputAt(2);
+
WriteBarrierKind write_barrier_kind = ComputeWriteBarrierKind(
node, object, value, state, access.write_barrier_kind);
+ DCHECK(!access.machine_type.IsMapWord());
MachineRepresentation rep = access.machine_type.representation();
StoreRepresentation store_rep(rep, write_barrier_kind);
const Operator* store_op = ElementSizeInBytes(rep) > kTaggedSize &&
@@ -427,15 +475,29 @@ Reduction MemoryLowering::ReduceStoreField(Node* node,
DCHECK_IMPLIES(V8_HEAP_SANDBOX_BOOL,
!access.type.Is(Type::ExternalPointer()) &&
!access.type.Is(Type::SandboxedExternalPointer()));
+ MachineType machine_type = access.machine_type;
Node* object = node->InputAt(0);
Node* value = node->InputAt(1);
+
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ __ InitializeEffectControl(effect, control);
+
WriteBarrierKind write_barrier_kind = ComputeWriteBarrierKind(
node, object, value, state, access.write_barrier_kind);
Node* offset = __ IntPtrConstant(access.offset - access.tag());
node->InsertInput(graph_zone(), 1, offset);
+
+ if (machine_type.IsMapWord()) {
+ machine_type = MachineType::TaggedPointer();
+#ifdef V8_MAP_PACKING
+ Node* mapword = __ PackMapWord(TNode<Map>::UncheckedCast(value));
+ node->ReplaceInput(2, mapword);
+#endif
+ }
NodeProperties::ChangeOp(
- node, machine()->Store(StoreRepresentation(
- access.machine_type.representation(), write_barrier_kind)));
+ node, machine()->Store(StoreRepresentation(machine_type.representation(),
+ write_barrier_kind)));
return Changed(node);
}
@@ -513,6 +575,9 @@ WriteBarrierKind MemoryLowering::ComputeWriteBarrierKind(
if (!ValueNeedsWriteBarrier(value, isolate())) {
write_barrier_kind = kNoWriteBarrier;
}
+ if (FLAG_disable_write_barriers) {
+ write_barrier_kind = kNoWriteBarrier;
+ }
if (write_barrier_kind == WriteBarrierKind::kAssertNoWriteBarrier) {
write_barrier_assert_failed_(node, object, function_debug_name_, zone());
}
@@ -537,14 +602,18 @@ bool MemoryLowering::NeedsPoisoning(LoadSensitivity load_sensitivity) const {
MemoryLowering::AllocationGroup::AllocationGroup(Node* node,
AllocationType allocation,
Zone* zone)
- : node_ids_(zone), allocation_(allocation), size_(nullptr) {
+ : node_ids_(zone),
+ allocation_(CheckAllocationType(allocation)),
+ size_(nullptr) {
node_ids_.insert(node->id());
}
MemoryLowering::AllocationGroup::AllocationGroup(Node* node,
AllocationType allocation,
Node* size, Zone* zone)
- : node_ids_(zone), allocation_(allocation), size_(size) {
+ : node_ids_(zone),
+ allocation_(CheckAllocationType(allocation)),
+ size_(size) {
node_ids_.insert(node->id());
}
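
The kMapWord handling added to this file routes map loads through the new ReduceLoadMap (which unpacks the value when V8_MAP_PACKING is enabled) and packs the map in ReduceStoreField before the store. A toy round-trip under an assumed XOR-style packing; the mask and helper bodies are illustrative and not V8's actual map-word encoding:

#include <cassert>
#include <cstdint>

// Hypothetical pack/unpack pair. The real scheme lives behind V8_MAP_PACKING
// elsewhere in V8 and is not visible in this diff; the point is only that a
// packed store followed by an unpacking load round-trips the map pointer.
constexpr uint64_t kIllustrativeMask = 0x0000000000000006ull;

uint64_t PackMapWord(uint64_t map_ptr) { return map_ptr ^ kIllustrativeMask; }
uint64_t UnpackMapWord(uint64_t packed) { return packed ^ kIllustrativeMask; }

int main() {
  uint64_t map_ptr = 0x00007f1234560001ull;    // made-up map pointer
  uint64_t stored = PackMapWord(map_ptr);      // what the store side would write
  assert(stored != map_ptr);                   // the header never holds the raw map
  assert(UnpackMapWord(stored) == map_ptr);    // what the load side recovers
  return 0;
}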
diff --git a/chromium/v8/src/compiler/memory-lowering.h b/chromium/v8/src/compiler/memory-lowering.h
index 7ad02b95afe..1ebbf40bc82 100644
--- a/chromium/v8/src/compiler/memory-lowering.h
+++ b/chromium/v8/src/compiler/memory-lowering.h
@@ -110,6 +110,7 @@ class MemoryLowering final : public Reducer {
AllocationState const* state,
WriteBarrierKind);
Node* DecodeExternalPointer(Node* encoded_pointer, ExternalPointerTag tag);
+ Reduction ReduceLoadMap(Node* encoded_pointer);
Node* ComputeIndex(ElementAccess const& access, Node* node);
bool NeedsPoisoning(LoadSensitivity load_sensitivity) const;
diff --git a/chromium/v8/src/compiler/memory-optimizer.cc b/chromium/v8/src/compiler/memory-optimizer.cc
index 1f36f25c298..7d6dcf880f7 100644
--- a/chromium/v8/src/compiler/memory-optimizer.cc
+++ b/chromium/v8/src/compiler/memory-optimizer.cc
@@ -257,6 +257,15 @@ bool MemoryOptimizer::AllocationTypeNeedsUpdateToOld(Node* const node,
return false;
}
+void MemoryOptimizer::ReplaceUsesAndKillNode(Node* node, Node* replacement) {
+ // Replace all uses of node and kill the node to make sure we don't leave
+ // dangling dead uses.
+ DCHECK_NE(replacement, node);
+ NodeProperties::ReplaceUses(node, replacement, graph_assembler_.effect(),
+ graph_assembler_.control());
+ node->Kill();
+}
+
void MemoryOptimizer::VisitAllocateRaw(Node* node,
AllocationState const* state) {
DCHECK_EQ(IrOpcode::kAllocateRaw, node->opcode());
@@ -294,12 +303,7 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
node, allocation_type, allocation.allow_large_objects(), &state);
CHECK(reduction.Changed() && reduction.replacement() != node);
- // Replace all uses of node and kill the node to make sure we don't leave
- // dangling dead uses.
- NodeProperties::ReplaceUses(node, reduction.replacement(),
- graph_assembler_.effect(),
- graph_assembler_.control());
- node->Kill();
+ ReplaceUsesAndKillNode(node, reduction.replacement());
EnqueueUses(state->effect(), state);
}
@@ -307,8 +311,11 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
void MemoryOptimizer::VisitLoadFromObject(Node* node,
AllocationState const* state) {
DCHECK_EQ(IrOpcode::kLoadFromObject, node->opcode());
- memory_lowering()->ReduceLoadFromObject(node);
+ Reduction reduction = memory_lowering()->ReduceLoadFromObject(node);
EnqueueUses(node, state);
+ if (V8_MAP_PACKING_BOOL && reduction.replacement() != node) {
+ ReplaceUsesAndKillNode(node, reduction.replacement());
+ }
}
void MemoryOptimizer::VisitStoreToObject(Node* node,
@@ -333,16 +340,14 @@ void MemoryOptimizer::VisitLoadField(Node* node, AllocationState const* state) {
// lowering, so we can proceed iterating the graph from the node uses.
EnqueueUses(node, state);
- // Node can be replaced only when V8_HEAP_SANDBOX_BOOL is enabled and
- // when loading an external pointer value.
- DCHECK_IMPLIES(!V8_HEAP_SANDBOX_BOOL, reduction.replacement() == node);
- if (V8_HEAP_SANDBOX_BOOL && reduction.replacement() != node) {
- // Replace all uses of node and kill the node to make sure we don't leave
- // dangling dead uses.
- NodeProperties::ReplaceUses(node, reduction.replacement(),
- graph_assembler_.effect(),
- graph_assembler_.control());
- node->Kill();
+ // Node can be replaced under two cases:
+ // 1. V8_HEAP_SANDBOX_BOOL is enabled and loading an external pointer value.
+ // 2. V8_MAP_PACKING_BOOL is enabled.
+ DCHECK_IMPLIES(!V8_HEAP_SANDBOX_BOOL && !V8_MAP_PACKING_BOOL,
+ reduction.replacement() == node);
+ if ((V8_HEAP_SANDBOX_BOOL || V8_MAP_PACKING_BOOL) &&
+ reduction.replacement() != node) {
+ ReplaceUsesAndKillNode(node, reduction.replacement());
}
}
diff --git a/chromium/v8/src/compiler/memory-optimizer.h b/chromium/v8/src/compiler/memory-optimizer.h
index 0f85fea1910..3845304fdd6 100644
--- a/chromium/v8/src/compiler/memory-optimizer.h
+++ b/chromium/v8/src/compiler/memory-optimizer.h
@@ -68,6 +68,8 @@ class MemoryOptimizer final {
void EnqueueUses(Node*, AllocationState const*);
void EnqueueUse(Node*, int, AllocationState const*);
+ void ReplaceUsesAndKillNode(Node* node, Node* replacement);
+
// Returns true if the AllocationType of the current AllocateRaw node that we
// are visiting needs to be updated to kOld, due to propagation of tenuring
// from outer to inner allocations.
diff --git a/chromium/v8/src/compiler/node-matchers.h b/chromium/v8/src/compiler/node-matchers.h
index e5554d3cba4..05ba661b511 100644
--- a/chromium/v8/src/compiler/node-matchers.h
+++ b/chromium/v8/src/compiler/node-matchers.h
@@ -238,7 +238,14 @@ struct HeapObjectMatcherImpl final
}
HeapObjectRef Ref(JSHeapBroker* broker) const {
- return HeapObjectRef(broker, this->ResolvedValue());
+ // TODO(jgruber,chromium:1209798): Using kAssumeMemoryFence works around
+ // the fact that the graph stores handles (and not refs). The assumption is
+ // that any handle inserted into the graph is safe to read; but we don't
+ // preserve the reason why it is safe to read. Thus we must over-approximate
+ // here and assume the existence of a memory fence. In the future, we should
+ // consider having the graph store ObjectRefs or ObjectData pointer instead,
+ // which would make new ref construction here unnecessary.
+ return MakeRefAssumeMemoryFence(broker, this->ResolvedValue());
}
};
diff --git a/chromium/v8/src/compiler/node-observer.h b/chromium/v8/src/compiler/node-observer.h
index 8978156464e..a6c4619262d 100644
--- a/chromium/v8/src/compiler/node-observer.h
+++ b/chromium/v8/src/compiler/node-observer.h
@@ -80,7 +80,7 @@ class NodeObserver : public ZoneObject {
bool has_observed_changes() const { return has_observed_changes_; }
private:
- bool has_observed_changes_ = false;
+ std::atomic<bool> has_observed_changes_{false};
};
inline NodeObserver::~NodeObserver() = default;
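
Turning has_observed_changes_ into a std::atomic<bool> above makes a write from one thread and a read from another well-defined instead of a data race; whether V8 actually flips the flag off-thread is not visible in this diff. A minimal sketch of that pattern:

#include <atomic>
#include <cassert>
#include <thread>

struct Observer {
  void MarkObserved() { has_observed_changes_.store(true, std::memory_order_relaxed); }
  bool has_observed_changes() const {
    return has_observed_changes_.load(std::memory_order_relaxed);
  }
  std::atomic<bool> has_observed_changes_{false};
};

int main() {
  Observer observer;
  std::thread worker([&observer] { observer.MarkObserved(); });  // e.g. a background job
  worker.join();
  assert(observer.has_observed_changes());
  return 0;
}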
diff --git a/chromium/v8/src/compiler/node-properties.h b/chromium/v8/src/compiler/node-properties.h
index bec18b1e59b..50f3a171361 100644
--- a/chromium/v8/src/compiler/node-properties.h
+++ b/chromium/v8/src/compiler/node-properties.h
@@ -28,7 +28,7 @@ class V8_EXPORT_PRIVATE NodeProperties {
// Inputs are always arranged in order as follows:
// 0 [ values, context, frame state, effects, control ] node->InputCount()
- static int FirstValueIndex(Node* node) { return 0; }
+ static int FirstValueIndex(const Node* node) { return 0; }
static int FirstContextIndex(Node* node) { return PastValueIndex(node); }
static int FirstFrameStateIndex(Node* node) { return PastContextIndex(node); }
static int FirstEffectIndex(Node* node) { return PastFrameStateIndex(node); }
@@ -65,6 +65,12 @@ class V8_EXPORT_PRIVATE NodeProperties {
return node->InputAt(FirstValueIndex(node) + index);
}
+ static const Node* GetValueInput(const Node* node, int index) {
+ CHECK_LE(0, index);
+ CHECK_LT(index, node->op()->ValueInputCount());
+ return node->InputAt(FirstValueIndex(node) + index);
+ }
+
static Node* GetContextInput(Node* node) {
CHECK(OperatorProperties::HasContextInput(node->op()));
return node->InputAt(FirstContextIndex(node));
@@ -249,7 +255,7 @@ class V8_EXPORT_PRIVATE NodeProperties {
// Type.
static bool IsTyped(const Node* node) { return !node->type().IsInvalid(); }
- static Type GetType(Node* node) {
+ static Type GetType(const Node* node) {
DCHECK(IsTyped(node));
return node->type();
}
diff --git a/chromium/v8/src/compiler/opcodes.h b/chromium/v8/src/compiler/opcodes.h
index 3e5314d8574..f51c82df487 100644
--- a/chromium/v8/src/compiler/opcodes.h
+++ b/chromium/v8/src/compiler/opcodes.h
@@ -385,7 +385,6 @@
V(NumberSilenceNaN)
#define SIMPLIFIED_BIGINT_UNOP_LIST(V) \
- V(BigIntAsUintN) \
V(BigIntNegate) \
V(CheckBigInt)
@@ -491,13 +490,16 @@
V(TransitionAndStoreNumberElement) \
V(TransitionElementsKind) \
V(TypeOf) \
- V(UpdateInterruptBudget)
+ V(UpdateInterruptBudget) \
+ V(VerifyType)
#define SIMPLIFIED_SPECULATIVE_BIGINT_BINOP_LIST(V) \
V(SpeculativeBigIntAdd) \
V(SpeculativeBigIntSubtract)
-#define SIMPLIFIED_SPECULATIVE_BIGINT_UNOP_LIST(V) V(SpeculativeBigIntNegate)
+#define SIMPLIFIED_SPECULATIVE_BIGINT_UNOP_LIST(V) \
+ V(SpeculativeBigIntAsUintN) \
+ V(SpeculativeBigIntNegate)
#define SIMPLIFIED_OP_LIST(V) \
SIMPLIFIED_CHANGE_OP_LIST(V) \
@@ -570,6 +572,8 @@
V(Word64Sar) \
V(Word64Rol) \
V(Word64Ror) \
+ V(Word64RolLowerable) \
+ V(Word64RorLowerable) \
V(Int64Add) \
V(Int64AddWithOverflow) \
V(Int64Sub) \
@@ -688,6 +692,8 @@
V(Word64Popcnt) \
V(Word64Clz) \
V(Word64Ctz) \
+ V(Word64ClzLowerable) \
+ V(Word64CtzLowerable) \
V(Word64ReverseBits) \
V(Word64ReverseBytes) \
V(Simd128ReverseBytes) \
@@ -734,6 +740,8 @@
V(Float64ExtractHighWord32) \
V(Float64InsertLowWord32) \
V(Float64InsertHighWord32) \
+ V(Word32Select) \
+ V(Word64Select) \
V(Float32Select) \
V(Float64Select) \
V(TaggedPoisonOnSpeculation) \
diff --git a/chromium/v8/src/compiler/operation-typer.cc b/chromium/v8/src/compiler/operation-typer.cc
index 8b889c6948a..9e5816559e5 100644
--- a/chromium/v8/src/compiler/operation-typer.cc
+++ b/chromium/v8/src/compiler/operation-typer.cc
@@ -576,8 +576,7 @@ Type OperationTyper::NumberSilenceNaN(Type type) {
return type;
}
-Type OperationTyper::BigIntAsUintN(Type type) {
- DCHECK(type.Is(Type::BigInt()));
+Type OperationTyper::SpeculativeBigIntAsUintN(Type type) {
return Type::BigInt();
}
@@ -1263,14 +1262,12 @@ Type OperationTyper::StrictEqual(Type lhs, Type rhs) {
Type OperationTyper::CheckBounds(Type index, Type length) {
DCHECK(length.Is(cache_->kPositiveSafeInteger));
if (length.Is(cache_->kSingletonZero)) return Type::None();
- Type mask = Type::Range(0.0, length.Max() - 1, zone());
+ Type const upper_bound = Type::Range(0.0, length.Max() - 1, zone());
+ if (index.Maybe(Type::String())) return upper_bound;
if (index.Maybe(Type::MinusZero())) {
index = Type::Union(index, cache_->kSingletonZero, zone());
}
- if (index.Maybe(Type::String())) {
- index = Type::Union(index, cache_->kIntPtr, zone());
- }
- return Type::Intersect(index, mask, zone());
+ return Type::Intersect(index, upper_bound, zone());
}
Type OperationTyper::CheckFloat64Hole(Type type) {
diff --git a/chromium/v8/src/compiler/operator.h b/chromium/v8/src/compiler/operator.h
index e47441208f1..4206e753f1e 100644
--- a/chromium/v8/src/compiler/operator.h
+++ b/chromium/v8/src/compiler/operator.h
@@ -5,7 +5,7 @@
#ifndef V8_COMPILER_OPERATOR_H_
#define V8_COMPILER_OPERATOR_H_
-#include <ostream> // NOLINT(readability/streams)
+#include <ostream>
#include "src/base/compiler-specific.h"
#include "src/base/flags.h"
diff --git a/chromium/v8/src/compiler/pipeline.cc b/chromium/v8/src/compiler/pipeline.cc
index f80f3064cef..2ce65780ca3 100644
--- a/chromium/v8/src/compiler/pipeline.cc
+++ b/chromium/v8/src/compiler/pipeline.cc
@@ -4,7 +4,7 @@
#include "src/compiler/pipeline.h"
-#include <fstream> // NOLINT(readability/streams)
+#include <fstream>
#include <iostream>
#include <memory>
#include <sstream>
@@ -67,7 +67,6 @@
#include "src/compiler/pipeline-statistics.h"
#include "src/compiler/redundancy-elimination.h"
#include "src/compiler/schedule.h"
-#include "src/compiler/scheduled-machine-lowering.h"
#include "src/compiler/scheduler.h"
#include "src/compiler/select-lowering.h"
#include "src/compiler/serializer-for-background-compilation.h"
@@ -139,8 +138,7 @@ class PipelineData {
// For main entry point.
PipelineData(ZoneStats* zone_stats, Isolate* isolate,
OptimizedCompilationInfo* info,
- PipelineStatistics* pipeline_statistics,
- bool is_concurrent_inlining)
+ PipelineStatistics* pipeline_statistics)
: isolate_(isolate),
allocator_(isolate->allocator()),
info_(info),
@@ -157,9 +155,9 @@ class PipelineData {
instruction_zone_(instruction_zone_scope_.zone()),
codegen_zone_scope_(zone_stats_, kCodegenZoneName),
codegen_zone_(codegen_zone_scope_.zone()),
- broker_(new JSHeapBroker(isolate_, info_->zone(),
- info_->trace_heap_broker(),
- is_concurrent_inlining, info->code_kind())),
+ broker_(new JSHeapBroker(
+ isolate_, info_->zone(), info_->trace_heap_broker(),
+ info_->concurrent_inlining(), info->code_kind())),
register_allocation_zone_scope_(zone_stats_,
kRegisterAllocationZoneName),
register_allocation_zone_(register_allocation_zone_scope_.zone()),
@@ -783,6 +781,7 @@ class NodeOriginsWrapper final : public Reducer {
class V8_NODISCARD PipelineRunScope {
public:
+#ifdef V8_RUNTIME_CALL_STATS
PipelineRunScope(
PipelineData* data, const char* phase_name,
RuntimeCallCounterId runtime_call_counter_id,
@@ -794,6 +793,14 @@ class V8_NODISCARD PipelineRunScope {
runtime_call_counter_id, counter_mode) {
DCHECK_NOT_NULL(phase_name);
}
+#else // V8_RUNTIME_CALL_STATS
+ PipelineRunScope(PipelineData* data, const char* phase_name)
+ : phase_scope_(data->pipeline_statistics(), phase_name),
+ zone_scope_(data->zone_stats(), phase_name),
+ origin_scope_(data->node_origins(), phase_name) {
+ DCHECK_NOT_NULL(phase_name);
+ }
+#endif // V8_RUNTIME_CALL_STATS
Zone* zone() { return zone_scope_.zone(); }
@@ -801,7 +808,9 @@ class V8_NODISCARD PipelineRunScope {
PhaseScope phase_scope_;
ZoneStats::Scope zone_scope_;
NodeOriginTable::PhaseScope origin_scope_;
+#ifdef V8_RUNTIME_CALL_STATS
RuntimeCallTimerScope runtime_call_timer_scope;
+#endif // V8_RUNTIME_CALL_STATS
};
// LocalIsolateScope encapsulates the phase where persistent handles are
@@ -902,8 +911,7 @@ void PrintCode(Isolate* isolate, Handle<Code> code,
const bool print_code =
FLAG_print_code ||
(info->IsOptimizing() && FLAG_print_opt_code &&
- info->shared_info()->PassesFilter(FLAG_print_opt_code_filter)) ||
- (info->IsNativeContextIndependent() && FLAG_print_nci_code);
+ info->shared_info()->PassesFilter(FLAG_print_opt_code_filter));
if (print_code) {
std::unique_ptr<char[]> debug_name = info->GetDebugName();
CodeTracer::StreamScope tracing_scope(isolate->GetCodeTracer());
@@ -1095,15 +1103,6 @@ class PipelineCompilationJob final : public OptimizedCompilationJob {
Linkage* linkage_;
};
-namespace {
-
-bool ShouldUseConcurrentInlining(CodeKind code_kind, bool is_osr) {
- if (is_osr) return false;
- return code_kind == CodeKind::TURBOPROP || FLAG_concurrent_inlining;
-}
-
-} // namespace
-
PipelineCompilationJob::PipelineCompilationJob(
Isolate* isolate, Handle<SharedFunctionInfo> shared_info,
Handle<JSFunction> function, BytecodeOffset osr_offset,
@@ -1116,17 +1115,14 @@ PipelineCompilationJob::PipelineCompilationJob(
kPipelineCompilationJobZoneName),
zone_stats_(function->GetIsolate()->allocator()),
compilation_info_(&zone_, function->GetIsolate(), shared_info, function,
- code_kind),
+ code_kind, osr_offset, osr_frame),
pipeline_statistics_(CreatePipelineStatistics(
handle(Script::cast(shared_info->script()), isolate),
compilation_info(), function->GetIsolate(), &zone_stats_)),
data_(&zone_stats_, function->GetIsolate(), compilation_info(),
- pipeline_statistics_.get(),
- ShouldUseConcurrentInlining(code_kind, !osr_offset.IsNone())),
+ pipeline_statistics_.get()),
pipeline_(&data_),
- linkage_(nullptr) {
- compilation_info_.SetOptimizingForOsr(osr_offset, osr_frame);
-}
+ linkage_(nullptr) {}
PipelineCompilationJob::~PipelineCompilationJob() = default;
@@ -1159,14 +1155,13 @@ PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl(
return AbortOptimization(BailoutReason::kFunctionTooBig);
}
- if (!FLAG_always_opt && !compilation_info()->IsNativeContextIndependent()) {
+ if (!FLAG_always_opt) {
compilation_info()->set_bailout_on_uninitialized();
}
if (FLAG_turbo_loop_peeling) {
compilation_info()->set_loop_peeling();
}
- if (FLAG_turbo_inlining && !compilation_info()->IsTurboprop() &&
- !compilation_info()->IsNativeContextIndependent()) {
+ if (FLAG_turbo_inlining && !compilation_info()->IsTurboprop()) {
compilation_info()->set_inlining();
}
@@ -1188,13 +1183,11 @@ PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl(
// Determine whether to specialize the code for the function's context.
// We can't do this in the case of OSR, because we want to cache the
// generated code on the native context keyed on SharedFunctionInfo.
- // We also can't do this for native context independent code (yet).
// TODO(mythria): Check if it is better to key the OSR cache on JSFunction and
// allow context specialization for OSR code.
if (compilation_info()->closure()->raw_feedback_cell().map() ==
ReadOnlyRoots(isolate).one_closure_cell_map() &&
!compilation_info()->is_osr() &&
- !compilation_info()->IsNativeContextIndependent() &&
!compilation_info()->IsTurboprop()) {
compilation_info()->set_function_context_specializing();
data_.ChooseSpecializationContext();
@@ -1222,7 +1215,7 @@ PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl(
}
}
- if (FLAG_turbo_direct_heap_access) {
+ if (compilation_info()->concurrent_inlining()) {
isolate->heap()->PublishPendingAllocations();
}
@@ -1262,8 +1255,7 @@ PipelineCompilationJob::Status PipelineCompilationJob::FinalizeJobImpl(
// Ensure that the RuntimeCallStats table of main thread is available for
// phases happening during PrepareJob.
PipelineJobScope scope(&data_, isolate->counters()->runtime_call_stats());
- RuntimeCallTimerScope runtimeTimer(
- isolate, RuntimeCallCounterId::kOptimizeFinalizePipelineJob);
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kOptimizeFinalizePipelineJob);
MaybeHandle<Code> maybe_code = pipeline_.FinalizeCode();
Handle<Code> code;
if (!maybe_code.ToHandle(&code)) {
@@ -1309,17 +1301,26 @@ void PipelineCompilationJob::RegisterWeakObjectsInOptimizedCode(
template <typename Phase, typename... Args>
void PipelineImpl::Run(Args&&... args) {
+#ifdef V8_RUNTIME_CALL_STATS
PipelineRunScope scope(this->data_, Phase::phase_name(),
Phase::kRuntimeCallCounterId, Phase::kCounterMode);
+#else
+ PipelineRunScope scope(this->data_, Phase::phase_name());
+#endif
Phase phase;
phase.Run(this->data_, scope.zone(), std::forward<Args>(args)...);
}
+#ifdef V8_RUNTIME_CALL_STATS
#define DECL_PIPELINE_PHASE_CONSTANTS_HELPER(Name, Mode) \
static const char* phase_name() { return "V8.TF" #Name; } \
static constexpr RuntimeCallCounterId kRuntimeCallCounterId = \
RuntimeCallCounterId::kOptimize##Name; \
static constexpr RuntimeCallStats::CounterMode kCounterMode = Mode;
+#else // V8_RUNTIME_CALL_STATS
+#define DECL_PIPELINE_PHASE_CONSTANTS_HELPER(Name, Mode) \
+ static const char* phase_name() { return "V8.TF" #Name; }
+#endif // V8_RUNTIME_CALL_STATS
#define DECL_PIPELINE_PHASE_CONSTANTS(Name) \
DECL_PIPELINE_PHASE_CONSTANTS_HELPER(Name, RuntimeCallStats::kThreadSpecific)
@@ -1339,7 +1340,7 @@ struct GraphBuilderPhase {
flags |= BytecodeGraphBuilderFlag::kBailoutOnUninitialized;
}
- JSFunctionRef closure(data->broker(), data->info()->closure());
+ JSFunctionRef closure = MakeRef(data->broker(), data->info()->closure());
CallFrequency frequency(1.0f);
BuildGraphFromBytecode(
data->broker(), temp_zone, closure.shared(),
@@ -1361,7 +1362,8 @@ struct InliningPhase {
data->broker(), data->jsgraph()->Dead(),
data->observe_node_manager());
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
- data->common(), temp_zone);
+ data->common(), temp_zone,
+ info->concurrent_inlining());
CheckpointElimination checkpoint_elimination(&graph_reducer);
CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
data->broker(), data->common(),
@@ -1370,7 +1372,7 @@ struct InliningPhase {
if (data->info()->bailout_on_uninitialized()) {
call_reducer_flags |= JSCallReducer::kBailoutOnUninitialized;
}
- if (FLAG_turbo_inline_js_wasm_calls && data->info()->inlining()) {
+ if (data->info()->inline_js_wasm_calls() && data->info()->inlining()) {
call_reducer_flags |= JSCallReducer::kInlineJSToWasmCalls;
}
JSCallReducer call_reducer(&graph_reducer, data->jsgraph(), data->broker(),
@@ -1400,12 +1402,12 @@ struct InliningPhase {
JSIntrinsicLowering intrinsic_lowering(&graph_reducer, data->jsgraph(),
data->broker());
AddReducer(data, &graph_reducer, &dead_code_elimination);
- AddReducer(data, &graph_reducer, &checkpoint_elimination);
- AddReducer(data, &graph_reducer, &common_reducer);
- if (!data->info()->IsNativeContextIndependent()) {
- AddReducer(data, &graph_reducer, &native_context_specialization);
- AddReducer(data, &graph_reducer, &context_specialization);
+ if (!data->info()->IsTurboprop()) {
+ AddReducer(data, &graph_reducer, &checkpoint_elimination);
+ AddReducer(data, &graph_reducer, &common_reducer);
}
+ AddReducer(data, &graph_reducer, &native_context_specialization);
+ AddReducer(data, &graph_reducer, &context_specialization);
AddReducer(data, &graph_reducer, &intrinsic_lowering);
AddReducer(data, &graph_reducer, &call_reducer);
if (data->info()->inlining()) {
@@ -1431,7 +1433,8 @@ struct WasmInliningPhase {
GraphReducer graph_reducer(temp_zone, data->graph(), &info->tick_counter(),
data->broker(), data->jsgraph()->Dead());
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
- data->common(), temp_zone);
+ data->common(), temp_zone,
+ info->concurrent_inlining());
CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
data->broker(), data->common(),
data->machine(), temp_zone);
@@ -1503,7 +1506,7 @@ struct HeapBrokerInitializationPhase {
DECL_MAIN_THREAD_PIPELINE_PHASE_CONSTANTS(HeapBrokerInitialization)
void Run(PipelineData* data, Zone* temp_zone) {
- data->broker()->InitializeAndStartSerializing(data->native_context());
+ data->broker()->InitializeAndStartSerializing();
}
};
@@ -1547,8 +1550,16 @@ struct SerializationPhase {
data->zone_stats(), data->broker(), data->dependencies(),
data->info()->closure(), flags, data->info()->osr_offset());
if (data->specialization_context().IsJust()) {
- ContextRef(data->broker(),
- data->specialization_context().FromJust().context);
+ MakeRef(data->broker(),
+ data->specialization_context().FromJust().context);
+ }
+ if (FLAG_turbo_concurrent_get_property_access_info) {
+ data->broker()->ClearCachedPropertyAccessInfos();
+ data->dependencies()->ClearForConcurrentGetPropertyAccessInfo();
+ }
+ if (FLAG_stress_concurrent_inlining) {
+ // Force re-serialization from the background thread.
+ data->broker()->ClearReconstructibleData();
}
}
};
@@ -1560,8 +1571,9 @@ struct TypedLoweringPhase {
GraphReducer graph_reducer(
temp_zone, data->graph(), &data->info()->tick_counter(), data->broker(),
data->jsgraph()->Dead(), data->observe_node_manager());
- DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
- data->common(), temp_zone);
+ DeadCodeElimination dead_code_elimination(
+ &graph_reducer, data->graph(), data->common(), temp_zone,
+ data->info()->concurrent_inlining());
JSCreateLowering create_lowering(&graph_reducer, data->dependencies(),
data->jsgraph(), data->broker(),
temp_zone);
@@ -1579,10 +1591,10 @@ struct TypedLoweringPhase {
data->machine(), temp_zone);
AddReducer(data, &graph_reducer, &dead_code_elimination);
- if (!data->info()->IsNativeContextIndependent()) {
- AddReducer(data, &graph_reducer, &create_lowering);
+ AddReducer(data, &graph_reducer, &create_lowering);
+ if (!data->info()->IsTurboprop()) {
+ AddReducer(data, &graph_reducer, &constant_folding_reducer);
}
- AddReducer(data, &graph_reducer, &constant_folding_reducer);
AddReducer(data, &graph_reducer, &typed_lowering);
AddReducer(data, &graph_reducer, &typed_optimization);
AddReducer(data, &graph_reducer, &simple_reducer);
@@ -1747,8 +1759,9 @@ struct EarlyOptimizationPhase {
GraphReducer graph_reducer(
temp_zone, data->graph(), &data->info()->tick_counter(), data->broker(),
data->jsgraph()->Dead(), data->observe_node_manager());
- DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
- data->common(), temp_zone);
+ DeadCodeElimination dead_code_elimination(
+ &graph_reducer, data->graph(), data->common(), temp_zone,
+ data->info()->concurrent_inlining());
SimplifiedOperatorReducer simple_reducer(&graph_reducer, data->jsgraph(),
data->broker());
RedundancyElimination redundancy_elimination(&graph_reducer, temp_zone);
@@ -1804,11 +1817,6 @@ struct EffectControlLinearizationPhase {
TraceScheduleAndVerify(data->info(), data, schedule,
"effect linearization schedule");
- MaskArrayIndexEnable mask_array_index =
- (data->info()->GetPoisoningMitigationLevel() !=
- PoisoningMitigationLevel::kDontPoison)
- ? MaskArrayIndexEnable::kMaskArrayIndex
- : MaskArrayIndexEnable::kDoNotMaskArrayIndex;
// Post-pass for wiring the control/effects
// - connect allocating representation changes into the control&effect
// chains and lower them,
@@ -1816,7 +1824,7 @@ struct EffectControlLinearizationPhase {
// - introduce effect phis and rewire effects to get SSA again.
LinearizeEffectControl(data->jsgraph(), schedule, temp_zone,
data->source_positions(), data->node_origins(),
- mask_array_index, MaintainSchedule::kDiscard,
+ data->info()->GetPoisoningMitigationLevel(),
data->broker());
}
{
@@ -1829,8 +1837,9 @@ struct EffectControlLinearizationPhase {
&data->info()->tick_counter(), data->broker(),
data->jsgraph()->Dead(),
data->observe_node_manager());
- DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
- data->common(), temp_zone);
+ DeadCodeElimination dead_code_elimination(
+ &graph_reducer, data->graph(), data->common(), temp_zone,
+ data->info()->concurrent_inlining());
CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
data->broker(), data->common(),
data->machine(), temp_zone);
@@ -1868,8 +1877,9 @@ struct LoadEliminationPhase {
BranchElimination branch_condition_elimination(&graph_reducer,
data->jsgraph(), temp_zone,
BranchElimination::kEARLY);
- DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
- data->common(), temp_zone);
+ DeadCodeElimination dead_code_elimination(
+ &graph_reducer, data->graph(), data->common(), temp_zone,
+ data->info()->concurrent_inlining());
RedundancyElimination redundancy_elimination(&graph_reducer, temp_zone);
LoadElimination load_elimination(&graph_reducer, data->jsgraph(),
temp_zone);
@@ -1936,8 +1946,9 @@ struct LateOptimizationPhase {
data->jsgraph()->Dead(), data->observe_node_manager());
BranchElimination branch_condition_elimination(&graph_reducer,
data->jsgraph(), temp_zone);
- DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
- data->common(), temp_zone);
+ DeadCodeElimination dead_code_elimination(
+ &graph_reducer, data->graph(), data->common(), temp_zone,
+ data->info()->concurrent_inlining());
ValueNumberingReducer value_numbering(temp_zone, data->graph()->zone());
MachineOperatorReducer machine_reducer(&graph_reducer, data->jsgraph());
CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
@@ -2000,43 +2011,22 @@ struct ScheduledEffectControlLinearizationPhase {
DECL_PIPELINE_PHASE_CONSTANTS(ScheduledEffectControlLinearization)
void Run(PipelineData* data, Zone* temp_zone) {
- MaskArrayIndexEnable mask_array_index =
- (data->info()->GetPoisoningMitigationLevel() !=
- PoisoningMitigationLevel::kDontPoison)
- ? MaskArrayIndexEnable::kMaskArrayIndex
- : MaskArrayIndexEnable::kDoNotMaskArrayIndex;
// Post-pass for wiring the control/effects
// - connect allocating representation changes into the control&effect
// chains and lower them,
// - get rid of the region markers,
- // - introduce effect phis and rewire effects to get SSA again.
- LinearizeEffectControl(data->jsgraph(), data->schedule(), temp_zone,
+ // - introduce effect phis and rewire effects to get SSA again,
+ // - lower simplified memory and select nodes to machine level nodes.
+ LowerToMachineSchedule(data->jsgraph(), data->schedule(), temp_zone,
data->source_positions(), data->node_origins(),
- mask_array_index, MaintainSchedule::kMaintain,
+ data->info()->GetPoisoningMitigationLevel(),
data->broker());
// TODO(rmcilroy) Avoid having to rebuild rpo_order on schedule each time.
Scheduler::ComputeSpecialRPO(temp_zone, data->schedule());
- if (FLAG_turbo_verify) Scheduler::GenerateDominatorTree(data->schedule());
- TraceScheduleAndVerify(data->info(), data, data->schedule(),
- "effect linearization schedule");
- }
-};
-
-struct ScheduledMachineLoweringPhase {
- DECL_PIPELINE_PHASE_CONSTANTS(ScheduledMachineLowering)
-
- void Run(PipelineData* data, Zone* temp_zone) {
- ScheduledMachineLowering machine_lowering(
- data->jsgraph(), data->schedule(), temp_zone, data->source_positions(),
- data->node_origins(), data->info()->GetPoisoningMitigationLevel());
- machine_lowering.Run();
-
- // TODO(rmcilroy) Avoid having to rebuild rpo_order on schedule each time.
- Scheduler::ComputeSpecialRPO(temp_zone, data->schedule());
Scheduler::GenerateDominatorTree(data->schedule());
TraceScheduleAndVerify(data->info(), data, data->schedule(),
- "machine lowered schedule");
+ "effect linearization schedule");
}
};
@@ -2051,8 +2041,9 @@ struct CsaEarlyOptimizationPhase {
allow_signalling_nan);
BranchElimination branch_condition_elimination(&graph_reducer,
data->jsgraph(), temp_zone);
- DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
- data->common(), temp_zone);
+ DeadCodeElimination dead_code_elimination(
+ &graph_reducer, data->graph(), data->common(), temp_zone,
+ data->info()->concurrent_inlining());
CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
data->broker(), data->common(),
data->machine(), temp_zone);
@@ -2078,8 +2069,9 @@ struct CsaOptimizationPhase {
data->jsgraph()->Dead(), data->observe_node_manager());
BranchElimination branch_condition_elimination(&graph_reducer,
data->jsgraph(), temp_zone);
- DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
- data->common(), temp_zone);
+ DeadCodeElimination dead_code_elimination(
+ &graph_reducer, data->graph(), data->common(), temp_zone,
+ data->info()->concurrent_inlining());
MachineOperatorReducer machine_reducer(&graph_reducer, data->jsgraph(),
allow_signalling_nan);
CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
@@ -2653,12 +2645,15 @@ bool PipelineImpl::CreateGraph() {
RunPrintAndVerify(InliningPhase::phase_name(), true);
// Remove dead->live edges from the graph.
- Run<EarlyGraphTrimmingPhase>();
- RunPrintAndVerify(EarlyGraphTrimmingPhase::phase_name(), true);
+ if (!data->info()->IsTurboprop()) {
+ Run<EarlyGraphTrimmingPhase>();
+ RunPrintAndVerify(EarlyGraphTrimmingPhase::phase_name(), true);
+ }
// Determine the Typer operation flags.
{
- SharedFunctionInfoRef shared_info(data->broker(), info()->shared_info());
+ SharedFunctionInfoRef shared_info =
+ MakeRef(data->broker(), info()->shared_info());
if (is_sloppy(shared_info.language_mode()) &&
shared_info.IsUserJavaScript()) {
// Sloppy mode functions always have an Object for this.
@@ -2735,7 +2730,7 @@ bool PipelineImpl::OptimizeGraph(Linkage* linkage) {
#if V8_ENABLE_WEBASSEMBLY
if (data->has_js_wasm_calls()) {
- DCHECK(FLAG_turbo_inline_js_wasm_calls);
+ DCHECK(data->info()->inline_js_wasm_calls());
Run<WasmInliningPhase>();
RunPrintAndVerify(WasmInliningPhase::phase_name(), true);
}
@@ -2862,9 +2857,6 @@ bool PipelineImpl::OptimizeGraphForMidTier(Linkage* linkage) {
RunPrintAndVerify(ScheduledEffectControlLinearizationPhase::phase_name(),
true);
- Run<ScheduledMachineLoweringPhase>();
- RunPrintAndVerify(ScheduledMachineLoweringPhase::phase_name(), true);
-
data->source_positions()->RemoveDecorator();
if (data->info()->trace_turbo_json()) {
data->node_origins()->RemoveDecorator();
@@ -2966,8 +2958,7 @@ MaybeHandle<Code> Pipeline::GenerateCodeForCodeStub(
should_optimize_jumps ? &jump_opt : nullptr, options,
profile_data);
PipelineJobScope scope(&data, isolate->counters()->runtime_call_stats());
- RuntimeCallTimerScope timer_scope(isolate,
- RuntimeCallCounterId::kOptimizeCode);
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kOptimizeCode);
data.set_verify_graph(FLAG_verify_csa);
std::unique_ptr<PipelineStatistics> pipeline_statistics;
if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
@@ -3290,8 +3281,7 @@ MaybeHandle<Code> Pipeline::GenerateCodeForTesting(
CreatePipelineStatistics(Handle<Script>::null(), info, isolate,
&zone_stats));
- PipelineData data(&zone_stats, isolate, info, pipeline_statistics.get(),
- i::FLAG_concurrent_inlining);
+ PipelineData data(&zone_stats, isolate, info, pipeline_statistics.get());
PipelineImpl pipeline(&data);
Linkage linkage(Linkage::ComputeIncoming(data.instruction_zone(), info));
@@ -3422,8 +3412,10 @@ void PipelineImpl::ComputeScheduledGraph() {
// We should only schedule the graph if it is not scheduled yet.
DCHECK_NULL(data->schedule());
- Run<LateGraphTrimmingPhase>();
- RunPrintAndVerify(LateGraphTrimmingPhase::phase_name(), true);
+ if (!data->info()->IsTurboprop()) {
+ Run<LateGraphTrimmingPhase>();
+ RunPrintAndVerify(LateGraphTrimmingPhase::phase_name(), true);
+ }
Run<ComputeSchedulePhase>();
TraceScheduleAndVerify(data->info(), data, data->schedule(), "schedule");
diff --git a/chromium/v8/src/compiler/processed-feedback.h b/chromium/v8/src/compiler/processed-feedback.h
index 4cecd338c54..78163a23a7c 100644
--- a/chromium/v8/src/compiler/processed-feedback.h
+++ b/chromium/v8/src/compiler/processed-feedback.h
@@ -197,20 +197,24 @@ class MinimorphicLoadPropertyAccessFeedback : public ProcessedFeedback {
class CallFeedback : public ProcessedFeedback {
public:
CallFeedback(base::Optional<HeapObjectRef> target, float frequency,
- SpeculationMode mode, FeedbackSlotKind slot_kind)
+ SpeculationMode mode, CallFeedbackContent call_feedback_content,
+ FeedbackSlotKind slot_kind)
: ProcessedFeedback(kCall, slot_kind),
target_(target),
frequency_(frequency),
- mode_(mode) {}
+ mode_(mode),
+ content_(call_feedback_content) {}
base::Optional<HeapObjectRef> target() const { return target_; }
float frequency() const { return frequency_; }
SpeculationMode speculation_mode() const { return mode_; }
+ CallFeedbackContent call_feedback_content() const { return content_; }
private:
base::Optional<HeapObjectRef> const target_;
float const frequency_;
SpeculationMode const mode_;
+ CallFeedbackContent const content_;
};
template <class T, ProcessedFeedback::Kind K>
diff --git a/chromium/v8/src/compiler/property-access-builder.cc b/chromium/v8/src/compiler/property-access-builder.cc
index c5a70555a7d..317d21a7128 100644
--- a/chromium/v8/src/compiler/property-access-builder.cc
+++ b/chromium/v8/src/compiler/property-access-builder.cc
@@ -35,7 +35,7 @@ SimplifiedOperatorBuilder* PropertyAccessBuilder::simplified() const {
bool HasOnlyStringMaps(JSHeapBroker* broker,
ZoneVector<Handle<Map>> const& maps) {
for (auto map : maps) {
- MapRef map_ref(broker, map);
+ MapRef map_ref = MakeRef(broker, map);
if (!map_ref.IsStringMap()) return false;
}
return true;
@@ -46,7 +46,7 @@ namespace {
bool HasOnlyNumberMaps(JSHeapBroker* broker,
ZoneVector<Handle<Map>> const& maps) {
for (auto map : maps) {
- MapRef map_ref(broker, map);
+ MapRef map_ref = MakeRef(broker, map);
if (map_ref.instance_type() != HEAP_NUMBER_TYPE) return false;
}
return true;
@@ -89,7 +89,7 @@ void PropertyAccessBuilder::BuildCheckMaps(
MapRef object_map = m.Ref(broker()).map();
if (object_map.is_stable()) {
for (Handle<Map> map : maps) {
- if (MapRef(broker(), map).equals(object_map)) {
+ if (MakeRef(broker(), map).equals(object_map)) {
dependencies()->DependOnStableMap(object_map);
return;
}
@@ -99,7 +99,7 @@ void PropertyAccessBuilder::BuildCheckMaps(
ZoneHandleSet<Map> map_set;
CheckMapsFlags flags = CheckMapsFlag::kNone;
for (Handle<Map> map : maps) {
- MapRef object_map(broker(), map);
+ MapRef object_map = MakeRef(broker(), map);
map_set.insert(object_map.object(), graph()->zone());
if (object_map.is_migration_target()) {
flags |= CheckMapsFlag::kTryMigrateInstance;
@@ -153,7 +153,8 @@ Node* PropertyAccessBuilder::FoldLoadDictPrototypeConstant(
DCHECK(V8_DICT_PROPERTY_CONST_TRACKING_BOOL);
DCHECK(access_info.IsDictionaryProtoDataConstant());
- JSObjectRef holder(broker(), access_info.holder().ToHandleChecked());
+ JSObjectRef holder =
+ MakeRef(broker(), access_info.holder().ToHandleChecked());
base::Optional<ObjectRef> value =
holder.GetOwnDictionaryProperty(access_info.dictionary_index());
@@ -163,15 +164,15 @@ Node* PropertyAccessBuilder::FoldLoadDictPrototypeConstant(
if (!map->IsJSReceiverMap()) {
// Perform the implicit ToObject for primitives here.
// Implemented according to ES6 section 7.3.2 GetV (V, P).
- Handle<JSFunction> constructor =
+ JSFunction constructor =
Map::GetConstructorFunction(
- map, broker()->target_native_context().object())
- .ToHandleChecked();
- map = handle(constructor->initial_map(), isolate());
+ *map, *broker()->target_native_context().object())
+ .value();
+ map = handle(constructor.initial_map(), isolate());
DCHECK(map->IsJSObjectMap());
}
dependencies()->DependOnConstantInDictionaryPrototypeChain(
- MapRef{broker(), map}, NameRef{broker(), access_info.name()},
+ MakeRef(broker(), map), MakeRef(broker(), access_info.name()),
value.value(), PropertyKind::kData);
}
@@ -197,7 +198,7 @@ Node* PropertyAccessBuilder::TryFoldLoadConstantDataField(
if (std::find_if(
access_info.lookup_start_object_maps().begin(),
access_info.lookup_start_object_maps().end(), [&](Handle<Map> map) {
- return MapRef(broker(), map).equals(lookup_start_object_map);
+ return MakeRef(broker(), map).equals(lookup_start_object_map);
}) == access_info.lookup_start_object_maps().end()) {
// The map of the lookup_start_object is not in the feedback, let us bail
// out.
@@ -206,7 +207,7 @@ Node* PropertyAccessBuilder::TryFoldLoadConstantDataField(
holder = m.Ref(broker()).AsJSObject().object();
}
- JSObjectRef holder_ref(broker(), holder);
+ JSObjectRef holder_ref = MakeRef(broker(), holder);
base::Optional<ObjectRef> value = holder_ref.GetOwnFastDataProperty(
access_info.field_representation(), access_info.field_index());
if (!value.has_value()) {
@@ -329,7 +330,7 @@ Node* PropertyAccessBuilder::BuildLoadDataField(
// used by the LoadElimination to eliminate map checks on the result.
Handle<Map> field_map;
if (access_info.field_map().ToHandle(&field_map)) {
- MapRef field_map_ref(broker(), field_map);
+ MapRef field_map_ref = MakeRef(broker(), field_map);
if (field_map_ref.is_stable()) {
dependencies()->DependOnStableMap(field_map_ref);
field_access.map = field_map;
diff --git a/chromium/v8/src/compiler/raw-machine-assembler.cc b/chromium/v8/src/compiler/raw-machine-assembler.cc
index f79776bad99..7ed217d4e36 100644
--- a/chromium/v8/src/compiler/raw-machine-assembler.cc
+++ b/chromium/v8/src/compiler/raw-machine-assembler.cc
@@ -747,7 +747,8 @@ Node* CallCFunctionImpl(
}
for (const auto& arg : args) builder.AddParam(arg.first);
- bool caller_saved_fp_regs = caller_saved_regs && (mode == kSaveFPRegs);
+ bool caller_saved_fp_regs =
+ caller_saved_regs && (mode == SaveFPRegsMode::kSave);
CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
if (caller_saved_regs) flags |= CallDescriptor::kCallerSavedRegisters;
if (caller_saved_fp_regs) flags |= CallDescriptor::kCallerSavedFPRegisters;
@@ -772,14 +773,14 @@ Node* RawMachineAssembler::CallCFunction(
Node* function, base::Optional<MachineType> return_type,
std::initializer_list<RawMachineAssembler::CFunctionArg> args) {
return CallCFunctionImpl(this, function, return_type, args, false,
- kDontSaveFPRegs, kHasFunctionDescriptor);
+ SaveFPRegsMode::kIgnore, kHasFunctionDescriptor);
}
Node* RawMachineAssembler::CallCFunctionWithoutFunctionDescriptor(
Node* function, MachineType return_type,
std::initializer_list<RawMachineAssembler::CFunctionArg> args) {
return CallCFunctionImpl(this, function, return_type, args, false,
- kDontSaveFPRegs, kNoFunctionDescriptor);
+ SaveFPRegsMode::kIgnore, kNoFunctionDescriptor);
}
Node* RawMachineAssembler::CallCFunctionWithCallerSavedRegisters(
diff --git a/chromium/v8/src/compiler/raw-machine-assembler.h b/chromium/v8/src/compiler/raw-machine-assembler.h
index 1dff0a7c0c0..a811fa7bf9c 100644
--- a/chromium/v8/src/compiler/raw-machine-assembler.h
+++ b/chromium/v8/src/compiler/raw-machine-assembler.h
@@ -15,6 +15,7 @@
#include "src/compiler/graph.h"
#include "src/compiler/linkage.h"
#include "src/compiler/machine-operator.h"
+#include "src/compiler/node-matchers.h"
#include "src/compiler/node.h"
#include "src/compiler/operator.h"
#include "src/compiler/simplified-operator.h"
@@ -147,10 +148,38 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
Node* load = AddNode(op, base, index);
return load;
}
+ Node* LoadImmutable(MachineType type, Node* base) {
+ return LoadImmutable(type, base, IntPtrConstant(0));
+ }
+ Node* LoadImmutable(MachineType type, Node* base, Node* index) {
+ const Operator* op = machine()->LoadImmutable(type);
+ return AddNode(op, base, index);
+ }
+ bool IsMapOffsetConstant(Node* node) {
+ Int64Matcher m(node);
+ if (m.Is(HeapObject::kMapOffset)) return true;
+ // Test if `node` is a `Phi(Int64Constant(0))`
+ if (node->opcode() == IrOpcode::kPhi) {
+ for (Node* input : node->inputs()) {
+ if (!Int64Matcher(input).Is(HeapObject::kMapOffset)) return false;
+ }
+ return true;
+ }
+ return false;
+ }
+ bool IsMapOffsetConstantMinusTag(Node* node) {
+ Int64Matcher m(node);
+ return m.Is(HeapObject::kMapOffset - kHeapObjectTag);
+ }
+ bool IsMapOffsetConstantMinusTag(int offset) {
+ return offset == HeapObject::kMapOffset - kHeapObjectTag;
+ }
Node* LoadFromObject(
MachineType type, Node* base, Node* offset,
LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) {
CHECK_EQ(needs_poisoning, LoadSensitivity::kSafe);
+ DCHECK_IMPLIES(V8_MAP_PACKING_BOOL && IsMapOffsetConstantMinusTag(offset),
+ type == MachineType::MapInHeader());
ObjectAccess access = {type, WriteBarrierKind::kNoWriteBarrier};
Node* load = AddNode(simplified()->LoadFromObject(access), base, offset);
return load;
@@ -169,18 +198,22 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
Node* value, WriteBarrierKind write_barrier) {
ObjectAccess access = {MachineType::TypeForRepresentation(rep),
write_barrier};
+ DCHECK(!IsMapOffsetConstantMinusTag(offset));
AddNode(simplified()->StoreToObject(access), object, offset, value);
}
void OptimizedStoreField(MachineRepresentation rep, Node* object, int offset,
Node* value, WriteBarrierKind write_barrier) {
+ DCHECK(!IsMapOffsetConstantMinusTag(offset));
AddNode(simplified()->StoreField(FieldAccess(
BaseTaggedness::kTaggedBase, offset, MaybeHandle<Name>(),
MaybeHandle<Map>(), Type::Any(),
MachineType::TypeForRepresentation(rep), write_barrier)),
object, value);
}
- void OptimizedStoreMap(Node* object, Node* value) {
- AddNode(simplified()->StoreField(AccessBuilder::ForMap()), object, value);
+ void OptimizedStoreMap(Node* object, Node* value,
+ WriteBarrierKind write_barrier = kMapWriteBarrier) {
+ AddNode(simplified()->StoreField(AccessBuilder::ForMap(write_barrier)),
+ object, value);
}
Node* Retain(Node* value) { return AddNode(common()->Retain(), value); }
@@ -245,6 +278,7 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
Node* AtomicStore(MachineRepresentation rep, Node* base, Node* index,
Node* value) {
+ DCHECK(!IsMapOffsetConstantMinusTag(index));
DCHECK_NE(rep, MachineRepresentation::kWord64);
return AddNode(machine()->Word32AtomicStore(rep), base, index, value);
}
@@ -547,6 +581,15 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
}
Node* Word32Ctz(Node* a) { return AddNode(machine()->Word32Ctz().op(), a); }
Node* Word64Ctz(Node* a) { return AddNode(machine()->Word64Ctz().op(), a); }
+
+ Node* Word32Select(Node* condition, Node* b, Node* c) {
+ return AddNode(machine()->Word32Select().op(), condition, b, c);
+ }
+
+ Node* Word64Select(Node* condition, Node* b, Node* c) {
+ return AddNode(machine()->Word64Select().op(), condition, b, c);
+ }
+
Node* StackPointerGreaterThan(Node* value) {
return AddNode(
machine()->StackPointerGreaterThan(StackCheckKind::kCodeStubAssembler),
diff --git a/chromium/v8/src/compiler/refs-map.cc b/chromium/v8/src/compiler/refs-map.cc
index 7cf465f00bc..149865b0933 100644
--- a/chromium/v8/src/compiler/refs-map.cc
+++ b/chromium/v8/src/compiler/refs-map.cc
@@ -27,6 +27,10 @@ RefsMap::Entry* RefsMap::LookupOrInsert(const Address& key) {
[]() { return nullptr; });
}
+ObjectData* RefsMap::Remove(const Address& key) {
+ return UnderlyingMap::Remove(key, RefsMap::Hash(key));
+}
+
uint32_t RefsMap::Hash(Address addr) { return static_cast<uint32_t>(addr); }
} // namespace compiler
diff --git a/chromium/v8/src/compiler/refs-map.h b/chromium/v8/src/compiler/refs-map.h
index 708da8359f9..3b26276072d 100644
--- a/chromium/v8/src/compiler/refs-map.h
+++ b/chromium/v8/src/compiler/refs-map.h
@@ -42,6 +42,7 @@ class RefsMap
// Wrappers around methods from UnderlyingMap
Entry* Lookup(const Address& key) const;
Entry* LookupOrInsert(const Address& key);
+ ObjectData* Remove(const Address& key);
private:
static uint32_t Hash(Address addr);
diff --git a/chromium/v8/src/compiler/representation-change.cc b/chromium/v8/src/compiler/representation-change.cc
index 4bfaa1cceea..750bc1fd563 100644
--- a/chromium/v8/src/compiler/representation-change.cc
+++ b/chromium/v8/src/compiler/representation-change.cc
@@ -237,6 +237,7 @@ Node* RepresentationChanger::GetRepresentationFor(
return node;
case MachineRepresentation::kCompressed:
case MachineRepresentation::kCompressedPointer:
+ case MachineRepresentation::kMapWord:
UNREACHABLE();
}
UNREACHABLE();
@@ -510,7 +511,8 @@ Node* RepresentationChanger::GetTaggedRepresentationFor(
break;
}
if (output_rep == MachineRepresentation::kTaggedSigned ||
- output_rep == MachineRepresentation::kTaggedPointer) {
+ output_rep == MachineRepresentation::kTaggedPointer ||
+ output_rep == MachineRepresentation::kMapWord) {
// this is a no-op.
return node;
}
@@ -795,12 +797,12 @@ Node* RepresentationChanger::MakeTruncatedInt32Constant(double value) {
return jsgraph()->Int32Constant(DoubleToInt32(value));
}
-Node* RepresentationChanger::InsertUnconditionalDeopt(Node* node,
- DeoptimizeReason reason) {
+Node* RepresentationChanger::InsertUnconditionalDeopt(
+ Node* node, DeoptimizeReason reason, const FeedbackSource& feedback) {
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
effect =
- jsgraph()->graph()->NewNode(simplified()->CheckIf(reason),
+ jsgraph()->graph()->NewNode(simplified()->CheckIf(reason, feedback),
jsgraph()->Int32Constant(0), effect, control);
Node* unreachable = effect = jsgraph()->graph()->NewNode(
jsgraph()->common()->Unreachable(), effect, control);
@@ -1102,7 +1104,7 @@ Node* RepresentationChanger::GetWord64RepresentationFor(
HeapObjectMatcher m(node);
if (m.HasResolvedValue() && m.Ref(broker_).IsBigInt() &&
use_info.truncation().IsUsedAsWord64()) {
- auto bigint = m.Ref(broker_).AsBigInt();
+ BigIntRef bigint = m.Ref(broker_).AsBigInt();
return jsgraph()->Int64Constant(
static_cast<int64_t>(bigint.AsUint64()));
}
@@ -1117,8 +1119,8 @@ Node* RepresentationChanger::GetWord64RepresentationFor(
if (!CanBeTaggedPointer(output_rep) &&
output_rep != MachineRepresentation::kWord64) {
DCHECK(!output_type.Equals(Type::BigInt()));
- Node* unreachable =
- InsertUnconditionalDeopt(use_node, DeoptimizeReason::kNotABigInt);
+ Node* unreachable = InsertUnconditionalDeopt(
+ use_node, DeoptimizeReason::kNotABigInt, use_info.feedback());
return jsgraph()->graph()->NewNode(
jsgraph()->common()->DeadValue(MachineRepresentation::kWord64),
unreachable);
@@ -1157,11 +1159,11 @@ Node* RepresentationChanger::GetWord64RepresentationFor(
MachineRepresentation::kWord64);
}
} else if (output_rep == MachineRepresentation::kFloat32) {
- if (output_type.Is(cache_->kInt64)) {
+ if (output_type.Is(cache_->kDoubleRepresentableInt64)) {
// float32 -> float64 -> int64
node = InsertChangeFloat32ToFloat64(node);
op = machine()->ChangeFloat64ToInt64();
- } else if (output_type.Is(cache_->kUint64)) {
+ } else if (output_type.Is(cache_->kDoubleRepresentableUint64)) {
// float32 -> float64 -> uint64
node = InsertChangeFloat32ToFloat64(node);
op = machine()->ChangeFloat64ToUint64();
@@ -1179,9 +1181,9 @@ Node* RepresentationChanger::GetWord64RepresentationFor(
MachineRepresentation::kWord64);
}
} else if (output_rep == MachineRepresentation::kFloat64) {
- if (output_type.Is(cache_->kInt64)) {
+ if (output_type.Is(cache_->kDoubleRepresentableInt64)) {
op = machine()->ChangeFloat64ToInt64();
- } else if (output_type.Is(cache_->kUint64)) {
+ } else if (output_type.Is(cache_->kDoubleRepresentableUint64)) {
op = machine()->ChangeFloat64ToUint64();
} else if (use_info.type_check() == TypeCheckKind::kSigned64 ||
use_info.type_check() == TypeCheckKind::kArrayIndex) {
@@ -1209,7 +1211,7 @@ Node* RepresentationChanger::GetWord64RepresentationFor(
use_node, use_info);
op = simplified()->TruncateBigIntToUint64();
} else if (CanBeTaggedPointer(output_rep)) {
- if (output_type.Is(cache_->kInt64)) {
+ if (output_type.Is(cache_->kDoubleRepresentableInt64)) {
op = simplified()->ChangeTaggedToInt64();
} else if (use_info.type_check() == TypeCheckKind::kSigned64) {
op = simplified()->CheckedTaggedToInt64(
@@ -1228,8 +1230,11 @@ Node* RepresentationChanger::GetWord64RepresentationFor(
if (output_type.Is(Type::BigInt())) {
return node;
} else {
- return TypeError(node, output_rep, output_type,
- MachineRepresentation::kWord64);
+ Node* unreachable = InsertUnconditionalDeopt(
+ use_node, DeoptimizeReason::kNotABigInt, use_info.feedback());
+ return jsgraph()->graph()->NewNode(
+ jsgraph()->common()->DeadValue(MachineRepresentation::kWord64),
+ unreachable);
}
} else {
return TypeError(node, output_rep, output_type,
diff --git a/chromium/v8/src/compiler/representation-change.h b/chromium/v8/src/compiler/representation-change.h
index 334237ed1fa..a4b05cbfe79 100644
--- a/chromium/v8/src/compiler/representation-change.h
+++ b/chromium/v8/src/compiler/representation-change.h
@@ -16,7 +16,7 @@ namespace compiler {
// Forward declarations.
class TypeCache;
-enum IdentifyZeros { kIdentifyZeros, kDistinguishZeros };
+enum IdentifyZeros : uint8_t { kIdentifyZeros, kDistinguishZeros };
class Truncation final {
public:
@@ -180,10 +180,10 @@ class UseInfo {
static UseInfo TruncatingWord32() {
return UseInfo(MachineRepresentation::kWord32, Truncation::Word32());
}
- static UseInfo TruncatingWord64() {
- return UseInfo(MachineRepresentation::kWord64, Truncation::Word64());
- }
static UseInfo CheckedBigIntTruncatingWord64(const FeedbackSource& feedback) {
+ // Note that Truncation::Word64() can safely use kIdentifyZeros, because
+ // TypeCheckKind::kBigInt will make sure we deopt for anything other than
+ // type BigInt anyway.
return UseInfo(MachineRepresentation::kWord64, Truncation::Word64(),
TypeCheckKind::kBigInt, feedback);
}
@@ -400,7 +400,8 @@ class V8_EXPORT_PRIVATE RepresentationChanger final {
Node* use_node);
Node* InsertConversion(Node* node, const Operator* op, Node* use_node);
Node* InsertTruncateInt64ToInt32(Node* node);
- Node* InsertUnconditionalDeopt(Node* node, DeoptimizeReason reason);
+ Node* InsertUnconditionalDeopt(Node* node, DeoptimizeReason reason,
+ const FeedbackSource& feedback = {});
JSGraph* jsgraph() const { return jsgraph_; }
Isolate* isolate() const;
diff --git a/chromium/v8/src/compiler/scheduled-machine-lowering.cc b/chromium/v8/src/compiler/scheduled-machine-lowering.cc
deleted file mode 100644
index fde836e4e83..00000000000
--- a/chromium/v8/src/compiler/scheduled-machine-lowering.cc
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2019 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/scheduled-machine-lowering.h"
-
-#include "src/compiler/compiler-source-position-table.h"
-#include "src/compiler/js-graph.h"
-#include "src/compiler/node-origin-table.h"
-#include "src/compiler/node-properties.h"
-#include "src/compiler/schedule.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-ScheduledMachineLowering::ScheduledMachineLowering(
- JSGraph* js_graph, Schedule* schedule, Zone* temp_zone,
- SourcePositionTable* source_positions, NodeOriginTable* node_origins,
- PoisoningMitigationLevel poison_level)
- : schedule_(schedule),
- graph_assembler_(js_graph, temp_zone, base::nullopt, schedule),
- select_lowering_(&graph_assembler_, js_graph->graph()),
- memory_lowering_(js_graph, temp_zone, &graph_assembler_, poison_level),
- reducers_({&select_lowering_, &memory_lowering_}, temp_zone),
- source_positions_(source_positions),
- node_origins_(node_origins) {}
-
-void ScheduledMachineLowering::Run() {
- // TODO(rmcilroy) We should not depend on having rpo_order on schedule, and
- // instead just do our own RPO walk here.
- for (BasicBlock* block : *(schedule()->rpo_order())) {
- BasicBlock::iterator instr = block->begin();
- BasicBlock::iterator end_instr = block->end();
- gasm()->Reset(block);
-
- for (; instr != end_instr; instr++) {
- Node* node = *instr;
- Reduction reduction;
- for (auto reducer : reducers_) {
- reduction = reducer->Reduce(node, nullptr);
- if (reduction.Changed()) break;
- }
- if (reduction.Changed()) {
- Node* replacement = reduction.replacement();
- if (replacement != node) {
- // Replace all uses of node and kill the node to make sure we don't
- // leave dangling dead uses.
- NodeProperties::ReplaceUses(node, replacement, gasm()->effect(),
- gasm()->control());
- node->Kill();
- } else {
- gasm()->AddNode(replacement);
- }
- } else {
- gasm()->AddNode(node);
- }
- }
-
- gasm()->FinalizeCurrentBlock(block);
- }
-
- schedule_->rpo_order()->clear();
-}
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
diff --git a/chromium/v8/src/compiler/scheduled-machine-lowering.h b/chromium/v8/src/compiler/scheduled-machine-lowering.h
deleted file mode 100644
index ca078a2a534..00000000000
--- a/chromium/v8/src/compiler/scheduled-machine-lowering.h
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright 2019 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_SCHEDULED_MACHINE_LOWERING_H_
-#define V8_COMPILER_SCHEDULED_MACHINE_LOWERING_H_
-
-#include "src/compiler/graph-assembler.h"
-#include "src/compiler/memory-lowering.h"
-#include "src/compiler/select-lowering.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-class NodeOriginTable;
-class Schedule;
-class SourcePositionTable;
-
-// Performs machine lowering on an already scheduled graph.
-class ScheduledMachineLowering final {
- public:
- ScheduledMachineLowering(JSGraph* js_graph, Schedule* schedule,
- Zone* temp_zone,
- SourcePositionTable* source_positions,
- NodeOriginTable* node_origins,
- PoisoningMitigationLevel poison_level);
- ~ScheduledMachineLowering() = default;
-
- void Run();
-
- private:
- bool LowerNode(Node* node);
-
- JSGraphAssembler* gasm() { return &graph_assembler_; }
- Schedule* schedule() { return schedule_; }
-
- Schedule* schedule_;
- JSGraphAssembler graph_assembler_;
- SelectLowering select_lowering_;
- MemoryLowering memory_lowering_;
- ZoneVector<Reducer*> reducers_;
- SourcePositionTable* source_positions_;
- NodeOriginTable* node_origins_;
-};
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
-
-#endif // V8_COMPILER_SCHEDULED_MACHINE_LOWERING_H_
diff --git a/chromium/v8/src/compiler/scheduler.cc b/chromium/v8/src/compiler/scheduler.cc
index fdfa16052b4..07a716bfa7a 100644
--- a/chromium/v8/src/compiler/scheduler.cc
+++ b/chromium/v8/src/compiler/scheduler.cc
@@ -105,15 +105,8 @@ Scheduler::Placement Scheduler::InitializePlacement(Node* node) {
data->placement_ = (p == kFixed ? kFixed : kCoupled);
break;
}
-#define DEFINE_CONTROL_CASE(V) case IrOpcode::k##V:
- CONTROL_OP_LIST(DEFINE_CONTROL_CASE)
-#undef DEFINE_CONTROL_CASE
- {
- // Control nodes that were not control-reachable from end may float.
- data->placement_ = kSchedulable;
- break;
- }
default:
+ // Control nodes that were not control-reachable from end may float.
data->placement_ = kSchedulable;
break;
}
@@ -172,31 +165,32 @@ void Scheduler::UpdatePlacement(Node* node, Placement placement) {
// Reduce the use count of the node's inputs to potentially make them
// schedulable. If all the uses of a node have been scheduled, then the node
// itself can be scheduled.
+ base::Optional<int> coupled_control_edge = GetCoupledControlEdge(node);
for (Edge const edge : node->input_edges()) {
- DecrementUnscheduledUseCount(edge.to(), edge.index(), edge.from());
+ DCHECK_EQ(node, edge.from());
+ if (edge.index() != coupled_control_edge) {
+ DecrementUnscheduledUseCount(edge.to(), node);
+ }
}
data->placement_ = placement;
}
-
-bool Scheduler::IsCoupledControlEdge(Node* node, int index) {
- return GetPlacement(node) == kCoupled &&
- NodeProperties::FirstControlIndex(node) == index;
+base::Optional<int> Scheduler::GetCoupledControlEdge(Node* node) {
+ if (GetPlacement(node) == kCoupled) {
+ return NodeProperties::FirstControlIndex(node);
+ }
+ return {};
}
-
-void Scheduler::IncrementUnscheduledUseCount(Node* node, int index,
- Node* from) {
- // Make sure that control edges from coupled nodes are not counted.
- if (IsCoupledControlEdge(from, index)) return;
-
+void Scheduler::IncrementUnscheduledUseCount(Node* node, Node* from) {
// Tracking use counts for fixed nodes is useless.
if (GetPlacement(node) == kFixed) return;
// Use count for coupled nodes is summed up on their control.
if (GetPlacement(node) == kCoupled) {
- Node* control = NodeProperties::GetControlInput(node);
- return IncrementUnscheduledUseCount(control, index, from);
+ node = NodeProperties::GetControlInput(node);
+ DCHECK_NE(GetPlacement(node), Placement::kFixed);
+ DCHECK_NE(GetPlacement(node), Placement::kCoupled);
}
++(GetData(node)->unscheduled_count_);
@@ -207,19 +201,15 @@ void Scheduler::IncrementUnscheduledUseCount(Node* node, int index,
}
}
-
-void Scheduler::DecrementUnscheduledUseCount(Node* node, int index,
- Node* from) {
- // Make sure that control edges from coupled nodes are not counted.
- if (IsCoupledControlEdge(from, index)) return;
-
+void Scheduler::DecrementUnscheduledUseCount(Node* node, Node* from) {
// Tracking use counts for fixed nodes is useless.
if (GetPlacement(node) == kFixed) return;
// Use count for coupled nodes is summed up on their control.
if (GetPlacement(node) == kCoupled) {
- Node* control = NodeProperties::GetControlInput(node);
- return DecrementUnscheduledUseCount(control, index, from);
+ node = NodeProperties::GetControlInput(node);
+ DCHECK_NE(GetPlacement(node), Placement::kFixed);
+ DCHECK_NE(GetPlacement(node), Placement::kCoupled);
}
DCHECK_LT(0, GetData(node)->unscheduled_count_);
@@ -235,7 +225,6 @@ void Scheduler::DecrementUnscheduledUseCount(Node* node, int index,
}
}
-
// -----------------------------------------------------------------------------
// Phase 1: Build control-flow graph.
@@ -738,6 +727,8 @@ class SpecialRPONumberer : public ZoneObject {
return empty_;
}
+ bool HasLoopBlocks() const { return loops_.size() != 0; }
+
private:
using Backedge = std::pair<BasicBlock*, size_t>;
@@ -1221,10 +1212,26 @@ void Scheduler::GenerateDominatorTree() {
class PrepareUsesVisitor {
public:
- explicit PrepareUsesVisitor(Scheduler* scheduler)
- : scheduler_(scheduler), schedule_(scheduler->schedule_) {}
+ explicit PrepareUsesVisitor(Scheduler* scheduler, Graph* graph, Zone* zone)
+ : scheduler_(scheduler),
+ schedule_(scheduler->schedule_),
+ graph_(graph),
+ visited_(graph_->NodeCount(), false, zone),
+ stack_(zone) {}
+
+ void Run() {
+ InitializePlacement(graph_->end());
+ while (!stack_.empty()) {
+ Node* node = stack_.top();
+ stack_.pop();
+ VisitInputs(node);
+ }
+ }
- void Pre(Node* node) {
+ private:
+ void InitializePlacement(Node* node) {
+ TRACE("Pre #%d:%s\n", node->id(), node->op()->mnemonic());
+ DCHECK(!Visited(node));
if (scheduler_->InitializePlacement(node) == Scheduler::kFixed) {
// Fixed nodes are always roots for schedule late.
scheduler_->schedule_root_nodes_.push_back(node);
@@ -1241,21 +1248,37 @@ class PrepareUsesVisitor {
schedule_->AddNode(block, node);
}
}
+ stack_.push(node);
+ visited_[node->id()] = true;
}
- void PostEdge(Node* from, int index, Node* to) {
- // If the edge is from an unscheduled node, then tally it in the use count
- // for all of its inputs. The same criterion will be used in ScheduleLate
- // for decrementing use counts.
- if (!schedule_->IsScheduled(from)) {
- DCHECK_NE(Scheduler::kFixed, scheduler_->GetPlacement(from));
- scheduler_->IncrementUnscheduledUseCount(to, index, from);
+ void VisitInputs(Node* node) {
+ DCHECK_NE(scheduler_->GetPlacement(node), Scheduler::kUnknown);
+ bool is_scheduled = schedule_->IsScheduled(node);
+ base::Optional<int> coupled_control_edge =
+ scheduler_->GetCoupledControlEdge(node);
+ for (auto edge : node->input_edges()) {
+ Node* to = edge.to();
+ DCHECK_EQ(node, edge.from());
+ if (!Visited(to)) {
+ InitializePlacement(to);
+ }
+ TRACE("PostEdge #%d:%s->#%d:%s\n", node->id(), node->op()->mnemonic(),
+ to->id(), to->op()->mnemonic());
+ DCHECK_NE(scheduler_->GetPlacement(to), Scheduler::kUnknown);
+ if (!is_scheduled && edge.index() != coupled_control_edge) {
+ scheduler_->IncrementUnscheduledUseCount(to, node);
+ }
}
}
- private:
+ bool Visited(Node* node) { return visited_[node->id()]; }
+
Scheduler* scheduler_;
Schedule* schedule_;
+ Graph* graph_;
+ BoolVector visited_;
+ ZoneStack<Node*> stack_;
};
@@ -1264,28 +1287,8 @@ void Scheduler::PrepareUses() {
// Count the uses of every node, which is used to ensure that all of a
// node's uses are scheduled before the node itself.
- PrepareUsesVisitor prepare_uses(this);
-
- // TODO(turbofan): simplify the careful pre/post ordering here.
- BoolVector visited(graph_->NodeCount(), false, zone_);
- ZoneStack<Node::InputEdges::iterator> stack(zone_);
- Node* node = graph_->end();
- prepare_uses.Pre(node);
- visited[node->id()] = true;
- stack.push(node->input_edges().begin());
- while (!stack.empty()) {
- tick_counter_->TickAndMaybeEnterSafepoint();
- Edge edge = *stack.top();
- Node* node = edge.to();
- if (visited[node->id()]) {
- prepare_uses.PostEdge(edge.from(), edge.index(), edge.to());
- if (++stack.top() == edge.from()->input_edges().end()) stack.pop();
- } else {
- prepare_uses.Pre(node);
- visited[node->id()] = true;
- if (node->InputCount() > 0) stack.push(node->input_edges().begin());
- }
- }
+ PrepareUsesVisitor prepare_uses(this, graph_, zone_);
+ prepare_uses.Run();
}
@@ -1302,11 +1305,12 @@ class ScheduleEarlyNodeVisitor {
void Run(NodeVector* roots) {
for (Node* const root : *roots) {
queue_.push(root);
- while (!queue_.empty()) {
- scheduler_->tick_counter_->TickAndMaybeEnterSafepoint();
- VisitNode(queue_.front());
- queue_.pop();
- }
+ }
+
+ while (!queue_.empty()) {
+ scheduler_->tick_counter_->TickAndMaybeEnterSafepoint();
+ VisitNode(queue_.front());
+ queue_.pop();
}
}
@@ -1380,6 +1384,11 @@ class ScheduleEarlyNodeVisitor {
void Scheduler::ScheduleEarly() {
+ if (!special_rpo_->HasLoopBlocks()) {
+ TRACE("--- NO LOOPS SO SKIPPING SCHEDULE EARLY --------------------\n");
+ return;
+ }
+
TRACE("--- SCHEDULE EARLY -----------------------------------------\n");
if (FLAG_trace_turbo_scheduler) {
TRACE("roots: ");
@@ -1457,6 +1466,7 @@ class ScheduleLateNodeVisitor {
// The schedule early block dominates the schedule late block.
BasicBlock* min_block = scheduler_->GetData(node)->minimum_block_;
DCHECK_EQ(min_block, BasicBlock::GetCommonDominator(block, min_block));
+
TRACE(
"Schedule late of #%d:%s is id:%d at loop depth %d, minimum = id:%d\n",
node->id(), node->op()->mnemonic(), block->id().ToInt(),
@@ -1468,6 +1478,7 @@ class ScheduleLateNodeVisitor {
BasicBlock* hoist_block = GetHoistBlock(block);
if (hoist_block &&
hoist_block->dominator_depth() >= min_block->dominator_depth()) {
+ DCHECK(scheduler_->special_rpo_->HasLoopBlocks());
do {
TRACE(" hoisting #%d:%s to block id:%d\n", node->id(),
node->op()->mnemonic(), hoist_block->id().ToInt());
@@ -1597,6 +1608,7 @@ class ScheduleLateNodeVisitor {
}
BasicBlock* GetHoistBlock(BasicBlock* block) {
+ if (!scheduler_->special_rpo_->HasLoopBlocks()) return nullptr;
if (block->IsLoopHeader()) return block->dominator();
// We have to check to make sure that the {block} dominates all
// of the outgoing blocks. If it doesn't, then there is a path
@@ -1717,9 +1729,13 @@ class ScheduleLateNodeVisitor {
Node* CloneNode(Node* node) {
int const input_count = node->InputCount();
+ base::Optional<int> coupled_control_edge =
+ scheduler_->GetCoupledControlEdge(node);
for (int index = 0; index < input_count; ++index) {
- Node* const input = node->InputAt(index);
- scheduler_->IncrementUnscheduledUseCount(input, index, node);
+ if (index != coupled_control_edge) {
+ Node* const input = node->InputAt(index);
+ scheduler_->IncrementUnscheduledUseCount(input, node);
+ }
}
Node* const copy = scheduler_->graph_->CloneNode(node);
TRACE(("clone #%d:%s -> #%d\n"), node->id(), node->op()->mnemonic(),
diff --git a/chromium/v8/src/compiler/scheduler.h b/chromium/v8/src/compiler/scheduler.h
index d8934ec1572..9d9fd6aed0d 100644
--- a/chromium/v8/src/compiler/scheduler.h
+++ b/chromium/v8/src/compiler/scheduler.h
@@ -103,9 +103,10 @@ class V8_EXPORT_PRIVATE Scheduler {
void UpdatePlacement(Node* node, Placement placement);
bool IsLive(Node* node);
- inline bool IsCoupledControlEdge(Node* node, int index);
- void IncrementUnscheduledUseCount(Node* node, int index, Node* from);
- void DecrementUnscheduledUseCount(Node* node, int index, Node* from);
+ // If the node is coupled, returns the coupled control edge index.
+ inline base::Optional<int> GetCoupledControlEdge(Node* node);
+ void IncrementUnscheduledUseCount(Node* node, Node* from);
+ void DecrementUnscheduledUseCount(Node* node, Node* from);
static void PropagateImmediateDominators(BasicBlock* block);
diff --git a/chromium/v8/src/compiler/select-lowering.cc b/chromium/v8/src/compiler/select-lowering.cc
index 590df6ae1f0..e346e9171d7 100644
--- a/chromium/v8/src/compiler/select-lowering.cc
+++ b/chromium/v8/src/compiler/select-lowering.cc
@@ -34,7 +34,11 @@ Reduction SelectLowering::LowerSelect(Node* node) {
Node* vtrue = node->InputAt(1);
Node* vfalse = node->InputAt(2);
- gasm()->InitializeEffectControl(start(), start());
+ bool reset_gasm = false;
+ if (gasm()->control() == nullptr) {
+ gasm()->InitializeEffectControl(start(), start());
+ reset_gasm = true;
+ }
auto done = __ MakeLabel(p.representation());
@@ -42,6 +46,10 @@ Reduction SelectLowering::LowerSelect(Node* node) {
__ Goto(&done, vfalse);
__ Bind(&done);
+ if (reset_gasm) {
+ gasm()->Reset(nullptr);
+ }
+
return Changed(done.PhiAt(0));
}
diff --git a/chromium/v8/src/compiler/serializer-for-background-compilation.cc b/chromium/v8/src/compiler/serializer-for-background-compilation.cc
index ad6879a02dd..893d12f1e9c 100644
--- a/chromium/v8/src/compiler/serializer-for-background-compilation.cc
+++ b/chromium/v8/src/compiler/serializer-for-background-compilation.cc
@@ -21,6 +21,7 @@
#include "src/objects/js-regexp-inl.h"
#include "src/objects/literal-objects-inl.h"
#include "src/objects/shared-function-info-inl.h"
+#include "src/objects/template-objects-inl.h"
#include "src/zone/zone-containers.h"
#include "src/zone/zone.h"
@@ -1071,8 +1072,9 @@ SerializerForBackgroundCompilation::SerializerForBackgroundCompilation(
CompilationSubject(closure, broker_->isolate(), zone()))),
arguments_(zone()) {
closure_hints_.AddConstant(closure, zone(), broker_);
- JSFunctionRef(broker, closure).Serialize();
- JSFunctionRef(broker, closure).SerializeCodeAndFeedback();
+ JSFunctionRef closure_ref = MakeRef(broker, closure);
+ closure_ref.Serialize();
+ closure_ref.SerializeCodeAndFeedback();
TRACE_BROKER(broker_, "Hints for <closure>: " << closure_hints_);
TRACE_BROKER(broker_, "Initial environment:\n" << *environment_);
@@ -1099,8 +1101,9 @@ SerializerForBackgroundCompilation::SerializerForBackgroundCompilation(
Handle<JSFunction> closure;
if (function.closure().ToHandle(&closure)) {
closure_hints_.AddConstant(closure, zone(), broker);
- JSFunctionRef(broker, closure).Serialize();
- JSFunctionRef(broker, closure).SerializeCodeAndFeedback();
+ JSFunctionRef closure_ref = MakeRef(broker, closure);
+ closure_ref.Serialize();
+ closure_ref.SerializeCodeAndFeedback();
} else {
closure_hints_.AddVirtualClosure(function.virtual_closure(), zone(),
broker);
@@ -1145,8 +1148,8 @@ Hints SerializerForBackgroundCompilation::Run() {
TRACE_BROKER_MEMORY(broker(), "[serializer start] Broker zone usage: "
<< broker()->zone()->allocation_size());
- SharedFunctionInfoRef shared(broker(), function().shared());
- FeedbackVectorRef feedback_vector_ref(broker(), feedback_vector());
+ SharedFunctionInfoRef shared = MakeRef(broker(), function().shared());
+ FeedbackVectorRef feedback_vector_ref = MakeRef(broker(), feedback_vector());
if (!broker()->ShouldBeSerializedForCompilation(shared, feedback_vector_ref,
arguments_)) {
TRACE_BROKER(broker(),
@@ -1342,7 +1345,7 @@ void SerializerForBackgroundCompilation::VisitGetIterator(
FeedbackSlot call_slot = iterator->GetSlotOperand(2);
Handle<Name> name = broker()->isolate()->factory()->iterator_symbol();
- ProcessNamedPropertyAccess(receiver, NameRef(broker(), name), load_slot,
+ ProcessNamedPropertyAccess(receiver, MakeRef(broker(), name), load_slot,
AccessMode::kLoad);
if (environment()->IsDead()) return;
@@ -1360,10 +1363,10 @@ void SerializerForBackgroundCompilation::VisitGetSuperConstructor(
for (auto constant : environment()->accumulator_hints().constants()) {
// For JSNativeContextSpecialization::ReduceJSGetSuperConstructor.
if (!constant->IsJSFunction()) continue;
- MapRef map(broker(),
- handle(HeapObject::cast(*constant).map(), broker()->isolate()));
+ MapRef map = MakeRef(broker(), handle(HeapObject::cast(*constant).map(),
+ broker()->isolate()));
map.SerializePrototype();
- ObjectRef proto = map.prototype();
+ ObjectRef proto = map.prototype().value();
if (proto.IsHeapObject() && proto.AsHeapObject().map().is_constructor()) {
result_hints.AddConstant(proto.object(), zone(), broker());
}
@@ -1373,8 +1376,9 @@ void SerializerForBackgroundCompilation::VisitGetSuperConstructor(
void SerializerForBackgroundCompilation::VisitGetTemplateObject(
BytecodeArrayIterator* iterator) {
- TemplateObjectDescriptionRef description(
- broker(), iterator->GetConstantForIndexOperand(0, broker()->isolate()));
+ MakeRef(broker(),
+ Handle<TemplateObjectDescription>::cast(
+ iterator->GetConstantForIndexOperand(0, broker()->isolate())));
FeedbackSlot slot = iterator->GetSlotOperand(1);
FeedbackSource source(feedback_vector(), slot);
@@ -1511,12 +1515,12 @@ void SerializerForBackgroundCompilation::VisitInvokeIntrinsic(
case Runtime::kInlineGetImportMetaObject: {
Hints const& context_hints = environment()->current_context_hints();
for (auto x : context_hints.constants()) {
- ContextRef(broker(), x)
+ MakeRef(broker(), Handle<Context>::cast(x))
.GetModule(SerializationPolicy::kSerializeIfNeeded)
.Serialize();
}
for (auto x : context_hints.virtual_contexts()) {
- ContextRef(broker(), x.context)
+ MakeRef(broker(), Handle<Context>::cast(x.context))
.GetModule(SerializationPolicy::kSerializeIfNeeded)
.Serialize();
}
@@ -1535,7 +1539,7 @@ void SerializerForBackgroundCompilation::VisitLdaConstant(
iterator->GetConstantForIndexOperand(0, broker()->isolate());
// TODO(v8:7790): FixedArrays still need to be serialized until they are
// moved to kNeverSerialized.
- if (!FLAG_turbo_direct_heap_access || constant->IsFixedArray()) {
+ if (!broker()->is_concurrent_inlining() || constant->IsFixedArray()) {
ObjectRef(broker(), constant);
}
environment()->accumulator_hints() = Hints::SingleConstant(constant, zone());
@@ -1579,7 +1583,7 @@ void SerializerForBackgroundCompilation::ProcessContextAccess(
for (auto x : context_hints.constants()) {
if (x->IsContext()) {
// Walk this context to the given depth and serialize the slot found.
- ContextRef context_ref(broker(), x);
+ ContextRef context_ref = MakeRef(broker(), Handle<Context>::cast(x));
size_t remaining_depth = depth;
context_ref = context_ref.previous(
&remaining_depth, SerializationPolicy::kSerializeIfNeeded);
@@ -1590,7 +1594,8 @@ void SerializerForBackgroundCompilation::ProcessContextAccess(
}
for (auto x : context_hints.virtual_contexts()) {
if (x.distance <= static_cast<unsigned int>(depth)) {
- ContextRef context_ref(broker(), x.context);
+ ContextRef context_ref =
+ MakeRef(broker(), Handle<Context>::cast(x.context));
size_t remaining_depth = depth - x.distance;
context_ref = context_ref.previous(
&remaining_depth, SerializationPolicy::kSerializeIfNeeded);
@@ -1722,7 +1727,7 @@ void SerializerForBackgroundCompilation::VisitCreateRegExpLiteral(
BytecodeArrayIterator* iterator) {
Handle<String> constant_pattern = Handle<String>::cast(
iterator->GetConstantForIndexOperand(0, broker()->isolate()));
- StringRef description(broker(), constant_pattern);
+ MakeRef(broker(), constant_pattern);
FeedbackSlot slot = iterator->GetSlotOperand(1);
FeedbackSource source(feedback_vector(), slot);
broker()->ProcessFeedbackForRegExpLiteral(source);
@@ -1734,8 +1739,7 @@ void SerializerForBackgroundCompilation::VisitCreateArrayLiteral(
Handle<ArrayBoilerplateDescription> array_boilerplate_description =
Handle<ArrayBoilerplateDescription>::cast(
iterator->GetConstantForIndexOperand(0, broker()->isolate()));
- ArrayBoilerplateDescriptionRef description(broker(),
- array_boilerplate_description);
+ MakeRef(broker(), array_boilerplate_description);
FeedbackSlot slot = iterator->GetSlotOperand(1);
FeedbackSource source(feedback_vector(), slot);
broker()->ProcessFeedbackForArrayOrObjectLiteral(source);
@@ -1755,7 +1759,7 @@ void SerializerForBackgroundCompilation::VisitCreateObjectLiteral(
Handle<ObjectBoilerplateDescription> constant_properties =
Handle<ObjectBoilerplateDescription>::cast(
iterator->GetConstantForIndexOperand(0, broker()->isolate()));
- ObjectBoilerplateDescriptionRef description(broker(), constant_properties);
+ MakeRef(broker(), constant_properties);
FeedbackSlot slot = iterator->GetSlotOperand(1);
FeedbackSource source(feedback_vector(), slot);
broker()->ProcessFeedbackForArrayOrObjectLiteral(source);
@@ -1804,7 +1808,7 @@ void SerializerForBackgroundCompilation::ProcessCreateContext(
Handle<ScopeInfo> scope_info =
Handle<ScopeInfo>::cast(iterator->GetConstantForIndexOperand(
scopeinfo_operand_index, broker()->isolate()));
- ScopeInfoRef scope_info_ref(broker(), scope_info);
+ ScopeInfoRef scope_info_ref = MakeRef(broker(), scope_info);
scope_info_ref.SerializeScopeInfoChain();
Hints const& current_context_hints = environment()->current_context_hints();
@@ -1836,9 +1840,9 @@ void SerializerForBackgroundCompilation::VisitCreateClosure(
iterator->GetConstantForIndexOperand(0, broker()->isolate()));
Handle<FeedbackCell> feedback_cell =
feedback_vector()->GetClosureFeedbackCell(iterator->GetIndexOperand(1));
- FeedbackCellRef feedback_cell_ref(broker(), feedback_cell);
+ MakeRef(broker(), feedback_cell);
Handle<Object> cell_value(feedback_cell->value(), broker()->isolate());
- ObjectRef cell_value_ref(broker(), cell_value);
+ MakeRef(broker(), cell_value);
Hints result_hints;
if (cell_value->IsFeedbackVector()) {
@@ -2109,8 +2113,8 @@ void SerializerForBackgroundCompilation::ProcessCalleeForCallOrConstruct(
const HintsVector* actual_arguments = &arguments;
HintsVector expanded_arguments(zone());
if (callee->IsJSBoundFunction()) {
- JSBoundFunctionRef bound_function(broker(),
- Handle<JSBoundFunction>::cast(callee));
+ JSBoundFunctionRef bound_function =
+ MakeRef(broker(), Handle<JSBoundFunction>::cast(callee));
if (!bound_function.Serialize()) return;
callee = UnrollBoundFunction(bound_function, broker(), arguments,
&expanded_arguments, zone())
@@ -2119,7 +2123,7 @@ void SerializerForBackgroundCompilation::ProcessCalleeForCallOrConstruct(
}
if (!callee->IsJSFunction()) return;
- JSFunctionRef function(broker(), Handle<JSFunction>::cast(callee));
+ JSFunctionRef function = MakeRef(broker(), Handle<JSFunction>::cast(callee));
function.Serialize();
Callee new_callee(function.object());
ProcessCalleeForCallOrConstruct(new_callee, new_target, *actual_arguments,
@@ -2266,13 +2270,13 @@ void SerializerForBackgroundCompilation::ProcessApiCall(
Builtins::kCallFunctionTemplate_CheckAccessAndCompatibleReceiver}) {
ObjectRef(broker(), broker()->isolate()->builtins()->builtin_handle(b));
}
- FunctionTemplateInfoRef target_template_info(
- broker(),
- broker()->CanonicalPersistentHandle(target->function_data(kAcquireLoad)));
+ FunctionTemplateInfoRef target_template_info =
+ MakeRef(broker(),
+ FunctionTemplateInfo::cast(target->function_data(kAcquireLoad)));
if (!target_template_info.has_call_code()) return;
target_template_info.SerializeCallCode();
- SharedFunctionInfoRef target_ref(broker(), target);
+ SharedFunctionInfoRef target_ref = MakeRef(broker(), target);
target_ref.SerializeFunctionTemplateInfo();
if (target_template_info.accept_any_receiver() &&
@@ -2308,7 +2312,7 @@ void SerializerForBackgroundCompilation::ProcessApiCall(
void SerializerForBackgroundCompilation::ProcessReceiverMapForApiCall(
FunctionTemplateInfoRef target, Handle<Map> receiver) {
if (!receiver->is_access_check_needed()) {
- MapRef receiver_map(broker(), receiver);
+ MapRef receiver_map = MakeRef(broker(), receiver);
TRACE_BROKER(broker(), "Serializing holder for target: " << target);
target.LookupHolderOfExpectedType(receiver_map,
SerializationPolicy::kSerializeIfNeeded);
@@ -2356,10 +2360,10 @@ void SerializerForBackgroundCompilation::ProcessBuiltinCall(
if (arguments.size() >= 1) {
ProcessMapHintsForPromises(arguments[0]);
}
- SharedFunctionInfoRef(
+ MakeRef(
broker(),
broker()->isolate()->factory()->promise_catch_finally_shared_fun());
- SharedFunctionInfoRef(
+ MakeRef(
broker(),
broker()->isolate()->factory()->promise_then_finally_shared_fun());
}
@@ -2496,16 +2500,14 @@ void SerializerForBackgroundCompilation::ProcessBuiltinCall(
kMissingArgumentsAreUnknown, result_hints);
}
}
- SharedFunctionInfoRef(
- broker(), broker()
- ->isolate()
- ->factory()
- ->promise_capability_default_reject_shared_fun());
- SharedFunctionInfoRef(
- broker(), broker()
- ->isolate()
- ->factory()
- ->promise_capability_default_resolve_shared_fun());
+ MakeRef(broker(), broker()
+ ->isolate()
+ ->factory()
+ ->promise_capability_default_reject_shared_fun());
+ MakeRef(broker(), broker()
+ ->isolate()
+ ->factory()
+ ->promise_capability_default_resolve_shared_fun());
break;
case Builtins::kFunctionPrototypeCall:
@@ -2529,7 +2531,7 @@ void SerializerForBackgroundCompilation::ProcessBuiltinCall(
if (arguments.size() >= 2) {
for (auto constant : arguments[1].constants()) {
if (constant->IsJSFunction()) {
- JSFunctionRef(broker(), constant).Serialize();
+ MakeRef(broker(), Handle<JSFunction>::cast(constant)).Serialize();
}
}
}
@@ -2596,7 +2598,8 @@ void SerializerForBackgroundCompilation::ProcessHintsForOrdinaryHasInstance(
// For JSNativeContextSpecialization::ReduceJSOrdinaryHasInstance.
if (constructor->IsHeapObject()) {
ProcessConstantForOrdinaryHasInstance(
- HeapObjectRef(broker(), constructor), &walk_prototypes);
+ MakeRef(broker(), Handle<HeapObject>::cast(constructor)),
+ &walk_prototypes);
}
}
// For JSNativeContextSpecialization::ReduceJSHasInPrototypeChain.
@@ -2606,10 +2609,10 @@ void SerializerForBackgroundCompilation::ProcessHintsForOrdinaryHasInstance(
void SerializerForBackgroundCompilation::ProcessHintsForHasInPrototypeChain(
Hints const& instance_hints) {
auto processMap = [&](Handle<Map> map_handle) {
- MapRef map(broker(), map_handle);
+ MapRef map = MakeRef(broker(), map_handle);
while (map.IsJSObjectMap()) {
map.SerializePrototype();
- map = map.prototype().map();
+ map = map.prototype().value().map();
}
};
@@ -2627,8 +2630,8 @@ void SerializerForBackgroundCompilation::ProcessHintsForPromiseResolve(
Hints const& resolution_hints) {
auto processMap = [&](Handle<Map> map) {
broker()->GetPropertyAccessInfo(
- MapRef(broker(), map),
- NameRef(broker(), broker()->isolate()->factory()->then_string()),
+ MakeRef(broker(), map),
+ MakeRef(broker(), broker()->isolate()->factory()->then_string()),
AccessMode::kLoad, dependencies(),
SerializationPolicy::kSerializeIfNeeded);
};
@@ -2650,25 +2653,25 @@ void SerializerForBackgroundCompilation::ProcessMapHintsForPromises(
if (!constant->IsJSPromise()) continue;
Handle<Map> map(Handle<HeapObject>::cast(constant)->map(),
broker()->isolate());
- MapRef(broker(), map).SerializePrototype();
+ MakeRef(broker(), map).SerializePrototype();
}
for (auto map : receiver_hints.maps()) {
if (!map->IsJSPromiseMap()) continue;
- MapRef(broker(), map).SerializePrototype();
+ MakeRef(broker(), map).SerializePrototype();
}
}
PropertyAccessInfo SerializerForBackgroundCompilation::ProcessMapForRegExpTest(
MapRef map) {
PropertyAccessInfo ai_exec = broker()->GetPropertyAccessInfo(
- map, NameRef(broker(), broker()->isolate()->factory()->exec_string()),
+ map, MakeRef(broker(), broker()->isolate()->factory()->exec_string()),
AccessMode::kLoad, dependencies(),
SerializationPolicy::kSerializeIfNeeded);
Handle<JSObject> holder;
if (ai_exec.IsFastDataConstant() && ai_exec.holder().ToHandle(&holder)) {
// The property is on the prototype chain.
- JSObjectRef holder_ref(broker(), holder);
+ JSObjectRef holder_ref = MakeRef(broker(), holder);
holder_ref.GetOwnFastDataProperty(ai_exec.field_representation(),
ai_exec.field_index(),
SerializationPolicy::kSerializeIfNeeded);
@@ -2680,14 +2683,14 @@ void SerializerForBackgroundCompilation::ProcessHintsForRegExpTest(
Hints const& regexp_hints) {
for (auto hint : regexp_hints.constants()) {
if (!hint->IsJSRegExp()) continue;
- Handle<JSRegExp> regexp(Handle<JSRegExp>::cast(hint));
+ Handle<JSObject> regexp(Handle<JSObject>::cast(hint));
Handle<Map> regexp_map(regexp->map(), broker()->isolate());
PropertyAccessInfo ai_exec =
- ProcessMapForRegExpTest(MapRef(broker(), regexp_map));
+ ProcessMapForRegExpTest(MakeRef(broker(), regexp_map));
Handle<JSObject> holder;
if (ai_exec.IsFastDataConstant() && !ai_exec.holder().ToHandle(&holder)) {
// The property is on the object itself.
- JSObjectRef holder_ref(broker(), regexp);
+ JSObjectRef holder_ref = MakeRef(broker(), regexp);
holder_ref.GetOwnFastDataProperty(
ai_exec.field_representation(), ai_exec.field_index(),
SerializationPolicy::kSerializeIfNeeded);
@@ -2696,7 +2699,7 @@ void SerializerForBackgroundCompilation::ProcessHintsForRegExpTest(
for (auto map : regexp_hints.maps()) {
if (!map->IsJSRegExpMap()) continue;
- ProcessMapForRegExpTest(MapRef(broker(), map));
+ ProcessMapForRegExpTest(MakeRef(broker(), map));
}
}
@@ -2718,14 +2721,15 @@ void SerializerForBackgroundCompilation::ProcessHintsForFunctionBind(
Hints const& receiver_hints) {
for (auto constant : receiver_hints.constants()) {
if (!constant->IsJSFunction()) continue;
- JSFunctionRef function(broker(), constant);
+ JSFunctionRef function =
+ MakeRef(broker(), Handle<JSFunction>::cast(constant));
function.Serialize();
ProcessMapForFunctionBind(function.map());
}
for (auto map : receiver_hints.maps()) {
if (!map->IsJSFunctionMap()) continue;
- MapRef map_ref(broker(), map);
+ MapRef map_ref = MakeRef(broker(), map);
ProcessMapForFunctionBind(map_ref);
}
}
@@ -2734,13 +2738,13 @@ void SerializerForBackgroundCompilation::ProcessHintsForObjectGetPrototype(
Hints const& object_hints) {
for (auto constant : object_hints.constants()) {
if (!constant->IsHeapObject()) continue;
- HeapObjectRef object(broker(), constant);
+ HeapObjectRef object =
+ MakeRef(broker(), Handle<HeapObject>::cast(constant));
object.map().SerializePrototype();
}
for (auto map : object_hints.maps()) {
- MapRef map_ref(broker(), map);
- map_ref.SerializePrototype();
+ MakeRef(broker(), map).SerializePrototype();
}
}
@@ -2850,7 +2854,7 @@ void SerializerForBackgroundCompilation::ProcessGlobalAccess(FeedbackSlot slot,
void SerializerForBackgroundCompilation::VisitLdaGlobal(
BytecodeArrayIterator* iterator) {
- NameRef(broker(),
+ MakeRef(broker(),
iterator->GetConstantForIndexOperand(0, broker()->isolate()));
FeedbackSlot slot = iterator->GetSlotOperand(1);
ProcessGlobalAccess(slot, true);
@@ -2863,15 +2867,15 @@ void SerializerForBackgroundCompilation::VisitLdaGlobalInsideTypeof(
void SerializerForBackgroundCompilation::VisitLdaLookupSlot(
BytecodeArrayIterator* iterator) {
- ObjectRef(broker(),
- iterator->GetConstantForIndexOperand(0, broker()->isolate()));
+ MakeRef(broker(),
+ iterator->GetConstantForIndexOperand(0, broker()->isolate()));
environment()->accumulator_hints() = Hints();
}
void SerializerForBackgroundCompilation::VisitLdaLookupSlotInsideTypeof(
BytecodeArrayIterator* iterator) {
- ObjectRef(broker(),
- iterator->GetConstantForIndexOperand(0, broker()->isolate()));
+ MakeRef(broker(),
+ iterator->GetConstantForIndexOperand(0, broker()->isolate()));
environment()->accumulator_hints() = Hints();
}
@@ -2883,7 +2887,7 @@ void SerializerForBackgroundCompilation::ProcessCheckContextExtensions(
ProcessContextAccess(context_hints, Context::EXTENSION_INDEX, i,
kSerializeSlot);
}
- SharedFunctionInfoRef shared(broker(), function().shared());
+ SharedFunctionInfoRef shared = MakeRef(broker(), function().shared());
shared.SerializeScopeInfoChain();
}
@@ -2906,7 +2910,7 @@ void SerializerForBackgroundCompilation::VisitLdaLookupGlobalSlotInsideTypeof(
void SerializerForBackgroundCompilation::VisitStaGlobal(
BytecodeArrayIterator* iterator) {
- NameRef(broker(),
+ MakeRef(broker(),
iterator->GetConstantForIndexOperand(0, broker()->isolate()));
FeedbackSlot slot = iterator->GetSlotOperand(1);
ProcessGlobalAccess(slot, false);
@@ -2916,7 +2920,7 @@ void SerializerForBackgroundCompilation::ProcessLdaLookupContextSlot(
BytecodeArrayIterator* iterator) {
const int slot_index = iterator->GetIndexOperand(1);
const int depth = iterator->GetUnsignedImmediateOperand(2);
- NameRef(broker(),
+ MakeRef(broker(),
iterator->GetConstantForIndexOperand(0, broker()->isolate()));
ProcessCheckContextExtensions(depth);
environment()->accumulator_hints() = Hints();
@@ -3016,7 +3020,8 @@ SerializerForBackgroundCompilation::ProcessMapForNamedPropertyAccess(
access_info.IsDictionaryProtoAccessorConstant()) &&
!access_info.constant().is_null()) {
if (access_info.constant()->IsJSFunction()) {
- JSFunctionRef function(broker(), access_info.constant());
+ JSFunctionRef function =
+ MakeRef(broker(), Handle<JSFunction>::cast(access_info.constant()));
if (receiver_map.has_value()) {
// For JSCallReducer and JSInlining(Heuristic).
@@ -3033,9 +3038,8 @@ SerializerForBackgroundCompilation::ProcessMapForNamedPropertyAccess(
// For JSCallReducer::ReduceCallApiFunction.
Handle<SharedFunctionInfo> sfi = function.shared().object();
if (sfi->IsApiFunction()) {
- FunctionTemplateInfoRef fti_ref(
- broker(),
- broker()->CanonicalPersistentHandle(sfi->get_api_func_data()));
+ FunctionTemplateInfoRef fti_ref =
+ MakeRef(broker(), sfi->get_api_func_data());
if (fti_ref.has_call_code()) {
fti_ref.SerializeCallCode();
ProcessReceiverMapForApiCall(fti_ref, receiver_map->object());
@@ -3043,19 +3047,19 @@ SerializerForBackgroundCompilation::ProcessMapForNamedPropertyAccess(
}
}
} else if (access_info.constant()->IsJSBoundFunction()) {
- JSBoundFunctionRef function(broker(), access_info.constant());
-
// For JSCallReducer::ReduceJSCall.
+ JSBoundFunctionRef function = MakeRef(
+ broker(), Handle<JSBoundFunction>::cast(access_info.constant()));
function.Serialize();
} else {
- FunctionTemplateInfoRef fti(broker(), broker()->CanonicalPersistentHandle(
- access_info.constant()));
+ FunctionTemplateInfoRef fti = MakeRef(
+ broker(), FunctionTemplateInfo::cast(*access_info.constant()));
if (fti.has_call_code()) fti.SerializeCallCode();
}
} else if (access_info.IsModuleExport()) {
// For JSNativeContextSpecialization::BuildPropertyLoad
DCHECK(!access_info.constant().is_null());
- CellRef(broker(), access_info.constant());
+ MakeRef(broker(), Handle<Cell>::cast(access_info.constant()));
}
switch (access_mode) {
@@ -3067,7 +3071,7 @@ SerializerForBackgroundCompilation::ProcessMapForNamedPropertyAccess(
base::Optional<JSObjectRef> holder;
Handle<JSObject> prototype;
if (access_info.holder().ToHandle(&prototype)) {
- holder = JSObjectRef(broker(), prototype);
+ holder = MakeRef(broker(), prototype);
} else {
CHECK_IMPLIES(concrete_receiver.has_value(),
concrete_receiver->map().equals(*receiver_map));
@@ -3095,7 +3099,7 @@ SerializerForBackgroundCompilation::ProcessMapForNamedPropertyAccess(
if (access_info.IsDataField() || access_info.IsFastDataConstant()) {
Handle<Map> transition_map;
if (access_info.transition_map().ToHandle(&transition_map)) {
- MapRef map_ref(broker(), transition_map);
+ MapRef map_ref = MakeRef(broker(), transition_map);
TRACE_BROKER(broker(), "Propagating transition map "
<< map_ref << " to receiver hints.");
receiver->AddMap(transition_map, zone(), broker_, false);
@@ -3225,7 +3229,7 @@ void SerializerForBackgroundCompilation::ProcessNamedAccess(
Hints* receiver, NamedAccessFeedback const& feedback,
AccessMode access_mode, Hints* result_hints) {
for (Handle<Map> map : feedback.maps()) {
- MapRef map_ref(broker(), map);
+ MapRef map_ref = MakeRef(broker(), map);
TRACE_BROKER(broker(), "Propagating feedback map "
<< map_ref << " to receiver hints.");
receiver->AddMap(map, zone(), broker_, false);
@@ -3233,7 +3237,7 @@ void SerializerForBackgroundCompilation::ProcessNamedAccess(
for (Handle<Map> map :
GetRelevantReceiverMaps(broker()->isolate(), receiver->maps())) {
- MapRef map_ref(broker(), map);
+ MapRef map_ref = MakeRef(broker(), map);
ProcessMapForNamedPropertyAccess(receiver, map_ref, map_ref,
feedback.name(), access_mode,
base::nullopt, result_hints);
@@ -3249,7 +3253,7 @@ void SerializerForBackgroundCompilation::ProcessNamedAccess(
}
// For JSNativeContextSpecialization::ReduceJSLoadNamed.
if (access_mode == AccessMode::kLoad && object.IsJSFunction() &&
- feedback.name().equals(ObjectRef(
+ feedback.name().equals(MakeRef(
broker(), broker()->isolate()->factory()->prototype_string()))) {
JSFunctionRef function = object.AsJSFunction();
function.Serialize();
@@ -3269,9 +3273,9 @@ void SerializerForBackgroundCompilation::ProcessNamedSuperAccess(
MapHandles receiver_maps =
GetRelevantReceiverMaps(broker()->isolate(), receiver->maps());
for (Handle<Map> receiver_map : receiver_maps) {
- MapRef receiver_map_ref(broker(), receiver_map);
+ MapRef receiver_map_ref = MakeRef(broker(), receiver_map);
for (Handle<Map> feedback_map : feedback.maps()) {
- MapRef feedback_map_ref(broker(), feedback_map);
+ MapRef feedback_map_ref = MakeRef(broker(), feedback_map);
ProcessMapForNamedPropertyAccess(
receiver, receiver_map_ref, feedback_map_ref, feedback.name(),
access_mode, base::nullopt, result_hints);
@@ -3279,7 +3283,7 @@ void SerializerForBackgroundCompilation::ProcessNamedSuperAccess(
}
if (receiver_maps.empty()) {
for (Handle<Map> feedback_map : feedback.maps()) {
- MapRef feedback_map_ref(broker(), feedback_map);
+ MapRef feedback_map_ref = MakeRef(broker(), feedback_map);
ProcessMapForNamedPropertyAccess(
receiver, base::nullopt, feedback_map_ref, feedback.name(),
access_mode, base::nullopt, result_hints);
@@ -3292,7 +3296,7 @@ void SerializerForBackgroundCompilation::ProcessElementAccess(
ElementAccessFeedback const& feedback, AccessMode access_mode) {
for (auto const& group : feedback.transition_groups()) {
for (Handle<Map> map_handle : group) {
- MapRef map(broker(), map_handle);
+ MapRef map = MakeRef(broker(), map_handle);
switch (access_mode) {
case AccessMode::kHas:
case AccessMode::kLoad:
@@ -3352,7 +3356,7 @@ void SerializerForBackgroundCompilation::ProcessElementAccess(
// For JSNativeContextSpecialization::InferRootMap
for (Handle<Map> map : receiver.maps()) {
- MapRef map_ref(broker(), map);
+ MapRef map_ref = MakeRef(broker(), map);
map_ref.SerializeRootMap();
}
}
@@ -3360,8 +3364,9 @@ void SerializerForBackgroundCompilation::ProcessElementAccess(
void SerializerForBackgroundCompilation::VisitLdaNamedProperty(
BytecodeArrayIterator* iterator) {
Hints* receiver = &register_hints(iterator->GetRegisterOperand(0));
- NameRef name(broker(),
- iterator->GetConstantForIndexOperand(1, broker()->isolate()));
+ NameRef name =
+ MakeRef(broker(), Handle<Name>::cast(iterator->GetConstantForIndexOperand(
+ 1, broker()->isolate())));
FeedbackSlot slot = iterator->GetSlotOperand(2);
ProcessNamedPropertyAccess(receiver, name, slot, AccessMode::kLoad);
}
@@ -3369,8 +3374,9 @@ void SerializerForBackgroundCompilation::VisitLdaNamedProperty(
void SerializerForBackgroundCompilation::VisitLdaNamedPropertyFromSuper(
BytecodeArrayIterator* iterator) {
Hints* receiver = &register_hints(iterator->GetRegisterOperand(0));
- NameRef name(broker(),
- iterator->GetConstantForIndexOperand(1, broker()->isolate()));
+ NameRef name =
+ MakeRef(broker(), Handle<Name>::cast(iterator->GetConstantForIndexOperand(
+ 1, broker()->isolate())));
FeedbackSlot slot = iterator->GetSlotOperand(2);
ProcessNamedSuperPropertyAccess(receiver, name, slot, AccessMode::kLoad);
}
@@ -3379,30 +3385,32 @@ void SerializerForBackgroundCompilation::VisitLdaNamedPropertyFromSuper(
// bytecodes.
void SerializerForBackgroundCompilation::VisitLdaNamedPropertyNoFeedback(
BytecodeArrayIterator* iterator) {
- NameRef(broker(),
- iterator->GetConstantForIndexOperand(1, broker()->isolate()));
+ MakeRef(broker(), Handle<Name>::cast(iterator->GetConstantForIndexOperand(
+ 1, broker()->isolate())));
}
void SerializerForBackgroundCompilation::VisitStaNamedProperty(
BytecodeArrayIterator* iterator) {
Hints* receiver = &register_hints(iterator->GetRegisterOperand(0));
- NameRef name(broker(),
- iterator->GetConstantForIndexOperand(1, broker()->isolate()));
+ NameRef name =
+ MakeRef(broker(), Handle<Name>::cast(iterator->GetConstantForIndexOperand(
+ 1, broker()->isolate())));
FeedbackSlot slot = iterator->GetSlotOperand(2);
ProcessNamedPropertyAccess(receiver, name, slot, AccessMode::kStore);
}
void SerializerForBackgroundCompilation::VisitStaNamedPropertyNoFeedback(
BytecodeArrayIterator* iterator) {
- NameRef(broker(),
+ MakeRef(broker(),
iterator->GetConstantForIndexOperand(1, broker()->isolate()));
}
void SerializerForBackgroundCompilation::VisitStaNamedOwnProperty(
BytecodeArrayIterator* iterator) {
Hints* receiver = &register_hints(iterator->GetRegisterOperand(0));
- NameRef name(broker(),
- iterator->GetConstantForIndexOperand(1, broker()->isolate()));
+ NameRef name =
+ MakeRef(broker(), Handle<Name>::cast(iterator->GetConstantForIndexOperand(
+ 1, broker()->isolate())));
FeedbackSlot slot = iterator->GetSlotOperand(2);
ProcessNamedPropertyAccess(receiver, name, slot, AccessMode::kStoreInLiteral);
}
@@ -3440,7 +3448,7 @@ void SerializerForBackgroundCompilation::ProcessConstantForInstanceOf(
PropertyAccessInfo access_info = broker()->GetPropertyAccessInfo(
constructor_heap_object.map(),
- NameRef(broker(), broker()->isolate()->factory()->has_instance_symbol()),
+ MakeRef(broker(), broker()->isolate()->factory()->has_instance_symbol()),
AccessMode::kLoad, dependencies(),
SerializationPolicy::kSerializeIfNeeded);
@@ -3450,8 +3458,8 @@ void SerializerForBackgroundCompilation::ProcessConstantForInstanceOf(
} else if (access_info.IsFastDataConstant()) {
Handle<JSObject> holder;
bool found_on_proto = access_info.holder().ToHandle(&holder);
- JSObjectRef holder_ref = found_on_proto ? JSObjectRef(broker(), holder)
- : constructor.AsJSObject();
+ JSObjectRef holder_ref =
+ found_on_proto ? MakeRef(broker(), holder) : constructor.AsJSObject();
base::Optional<ObjectRef> constant = holder_ref.GetOwnFastDataProperty(
access_info.field_representation(), access_info.field_index(),
SerializationPolicy::kSerializeIfNeeded);
diff --git a/chromium/v8/src/compiler/simd-scalar-lowering.cc b/chromium/v8/src/compiler/simd-scalar-lowering.cc
deleted file mode 100644
index 7c96393c4ce..00000000000
--- a/chromium/v8/src/compiler/simd-scalar-lowering.cc
+++ /dev/null
@@ -1,2829 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/simd-scalar-lowering.h"
-
-#include "src/base/platform/wrappers.h"
-#include "src/codegen/machine-type.h"
-#include "src/common/globals.h"
-#include "src/compiler/diamond.h"
-#include "src/compiler/linkage.h"
-#include "src/compiler/machine-operator.h"
-#include "src/compiler/node-matchers.h"
-#include "src/compiler/node-properties.h"
-#include "src/compiler/node.h"
-#include "src/compiler/wasm-compiler.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-namespace {
-static const int kNumLanes64 = 2;
-static const int kNumLanes32 = 4;
-static const int kNumLanes16 = 8;
-static const int kNumLanes8 = 16;
-static const int32_t kMask16 = 0xFFFF;
-static const int32_t kMask8 = 0xFF;
-static const int32_t kShift16 = 16;
-static const int32_t kShift8 = 24;
-static const int32_t kShiftMask8 = 0x7;
-static const int32_t kShiftMask16 = 0xF;
-static const int32_t kShiftMask32 = 0x1F;
-static const int32_t kShiftMask64 = 0x3F;
-
-// Shift values are taken modulo lane size. This helper calculates the mask
-// required for different shift opcodes.
-int GetMaskForShift(Node* node) {
- switch (node->opcode()) {
- case IrOpcode::kI8x16Shl:
- case IrOpcode::kI8x16ShrS:
- case IrOpcode::kI8x16ShrU:
- return kShiftMask8;
- case IrOpcode::kI16x8Shl:
- case IrOpcode::kI16x8ShrS:
- case IrOpcode::kI16x8ShrU:
- return kShiftMask16;
- case IrOpcode::kI32x4Shl:
- case IrOpcode::kI32x4ShrS:
- case IrOpcode::kI32x4ShrU:
- return kShiftMask32;
- case IrOpcode::kI64x2Shl:
- case IrOpcode::kI64x2ShrS:
- case IrOpcode::kI64x2ShrU:
- return kShiftMask64;
- default:
- UNIMPLEMENTED();
- }
-}
-} // anonymous namespace
-
-SimdScalarLowering::SimdScalarLowering(
- MachineGraph* mcgraph, SimplifiedOperatorBuilder* simplified,
- Signature<MachineRepresentation>* signature)
- : mcgraph_(mcgraph),
- simplified_(simplified),
- state_(mcgraph->graph(), 3),
- stack_(mcgraph_->zone()),
- replacements_(nullptr),
- signature_(signature),
- placeholder_(graph()->NewNode(common()->Dead())),
- parameter_count_after_lowering_(-1) {
- DCHECK_NOT_NULL(graph());
- DCHECK_NOT_NULL(graph()->end());
- replacements_ = zone()->NewArray<Replacement>(graph()->NodeCount());
- memset(static_cast<void*>(replacements_), 0,
- sizeof(Replacement) * graph()->NodeCount());
-}
-
-void SimdScalarLowering::LowerGraph() {
- stack_.push_back({graph()->end(), 0});
- state_.Set(graph()->end(), State::kOnStack);
- replacements_[graph()->end()->id()].type = SimdType::kInt32x4;
-
- while (!stack_.empty()) {
- NodeState& top = stack_.back();
- if (top.input_index == top.node->InputCount()) {
- // All inputs of top have already been lowered, now lower top.
- stack_.pop_back();
- state_.Set(top.node, State::kVisited);
- LowerNode(top.node);
- } else {
- // Push the next input onto the stack.
- Node* input = top.node->InputAt(top.input_index++);
- if (state_.Get(input) == State::kUnvisited) {
- SetLoweredType(input, top.node);
- if (input->opcode() == IrOpcode::kPhi) {
- // To break cycles with phi nodes we push phis on a separate stack so
- // that they are processed after all other nodes.
- PreparePhiReplacement(input);
- stack_.push_front({input, 0});
- } else if (input->opcode() == IrOpcode::kEffectPhi ||
- input->opcode() == IrOpcode::kLoop) {
- stack_.push_front({input, 0});
- } else {
- stack_.push_back({input, 0});
- }
- state_.Set(input, State::kOnStack);
- }
- }
- }
-}
-
-#define FOREACH_INT64X2_OPCODE(V) \
- V(I64x2Splat) \
- V(I64x2ExtractLane) \
- V(I64x2ReplaceLane) \
- V(I64x2Eq) \
- V(I64x2Ne) \
- V(I64x2Neg) \
- V(I64x2Shl) \
- V(I64x2ShrS) \
- V(I64x2ShrU) \
- V(I64x2Add) \
- V(I64x2Sub) \
- V(I64x2Mul) \
- V(I64x2ExtMulLowI32x4S) \
- V(I64x2ExtMulLowI32x4U) \
- V(I64x2ExtMulHighI32x4S) \
- V(I64x2ExtMulHighI32x4U)
-
-#define FOREACH_INT32X4_OPCODE(V) \
- V(I32x4Splat) \
- V(I32x4ExtractLane) \
- V(I32x4ReplaceLane) \
- V(I32x4SConvertF32x4) \
- V(I32x4UConvertF32x4) \
- V(I32x4SConvertI16x8Low) \
- V(I32x4SConvertI16x8High) \
- V(I32x4Neg) \
- V(I32x4Shl) \
- V(I32x4ShrS) \
- V(I32x4Add) \
- V(I32x4Sub) \
- V(I32x4Mul) \
- V(I32x4MinS) \
- V(I32x4MaxS) \
- V(I32x4ShrU) \
- V(I32x4MinU) \
- V(I32x4MaxU) \
- V(I32x4DotI16x8S) \
- V(I32x4Eq) \
- V(I32x4Ne) \
- V(I32x4LtS) \
- V(I32x4LtU) \
- V(I32x4GtS) \
- V(I32x4GtU) \
- V(I32x4LeS) \
- V(I32x4LeU) \
- V(I32x4GeS) \
- V(I32x4GeU) \
- V(I32x4UConvertI16x8Low) \
- V(I32x4UConvertI16x8High) \
- V(I32x4Abs) \
- V(S128And) \
- V(S128Or) \
- V(S128Xor) \
- V(S128Not) \
- V(S128AndNot) \
- V(S128Select) \
- V(I64x2AllTrue) \
- V(I32x4AllTrue) \
- V(I16x8AllTrue) \
- V(V128AnyTrue) \
- V(I8x16AllTrue) \
- V(I32x4BitMask) \
- V(I32x4ExtMulLowI16x8S) \
- V(I32x4ExtMulLowI16x8U) \
- V(I32x4ExtMulHighI16x8S) \
- V(I32x4ExtMulHighI16x8U)
-
-#define FOREACH_FLOAT64X2_OPCODE(V) \
- V(F64x2Splat) \
- V(F64x2ExtractLane) \
- V(F64x2ReplaceLane) \
- V(F64x2Abs) \
- V(F64x2Neg) \
- V(F64x2Sqrt) \
- V(F64x2Add) \
- V(F64x2Sub) \
- V(F64x2Mul) \
- V(F64x2Div) \
- V(F64x2Min) \
- V(F64x2Max) \
- V(F64x2Pmin) \
- V(F64x2Pmax) \
- V(F64x2Ceil) \
- V(F64x2Floor) \
- V(F64x2Trunc) \
- V(F64x2NearestInt)
-
-#define FOREACH_FLOAT32X4_OPCODE(V) \
- V(F32x4Splat) \
- V(F32x4ExtractLane) \
- V(F32x4ReplaceLane) \
- V(F32x4SConvertI32x4) \
- V(F32x4UConvertI32x4) \
- V(F32x4Abs) \
- V(F32x4Neg) \
- V(F32x4Sqrt) \
- V(F32x4RecipApprox) \
- V(F32x4RecipSqrtApprox) \
- V(F32x4Add) \
- V(F32x4Sub) \
- V(F32x4Mul) \
- V(F32x4Div) \
- V(F32x4Min) \
- V(F32x4Max) \
- V(F32x4Pmin) \
- V(F32x4Pmax) \
- V(F32x4Ceil) \
- V(F32x4Floor) \
- V(F32x4Trunc) \
- V(F32x4NearestInt)
-
-#define FOREACH_FLOAT64x2_TO_INT64x2OPCODE(V) \
- V(F64x2Eq) \
- V(F64x2Ne) \
- V(F64x2Lt) \
- V(F64x2Le)
-
-#define FOREACH_FLOAT32X4_TO_INT32X4OPCODE(V) \
- V(F32x4Eq) \
- V(F32x4Ne) \
- V(F32x4Lt) \
- V(F32x4Le) \
- V(F32x4Gt) \
- V(F32x4Ge)
-
-#define FOREACH_INT16X8_OPCODE(V) \
- V(I16x8Splat) \
- V(I16x8ExtractLaneU) \
- V(I16x8ExtractLaneS) \
- V(I16x8ReplaceLane) \
- V(I16x8SConvertI8x16Low) \
- V(I16x8SConvertI8x16High) \
- V(I16x8Neg) \
- V(I16x8Shl) \
- V(I16x8ShrS) \
- V(I16x8SConvertI32x4) \
- V(I16x8Add) \
- V(I16x8AddSatS) \
- V(I16x8Sub) \
- V(I16x8SubSatS) \
- V(I16x8Mul) \
- V(I16x8MinS) \
- V(I16x8MaxS) \
- V(I16x8UConvertI8x16Low) \
- V(I16x8UConvertI8x16High) \
- V(I16x8ShrU) \
- V(I16x8UConvertI32x4) \
- V(I16x8AddSatU) \
- V(I16x8SubSatU) \
- V(I16x8MinU) \
- V(I16x8MaxU) \
- V(I16x8Eq) \
- V(I16x8Ne) \
- V(I16x8LtS) \
- V(I16x8LtU) \
- V(I16x8GtS) \
- V(I16x8GtU) \
- V(I16x8LeS) \
- V(I16x8LeU) \
- V(I16x8GeS) \
- V(I16x8GeU) \
- V(I16x8RoundingAverageU) \
- V(I16x8Abs) \
- V(I16x8BitMask) \
- V(I16x8ExtMulLowI8x16S) \
- V(I16x8ExtMulLowI8x16U) \
- V(I16x8ExtMulHighI8x16S) \
- V(I16x8ExtMulHighI8x16U)
-
-#define FOREACH_INT8X16_OPCODE(V) \
- V(I8x16Splat) \
- V(I8x16ExtractLaneU) \
- V(I8x16ExtractLaneS) \
- V(I8x16ReplaceLane) \
- V(I8x16SConvertI16x8) \
- V(I8x16Neg) \
- V(I8x16Shl) \
- V(I8x16ShrS) \
- V(I8x16Add) \
- V(I8x16AddSatS) \
- V(I8x16Sub) \
- V(I8x16SubSatS) \
- V(I8x16MinS) \
- V(I8x16MaxS) \
- V(I8x16ShrU) \
- V(I8x16UConvertI16x8) \
- V(I8x16AddSatU) \
- V(I8x16SubSatU) \
- V(I8x16MinU) \
- V(I8x16MaxU) \
- V(I8x16Eq) \
- V(I8x16Ne) \
- V(I8x16LtS) \
- V(I8x16LtU) \
- V(I8x16GtS) \
- V(I8x16GtU) \
- V(I8x16LeS) \
- V(I8x16LeU) \
- V(I8x16GeS) \
- V(I8x16GeU) \
- V(I8x16Swizzle) \
- V(I8x16Shuffle) \
- V(I8x16RoundingAverageU) \
- V(I8x16Abs) \
- V(I8x16BitMask)
-
-MachineType SimdScalarLowering::MachineTypeFrom(SimdType simdType) {
- switch (simdType) {
- case SimdType::kFloat64x2:
- return MachineType::Float64();
- case SimdType::kFloat32x4:
- return MachineType::Float32();
- case SimdType::kInt64x2:
- return MachineType::Int64();
- case SimdType::kInt32x4:
- return MachineType::Int32();
- case SimdType::kInt16x8:
- return MachineType::Int16();
- case SimdType::kInt8x16:
- return MachineType::Int8();
- }
- return MachineType::None();
-}
-
-void SimdScalarLowering::SetLoweredType(Node* node, Node* output) {
- switch (node->opcode()) {
-#define CASE_STMT(name) case IrOpcode::k##name:
- FOREACH_FLOAT64X2_OPCODE(CASE_STMT) {
- replacements_[node->id()].type = SimdType::kFloat64x2;
- break;
- }
- FOREACH_INT64X2_OPCODE(CASE_STMT) {
- replacements_[node->id()].type = SimdType::kInt64x2;
- break;
- }
- FOREACH_INT32X4_OPCODE(CASE_STMT)
- case IrOpcode::kReturn:
- case IrOpcode::kParameter:
- case IrOpcode::kPhi:
- case IrOpcode::kCall: {
- replacements_[node->id()].type = SimdType::kInt32x4;
- break;
- }
- FOREACH_FLOAT32X4_OPCODE(CASE_STMT) {
- replacements_[node->id()].type = SimdType::kFloat32x4;
- break;
- }
- FOREACH_FLOAT32X4_TO_INT32X4OPCODE(CASE_STMT) {
- replacements_[node->id()].type = SimdType::kInt32x4;
- break;
- }
- FOREACH_FLOAT64x2_TO_INT64x2OPCODE(CASE_STMT) {
- replacements_[node->id()].type = SimdType::kInt64x2;
- break;
- }
- FOREACH_INT16X8_OPCODE(CASE_STMT) {
- replacements_[node->id()].type = SimdType::kInt16x8;
- break;
- }
- FOREACH_INT8X16_OPCODE(CASE_STMT) {
- replacements_[node->id()].type = SimdType::kInt8x16;
- break;
- }
- case IrOpcode::kLoadTransform: {
- LoadTransformParameters params = LoadTransformParametersOf(node->op());
- switch (params.transformation) {
- case LoadTransformation::kS128Load8Splat:
- replacements_[node->id()].type = SimdType::kInt8x16;
- break;
- case LoadTransformation::kS128Load16Splat:
- case LoadTransformation::kS128Load8x8S:
- case LoadTransformation::kS128Load8x8U:
- replacements_[node->id()].type = SimdType::kInt16x8;
- break;
- case LoadTransformation::kS128Load32Splat:
- case LoadTransformation::kS128Load16x4S:
- case LoadTransformation::kS128Load16x4U:
- case LoadTransformation::kS128Load32Zero:
- replacements_[node->id()].type = SimdType::kInt32x4;
- break;
- case LoadTransformation::kS128Load64Splat:
- case LoadTransformation::kS128Load32x2S:
- case LoadTransformation::kS128Load32x2U:
- case LoadTransformation::kS128Load64Zero:
- replacements_[node->id()].type = SimdType::kInt64x2;
- break;
- default:
- UNIMPLEMENTED();
- }
- break;
- }
- default: {
- switch (output->opcode()) {
- case IrOpcode::kF32x4SConvertI32x4:
- case IrOpcode::kF32x4UConvertI32x4:
- case IrOpcode::kI16x8SConvertI32x4:
- case IrOpcode::kI16x8UConvertI32x4: {
- replacements_[node->id()].type = SimdType::kInt32x4;
- break;
- }
- case IrOpcode::kI8x16SConvertI16x8:
- case IrOpcode::kI8x16UConvertI16x8:
- case IrOpcode::kI32x4SConvertI16x8Low:
- case IrOpcode::kI32x4SConvertI16x8High:
- case IrOpcode::kI32x4UConvertI16x8Low:
- case IrOpcode::kI32x4UConvertI16x8High: {
- replacements_[node->id()].type = SimdType::kInt16x8;
- break;
- }
- case IrOpcode::kI16x8SConvertI8x16Low:
- case IrOpcode::kI16x8SConvertI8x16High:
- case IrOpcode::kI16x8UConvertI8x16Low:
- case IrOpcode::kI16x8UConvertI8x16High: {
- replacements_[node->id()].type = SimdType::kInt8x16;
- break;
- }
- FOREACH_FLOAT32X4_TO_INT32X4OPCODE(CASE_STMT)
- case IrOpcode::kI32x4SConvertF32x4:
- case IrOpcode::kI32x4UConvertF32x4: {
- replacements_[node->id()].type = SimdType::kFloat32x4;
- break;
- }
- case IrOpcode::kS128Select: {
- replacements_[node->id()].type = SimdType::kInt32x4;
- break;
- }
- default: {
- replacements_[node->id()].type = replacements_[output->id()].type;
- }
- }
- }
-#undef CASE_STMT
- }
-}
-
-static int GetParameterIndexAfterLoweringSimd128(
- Signature<MachineRepresentation>* signature, int old_index) {
- // In function calls, the simd128 types are passed as 4 Int32 types. The
- // parameters are typecast to the types as needed for various operations.
- int result = old_index;
- for (int i = 0; i < old_index; ++i) {
- if (signature->GetParam(i) == MachineRepresentation::kSimd128) {
- result += 3;
- }
- }
- return result;
-}
-
-int SimdScalarLowering::GetParameterCountAfterLowering() {
- if (parameter_count_after_lowering_ == -1) {
- // GetParameterIndexAfterLoweringSimd128(parameter_count) returns the
- // parameter count after lowering.
- parameter_count_after_lowering_ = GetParameterIndexAfterLoweringSimd128(
- signature(), static_cast<int>(signature()->parameter_count()));
- }
- return parameter_count_after_lowering_;
-}
-
-static int GetReturnCountAfterLoweringSimd128(
- Signature<MachineRepresentation>* signature) {
- int result = static_cast<int>(signature->return_count());
- for (int i = 0; i < static_cast<int>(signature->return_count()); ++i) {
- if (signature->GetReturn(i) == MachineRepresentation::kSimd128) {
- result += 3;
- }
- }
- return result;
-}
-
-int GetReturnIndexAfterLowering(const CallDescriptor* call_descriptor,
- int old_index) {
- int result = old_index;
- for (int i = 0; i < old_index; ++i) {
- if (call_descriptor->GetReturnType(i).representation() ==
- MachineRepresentation::kSimd128) {
- result += kNumLanes32 - 1;
- }
- }
- return result;
-}
-
-static int GetReturnCountAfterLoweringSimd128(
- const CallDescriptor* call_descriptor) {
- return GetReturnIndexAfterLowering(
- call_descriptor, static_cast<int>(call_descriptor->ReturnCount()));
-}
-
-int SimdScalarLowering::NumLanes(SimdType type) {
- int num_lanes = 0;
- if (type == SimdType::kFloat64x2 || type == SimdType::kInt64x2) {
- num_lanes = kNumLanes64;
- } else if (type == SimdType::kFloat32x4 || type == SimdType::kInt32x4) {
- num_lanes = kNumLanes32;
- } else if (type == SimdType::kInt16x8) {
- num_lanes = kNumLanes16;
- } else if (type == SimdType::kInt8x16) {
- num_lanes = kNumLanes8;
- } else {
- UNREACHABLE();
- }
- return num_lanes;
-}
-
-constexpr int SimdScalarLowering::kLaneOffsets[];
-
-void SimdScalarLowering::GetIndexNodes(Node* index, Node** new_indices,
- SimdType type) {
- int num_lanes = NumLanes(type);
- int lane_width = kSimd128Size / num_lanes;
- int laneIndex = kLaneOffsets[0] / lane_width;
-
- Node* rep = index;
-
- if (HasReplacement(0, index)) {
- // Index nodes are lowered to scalar nodes.
- DCHECK_EQ(1, ReplacementCount(index));
- rep = GetReplacements(index)[0];
- }
-
- new_indices[laneIndex] = rep;
- for (int i = 1; i < num_lanes; ++i) {
- laneIndex = kLaneOffsets[i * lane_width] / lane_width;
- new_indices[laneIndex] = graph()->NewNode(
- machine()->Int32Add(), rep,
- graph()->NewNode(
- common()->Int32Constant(static_cast<int>(i) * lane_width)));
- }
-}
-
-void SimdScalarLowering::LowerLoadOp(Node* node, SimdType type) {
- MachineRepresentation rep =
- node->opcode() == IrOpcode::kLoadFromObject
- ? ObjectAccessOf(node->op()).machine_type.representation()
- : LoadRepresentationOf(node->op()).representation();
- const Operator* load_op;
- switch (node->opcode()) {
- case IrOpcode::kLoad:
- load_op = machine()->Load(MachineTypeFrom(type));
- break;
- case IrOpcode::kLoadImmutable:
- load_op = machine()->LoadImmutable(MachineTypeFrom(type));
- break;
- case IrOpcode::kLoadFromObject:
- load_op = simplified()->LoadFromObject(
- ObjectAccess(MachineTypeFrom(type), kNoWriteBarrier));
- break;
- case IrOpcode::kUnalignedLoad:
- load_op = machine()->UnalignedLoad(MachineTypeFrom(type));
- break;
- case IrOpcode::kProtectedLoad:
- load_op = machine()->ProtectedLoad(MachineTypeFrom(type));
- break;
- default:
- UNREACHABLE();
- }
- if (rep == MachineRepresentation::kSimd128) {
- Node* base = node->InputAt(0);
- Node* index = node->InputAt(1);
- int num_lanes = NumLanes(type);
- Node** indices = zone()->NewArray<Node*>(num_lanes);
- GetIndexNodes(index, indices, type);
- Node** rep_nodes = zone()->NewArray<Node*>(num_lanes);
- rep_nodes[0] = node;
- rep_nodes[0]->ReplaceInput(1, indices[0]);
- NodeProperties::ChangeOp(rep_nodes[0], load_op);
- if (node->InputCount() > 2) {
- DCHECK_LT(3, node->InputCount());
- Node* effect_input = node->InputAt(2);
- Node* control_input = node->InputAt(3);
- for (int i = num_lanes - 1; i > 0; --i) {
- rep_nodes[i] = graph()->NewNode(load_op, base, indices[i], effect_input,
- control_input);
- effect_input = rep_nodes[i];
- }
- rep_nodes[0]->ReplaceInput(2, rep_nodes[1]);
- } else {
- for (int i = 1; i < num_lanes; ++i) {
- rep_nodes[i] = graph()->NewNode(load_op, base, indices[i]);
- }
- }
- ReplaceNode(node, rep_nodes, num_lanes);
- } else {
- DefaultLowering(node);
- }
-}
-
-void SimdScalarLowering::LowerLoadTransformOp(Node* node, SimdType type) {
- LoadTransformParameters params = LoadTransformParametersOf(node->op());
- MachineType load_rep = MachineType::None();
- SimdType load_type = type;
-
- // Load extends have a different machine type for loading.
- switch (params.transformation) {
- case LoadTransformation::kS128Load8x8S:
- load_rep = MachineType::Int8();
- load_type = SimdType::kInt8x16;
- break;
- case LoadTransformation::kS128Load8x8U:
- load_rep = MachineType::Uint8();
- load_type = SimdType::kInt8x16;
- break;
- case LoadTransformation::kS128Load16x4S:
- load_rep = MachineType::Int16();
- load_type = SimdType::kInt16x8;
- break;
- case LoadTransformation::kS128Load16x4U:
- load_rep = MachineType::Uint16();
- load_type = SimdType::kInt16x8;
- break;
- case LoadTransformation::kS128Load32x2S:
- load_rep = MachineType::Int32();
- load_type = SimdType::kInt32x4;
- break;
- case LoadTransformation::kS128Load32x2U:
- load_rep = MachineType::Uint32();
- load_type = SimdType::kInt32x4;
- break;
- case LoadTransformation::kS128Load8Splat:
- case LoadTransformation::kS128Load16Splat:
- case LoadTransformation::kS128Load32Splat:
- case LoadTransformation::kS128Load64Splat:
- case LoadTransformation::kS128Load32Zero:
- case LoadTransformation::kS128Load64Zero:
- load_rep = MachineTypeFrom(type);
- break;
- default:
- UNREACHABLE();
- }
-
- DCHECK_NE(load_rep, MachineType::None());
-
- const Operator* load_op;
- switch (params.kind) {
- case MemoryAccessKind::kNormal:
- load_op = machine()->Load(load_rep);
- break;
- case MemoryAccessKind::kUnaligned:
- load_op = machine()->UnalignedLoad(load_rep);
- break;
- case MemoryAccessKind::kProtected:
- load_op = machine()->ProtectedLoad(load_rep);
- break;
- }
-
- Node* base = node->InputAt(0);
- Node* index = node->InputAt(1);
- int num_lanes = NumLanes(type);
- Node** reps = zone()->NewArray<Node*>(num_lanes);
- Node* effect_input = node->InputAt(2);
- Node* control_input = node->InputAt(3);
-
- // This node is also used as effect input into other nodes, so we need to
- // change this node in place.
- reps[0] = node;
- NodeProperties::ChangeOp(reps[0], load_op);
-
- if (type != load_type) {
- // We load a smaller lane size, then extend to a larger lane size. So use
- // the smaller lane size to calculate the index nodes for loads, but only
- // actually load half of those lanes.
- Node** indices = zone()->NewArray<Node*>(num_lanes * 2);
- GetIndexNodes(index, indices, load_type);
-
- reps[0]->ReplaceInput(1, indices[0]);
-
- for (int i = num_lanes - 1; i > 0; --i) {
- reps[i] = graph()->NewNode(load_op, base, indices[i], effect_input,
- control_input);
- effect_input = reps[i];
- }
- } else {
- if (params.transformation == LoadTransformation::kS128Load32Zero) {
- for (int i = num_lanes - 1; i > 0; --i) {
- reps[i] = mcgraph_->Int32Constant(0);
- }
- } else if (params.transformation == LoadTransformation::kS128Load64Zero) {
- for (int i = num_lanes - 1; i > 0; --i) {
- reps[i] = mcgraph_->Int64Constant(0);
- }
- } else {
- // Load splat, load from the same index for every lane.
- Node* rep = HasReplacement(0, index) ? GetReplacements(index)[0] : index;
-
- // Replace first node, we only called ChangeOp above.
- reps[0]->ReplaceInput(1, rep);
- for (int i = num_lanes - 1; i > 0; --i) {
- reps[i] =
- graph()->NewNode(load_op, base, rep, effect_input, control_input);
- effect_input = reps[i];
- }
- }
- }
-
- // Update the effect input, completing the effect chain, but only if there is
- // an effect output (LoadZero does not have an effect output, it is zero).
- if (reps[1]->op()->EffectOutputCount() > 0) {
- reps[0]->ReplaceInput(2, reps[1]);
- }
-
- // Special case: the load nodes need to be sign-extended, and we do it here so
- // the loop above can connect all the effect edges correctly.
- if (params.transformation == LoadTransformation::kS128Load32x2S) {
- for (int i = 0; i < num_lanes; ++i) {
- reps[i] = graph()->NewNode(machine()->ChangeInt32ToInt64(), reps[i]);
- }
- } else if (params.transformation == LoadTransformation::kS128Load32x2U) {
- for (int i = 0; i < num_lanes; ++i) {
- reps[i] = graph()->NewNode(machine()->ChangeUint32ToUint64(), reps[i]);
- }
- }
-
- ReplaceNode(node, reps, num_lanes);
-}
-
-void SimdScalarLowering::LowerStoreOp(Node* node) {
- // For store operations, use the replacement type of the input instead of
- // the one of the affected node.
- DCHECK_LT(2, node->InputCount());
- SimdType rep_type = ReplacementType(node->InputAt(2));
- replacements_[node->id()].type = rep_type;
- const Operator* store_op;
- MachineRepresentation rep;
- switch (node->opcode()) {
- case IrOpcode::kStore: {
- rep = StoreRepresentationOf(node->op()).representation();
- WriteBarrierKind write_barrier_kind =
- StoreRepresentationOf(node->op()).write_barrier_kind();
- store_op = machine()->Store(StoreRepresentation(
- MachineTypeFrom(rep_type).representation(), write_barrier_kind));
- break;
- }
- case IrOpcode::kStoreToObject: {
- rep = ObjectAccessOf(node->op()).machine_type.representation();
- WriteBarrierKind write_barrier_kind =
- ObjectAccessOf(node->op()).write_barrier_kind;
- store_op = simplified()->StoreToObject(
- ObjectAccess(MachineTypeFrom(rep_type), write_barrier_kind));
- break;
- }
- case IrOpcode::kUnalignedStore: {
- rep = UnalignedStoreRepresentationOf(node->op());
- store_op =
- machine()->UnalignedStore(MachineTypeFrom(rep_type).representation());
- break;
- }
- case IrOpcode::kProtectedStore: {
- rep = StoreRepresentationOf(node->op()).representation();
- store_op =
- machine()->ProtectedStore(MachineTypeFrom(rep_type).representation());
- break;
- }
- default:
- UNREACHABLE();
- }
- if (rep == MachineRepresentation::kSimd128) {
- Node* base = node->InputAt(0);
- Node* index = node->InputAt(1);
- int num_lanes = NumLanes(rep_type);
- Node** indices = zone()->NewArray<Node*>(num_lanes);
- GetIndexNodes(index, indices, rep_type);
- Node* value = node->InputAt(2);
- DCHECK(HasReplacement(1, value));
- Node** rep_nodes = zone()->NewArray<Node*>(num_lanes);
- rep_nodes[0] = node;
- Node** rep_inputs = GetReplacementsWithType(value, rep_type);
- rep_nodes[0]->ReplaceInput(2, rep_inputs[0]);
- rep_nodes[0]->ReplaceInput(1, indices[0]);
- NodeProperties::ChangeOp(node, store_op);
- if (node->InputCount() > 3) {
- DCHECK_LT(4, node->InputCount());
- Node* effect_input = node->InputAt(3);
- Node* control_input = node->InputAt(4);
- for (int i = num_lanes - 1; i > 0; --i) {
- rep_nodes[i] =
- graph()->NewNode(store_op, base, indices[i], rep_inputs[i],
- effect_input, control_input);
- effect_input = rep_nodes[i];
- }
- rep_nodes[0]->ReplaceInput(3, rep_nodes[1]);
- } else {
- for (int i = 1; i < num_lanes; ++i) {
- rep_nodes[i] =
- graph()->NewNode(store_op, base, indices[i], rep_inputs[i]);
- }
- }
- ReplaceNode(node, rep_nodes, num_lanes);
- } else {
- DefaultLowering(node);
- }
-}
-
-void SimdScalarLowering::LowerBinaryOp(Node* node, SimdType input_rep_type,
- const Operator* op,
- bool not_horizontal) {
- DCHECK_EQ(2, node->InputCount());
- Node** rep_left = GetReplacementsWithType(node->InputAt(0), input_rep_type);
- Node** rep_right = GetReplacementsWithType(node->InputAt(1), input_rep_type);
- int num_lanes = NumLanes(input_rep_type);
- Node** rep_node = zone()->NewArray<Node*>(num_lanes);
- if (not_horizontal) {
- for (int i = 0; i < num_lanes; ++i) {
- rep_node[i] = graph()->NewNode(op, rep_left[i], rep_right[i]);
- }
- } else {
- for (int i = 0; i < num_lanes / 2; ++i) {
- rep_node[i] = graph()->NewNode(op, rep_left[i * 2], rep_left[i * 2 + 1]);
- rep_node[i + num_lanes / 2] =
- graph()->NewNode(op, rep_right[i * 2], rep_right[i * 2 + 1]);
- }
- }
- ReplaceNode(node, rep_node, num_lanes);
-}
-
-void SimdScalarLowering::LowerCompareOp(Node* node, SimdType input_rep_type,
- const Operator* op,
- bool invert_inputs) {
- DCHECK_EQ(2, node->InputCount());
- Node** rep_left = GetReplacementsWithType(node->InputAt(0), input_rep_type);
- Node** rep_right = GetReplacementsWithType(node->InputAt(1), input_rep_type);
- int num_lanes = NumLanes(input_rep_type);
- Node** rep_node = zone()->NewArray<Node*>(num_lanes);
- for (int i = 0; i < num_lanes; ++i) {
- Node* cmp_result = nullptr;
- if (invert_inputs) {
- cmp_result = graph()->NewNode(op, rep_right[i], rep_left[i]);
- } else {
- cmp_result = graph()->NewNode(op, rep_left[i], rep_right[i]);
- }
- Diamond d_cmp(graph(), common(), cmp_result);
- rep_node[i] = ConstructPhiForComparison(d_cmp, input_rep_type, -1, 0);
- }
- ReplaceNode(node, rep_node, num_lanes);
-}
-
-Node* SimdScalarLowering::FixUpperBits(Node* input, int32_t shift) {
- return graph()->NewNode(machine()->Word32Sar(),
- graph()->NewNode(machine()->Word32Shl(), input,
- mcgraph_->Int32Constant(shift)),
- mcgraph_->Int32Constant(shift));
-}
-
-void SimdScalarLowering::LowerBinaryOpForSmallInt(Node* node,
- SimdType input_rep_type,
- const Operator* op,
- bool not_horizontal) {
- DCHECK_EQ(2, node->InputCount());
- DCHECK(input_rep_type == SimdType::kInt16x8 ||
- input_rep_type == SimdType::kInt8x16);
- Node** rep_left = GetReplacementsWithType(node->InputAt(0), input_rep_type);
- Node** rep_right = GetReplacementsWithType(node->InputAt(1), input_rep_type);
- int num_lanes = NumLanes(input_rep_type);
- Node** rep_node = zone()->NewArray<Node*>(num_lanes);
- int32_t shift_val =
- (input_rep_type == SimdType::kInt16x8) ? kShift16 : kShift8;
- if (not_horizontal) {
- for (int i = 0; i < num_lanes; ++i) {
- rep_node[i] = FixUpperBits(
- graph()->NewNode(op, rep_left[i], rep_right[i]), shift_val);
- }
- } else {
- for (int i = 0; i < num_lanes / 2; ++i) {
- rep_node[i] = FixUpperBits(
- graph()->NewNode(op, rep_left[i * 2], rep_left[i * 2 + 1]),
- shift_val);
- rep_node[i + num_lanes / 2] = FixUpperBits(
- graph()->NewNode(op, rep_right[i * 2], rep_right[i * 2 + 1]),
- shift_val);
- }
- }
- ReplaceNode(node, rep_node, num_lanes);
-}
-
-Node* SimdScalarLowering::Mask(Node* input, int32_t mask) {
- return graph()->NewNode(machine()->Word32And(), input,
- mcgraph_->Int32Constant(mask));
-}
-
-void SimdScalarLowering::LowerSaturateBinaryOp(Node* node,
- SimdType input_rep_type,
- const Operator* op,
- bool is_signed) {
- DCHECK_EQ(2, node->InputCount());
- DCHECK(input_rep_type == SimdType::kInt16x8 ||
- input_rep_type == SimdType::kInt8x16);
- Node** rep_left = GetReplacementsWithType(node->InputAt(0), input_rep_type);
- Node** rep_right = GetReplacementsWithType(node->InputAt(1), input_rep_type);
- int32_t min = 0;
- int32_t max = 0;
- int32_t mask = 0;
- int32_t shift_val = 0;
- MachineRepresentation phi_rep;
- if (input_rep_type == SimdType::kInt16x8) {
- if (is_signed) {
- min = std::numeric_limits<int16_t>::min();
- max = std::numeric_limits<int16_t>::max();
- } else {
- min = std::numeric_limits<uint16_t>::min();
- max = std::numeric_limits<uint16_t>::max();
- }
- mask = kMask16;
- shift_val = kShift16;
- phi_rep = MachineRepresentation::kWord16;
- } else {
- if (is_signed) {
- min = std::numeric_limits<int8_t>::min();
- max = std::numeric_limits<int8_t>::max();
- } else {
- min = std::numeric_limits<uint8_t>::min();
- max = std::numeric_limits<uint8_t>::max();
- }
- mask = kMask8;
- shift_val = kShift8;
- phi_rep = MachineRepresentation::kWord8;
- }
- int num_lanes = NumLanes(input_rep_type);
- Node** rep_node = zone()->NewArray<Node*>(num_lanes);
- for (int i = 0; i < num_lanes; ++i) {
- Node* op_result = nullptr;
- Node* left = is_signed ? rep_left[i] : Mask(rep_left[i], mask);
- Node* right = is_signed ? rep_right[i] : Mask(rep_right[i], mask);
- op_result = graph()->NewNode(op, left, right);
- Diamond d_min(graph(), common(),
- graph()->NewNode(machine()->Int32LessThan(), op_result,
- mcgraph_->Int32Constant(min)));
- rep_node[i] = d_min.Phi(phi_rep, mcgraph_->Int32Constant(min), op_result);
- Diamond d_max(graph(), common(),
- graph()->NewNode(machine()->Int32LessThan(),
- mcgraph_->Int32Constant(max), rep_node[i]));
- rep_node[i] = d_max.Phi(phi_rep, mcgraph_->Int32Constant(max), rep_node[i]);
- rep_node[i] =
- is_signed ? rep_node[i] : FixUpperBits(rep_node[i], shift_val);
- }
- ReplaceNode(node, rep_node, num_lanes);
-}
-
-void SimdScalarLowering::LowerUnaryOp(Node* node, SimdType input_rep_type,
- const Operator* op) {
- DCHECK_EQ(1, node->InputCount());
- Node** rep = GetReplacementsWithType(node->InputAt(0), input_rep_type);
- int num_lanes = NumLanes(input_rep_type);
- Node** rep_node = zone()->NewArray<Node*>(num_lanes);
- for (int i = 0; i < num_lanes; ++i) {
- rep_node[i] = graph()->NewNode(op, rep[i]);
- }
- ReplaceNode(node, rep_node, num_lanes);
-}
-
-void SimdScalarLowering::LowerIntMinMax(Node* node, const Operator* op,
- bool is_max, SimdType type) {
- DCHECK_EQ(2, node->InputCount());
- Node** rep_left = GetReplacementsWithType(node->InputAt(0), type);
- Node** rep_right = GetReplacementsWithType(node->InputAt(1), type);
- int num_lanes = NumLanes(type);
- Node** rep_node = zone()->NewArray<Node*>(num_lanes);
- MachineRepresentation rep = MachineRepresentation::kNone;
- if (type == SimdType::kInt32x4) {
- rep = MachineRepresentation::kWord32;
- } else if (type == SimdType::kInt16x8) {
- rep = MachineRepresentation::kWord16;
- } else if (type == SimdType::kInt8x16) {
- rep = MachineRepresentation::kWord8;
- } else {
- UNREACHABLE();
- }
- for (int i = 0; i < num_lanes; ++i) {
- Diamond d(graph(), common(),
- graph()->NewNode(op, rep_left[i], rep_right[i]));
- if (is_max) {
- rep_node[i] = d.Phi(rep, rep_right[i], rep_left[i]);
- } else {
- rep_node[i] = d.Phi(rep, rep_left[i], rep_right[i]);
- }
- }
- ReplaceNode(node, rep_node, num_lanes);
-}
-
-Node* SimdScalarLowering::BuildF64Trunc(Node* input) {
- if (machine()->Float64RoundTruncate().IsSupported()) {
- return graph()->NewNode(machine()->Float64RoundTruncate().op(), input);
- } else {
- ExternalReference ref = ExternalReference::wasm_f64_trunc();
- Node* stack_slot =
- graph()->NewNode(machine()->StackSlot(MachineRepresentation::kFloat64));
- const Operator* store_op = machine()->Store(
- StoreRepresentation(MachineRepresentation::kFloat64, kNoWriteBarrier));
- Node* effect =
- graph()->NewNode(store_op, stack_slot, mcgraph_->Int32Constant(0),
- input, graph()->start(), graph()->start());
- Node* function = graph()->NewNode(common()->ExternalConstant(ref));
- Node** args = zone()->NewArray<Node*>(4);
- args[0] = function;
- args[1] = stack_slot;
- args[2] = effect;
- args[3] = graph()->start();
- Signature<MachineType>::Builder sig_builder(zone(), 0, 1);
- sig_builder.AddParam(MachineType::Pointer());
- auto call_descriptor =
- Linkage::GetSimplifiedCDescriptor(zone(), sig_builder.Build());
- Node* call = graph()->NewNode(common()->Call(call_descriptor), 4, args);
- return graph()->NewNode(machine()->Load(LoadRepresentation::Float64()),
- stack_slot, mcgraph_->Int32Constant(0), call,
- graph()->start());
- }
-}
-
-void SimdScalarLowering::LowerConvertFromFloat(Node* node, bool is_signed) {
- DCHECK_EQ(1, node->InputCount());
- Node** rep = GetReplacementsWithType(node->InputAt(0), SimdType::kFloat32x4);
- Node* rep_node[kNumLanes32];
- Node* double_zero = graph()->NewNode(common()->Float64Constant(0.0));
- Node* min = graph()->NewNode(
- common()->Float64Constant(static_cast<double>(is_signed ? kMinInt : 0)));
- Node* max = graph()->NewNode(common()->Float64Constant(
- static_cast<double>(is_signed ? kMaxInt : 0xFFFFFFFFu)));
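- // Saturating conversion: NaN lanes become 0, finite values are clamped to
- // the target integer range, truncated, and then converted lane by lane.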
- for (int i = 0; i < kNumLanes32; ++i) {
- Node* double_rep =
- graph()->NewNode(machine()->ChangeFloat32ToFloat64(), rep[i]);
- Diamond nan_d(
- graph(), common(),
- graph()->NewNode(machine()->Float64Equal(), double_rep, double_rep));
- Node* temp =
- nan_d.Phi(MachineRepresentation::kFloat64, double_rep, double_zero);
- Diamond min_d(graph(), common(),
- graph()->NewNode(machine()->Float64LessThan(), temp, min));
- temp = min_d.Phi(MachineRepresentation::kFloat64, min, temp);
- Diamond max_d(graph(), common(),
- graph()->NewNode(machine()->Float64LessThan(), max, temp));
- temp = max_d.Phi(MachineRepresentation::kFloat64, max, temp);
- Node* trunc = BuildF64Trunc(temp);
- if (is_signed) {
- rep_node[i] = graph()->NewNode(machine()->ChangeFloat64ToInt32(), trunc);
- } else {
- rep_node[i] =
- graph()->NewNode(machine()->TruncateFloat64ToUint32(), trunc);
- }
- }
- ReplaceNode(node, rep_node, kNumLanes32);
-}
-
-void SimdScalarLowering::LowerConvertFromInt(Node* node,
- SimdType input_rep_type,
- SimdType output_rep_type,
- bool is_signed, int start_index) {
- DCHECK_EQ(1, node->InputCount());
- Node** rep = GetReplacementsWithType(node->InputAt(0), input_rep_type);
-
- int32_t mask = 0;
- if (input_rep_type == SimdType::kInt16x8) {
- DCHECK_EQ(output_rep_type, SimdType::kInt32x4);
- mask = kMask16;
- } else {
- DCHECK_EQ(output_rep_type, SimdType::kInt16x8);
- DCHECK_EQ(input_rep_type, SimdType::kInt8x16);
- mask = kMask8;
- }
-
- int num_lanes = NumLanes(output_rep_type);
- Node** rep_node = zone()->NewArray<Node*>(num_lanes);
- for (int i = 0; i < num_lanes; ++i) {
- rep_node[i] =
- is_signed ? rep[i + start_index] : Mask(rep[i + start_index], mask);
- }
-
- ReplaceNode(node, rep_node, num_lanes);
-}
-
-void SimdScalarLowering::LowerPack(Node* node, SimdType input_rep_type,
- SimdType output_rep_type, bool is_signed) {
- DCHECK_EQ(2, node->InputCount());
- Node** rep_left = GetReplacementsWithType(node->InputAt(0), input_rep_type);
- Node** rep_right = GetReplacementsWithType(node->InputAt(1), input_rep_type);
- const Operator* less_op = machine()->Int32LessThan();
- Node* min = nullptr;
- Node* max = nullptr;
- const Operator* sign_extend;
- MachineRepresentation phi_rep;
- if (output_rep_type == SimdType::kInt16x8) {
- sign_extend = machine()->SignExtendWord16ToInt32();
- DCHECK(input_rep_type == SimdType::kInt32x4);
- if (is_signed) {
- min = mcgraph_->Int32Constant(std::numeric_limits<int16_t>::min());
- max = mcgraph_->Int32Constant(std::numeric_limits<int16_t>::max());
- } else {
- min = mcgraph_->Uint32Constant(std::numeric_limits<uint16_t>::min());
- max = mcgraph_->Uint32Constant(std::numeric_limits<uint16_t>::max());
- }
- phi_rep = MachineRepresentation::kWord16;
- } else {
- sign_extend = machine()->SignExtendWord8ToInt32();
- DCHECK(output_rep_type == SimdType::kInt8x16 &&
- input_rep_type == SimdType::kInt16x8);
- if (is_signed) {
- min = mcgraph_->Int32Constant(std::numeric_limits<int8_t>::min());
- max = mcgraph_->Int32Constant(std::numeric_limits<int8_t>::max());
- } else {
- min = mcgraph_->Uint32Constant(std::numeric_limits<uint8_t>::min());
- max = mcgraph_->Uint32Constant(std::numeric_limits<uint8_t>::max());
- }
- phi_rep = MachineRepresentation::kWord8;
- }
- int num_lanes = NumLanes(output_rep_type);
- Node** rep_node = zone()->NewArray<Node*>(num_lanes);
- for (int i = 0; i < num_lanes; ++i) {
- Node* input = nullptr;
- if (i < num_lanes / 2)
- input = rep_left[i];
- else
- input = rep_right[i - num_lanes / 2];
- Diamond d_min(graph(), common(), graph()->NewNode(less_op, input, min));
- input = d_min.Phi(phi_rep, min, input);
- Diamond d_max(graph(), common(), graph()->NewNode(less_op, max, input));
- // We keep nodes in sign-extended form. E.g. for uint8_t, we need to
- // compare with 0x000000ff (saturated narrowing), but the result of
- // conversion should be 0xffffffff to work well with the rest of lowering.
- rep_node[i] = graph()->NewNode(sign_extend, d_max.Phi(phi_rep, max, input));
- }
- ReplaceNode(node, rep_node, num_lanes);
-}
-
-void SimdScalarLowering::LowerShiftOp(Node* node, SimdType type) {
- DCHECK_EQ(2, node->InputCount());
-
- // The shift node, if it has a replacement, should be a single scalar.
- DCHECK_GE(1, ReplacementCount(node->InputAt(1)));
- Node* val = (HasReplacement(0, node->InputAt(1)))
- ? GetReplacements(node->InputAt(1))[0]
- : node->InputAt(1);
-
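- // Mask the shift count so it stays within the lane width, matching wasm
- // shift semantics (the count is taken modulo the lane width).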
- Node* shift_node = Mask(val, GetMaskForShift(node));
- Node** rep = GetReplacementsWithType(node->InputAt(0), type);
- int num_lanes = NumLanes(type);
- Node** rep_node = zone()->NewArray<Node*>(num_lanes);
- for (int i = 0; i < num_lanes; ++i) {
- rep_node[i] = rep[i];
- switch (node->opcode()) {
- case IrOpcode::kI8x16ShrU:
- rep_node[i] = Mask(rep_node[i], kMask8);
- rep_node[i] =
- graph()->NewNode(machine()->Word32Shr(), rep_node[i], shift_node);
- break;
- case IrOpcode::kI16x8ShrU:
- rep_node[i] = Mask(rep_node[i], kMask16);
- V8_FALLTHROUGH;
- case IrOpcode::kI32x4ShrU:
- rep_node[i] =
- graph()->NewNode(machine()->Word32Shr(), rep_node[i], shift_node);
- break;
- case IrOpcode::kI64x2ShrU:
- rep_node[i] =
- graph()->NewNode(machine()->Word64Shr(), rep_node[i], shift_node);
- break;
- case IrOpcode::kI64x2Shl:
- rep_node[i] =
- graph()->NewNode(machine()->Word64Shl(), rep_node[i], shift_node);
- break;
- case IrOpcode::kI32x4Shl:
- rep_node[i] =
- graph()->NewNode(machine()->Word32Shl(), rep_node[i], shift_node);
- break;
- case IrOpcode::kI16x8Shl:
- rep_node[i] =
- graph()->NewNode(machine()->Word32Shl(), rep_node[i], shift_node);
- rep_node[i] = FixUpperBits(rep_node[i], kShift16);
- break;
- case IrOpcode::kI8x16Shl:
- rep_node[i] =
- graph()->NewNode(machine()->Word32Shl(), rep_node[i], shift_node);
- rep_node[i] = FixUpperBits(rep_node[i], kShift8);
- break;
- case IrOpcode::kI64x2ShrS:
- rep_node[i] =
- graph()->NewNode(machine()->Word64Sar(), rep_node[i], shift_node);
- break;
- case IrOpcode::kI32x4ShrS:
- case IrOpcode::kI16x8ShrS:
- case IrOpcode::kI8x16ShrS:
- rep_node[i] =
- graph()->NewNode(machine()->Word32Sar(), rep_node[i], shift_node);
- break;
- default:
- UNREACHABLE();
- }
- }
- ReplaceNode(node, rep_node, num_lanes);
-}
-
-Node* SimdScalarLowering::ConstructPhiForComparison(Diamond d,
- SimdType rep_type,
- int true_value,
- int false_value) {
- // Close the given Diamond d using a Phi node, taking care of constructing the
- // right kind of constants (Int32 or Int64) based on rep_type.
- if (rep_type == SimdType::kFloat64x2 || rep_type == SimdType::kInt64x2) {
- MachineRepresentation rep = MachineRepresentation::kWord64;
- return d.Phi(rep, mcgraph_->Int64Constant(true_value),
- mcgraph_->Int64Constant(false_value));
- } else {
- MachineRepresentation rep =
- (rep_type == SimdType::kFloat32x4)
- ? MachineRepresentation::kWord32
- : MachineTypeFrom(rep_type).representation();
- return d.Phi(rep, mcgraph_->Int32Constant(true_value),
- mcgraph_->Int32Constant(false_value));
- }
-}
-
-void SimdScalarLowering::LowerNotEqual(Node* node, SimdType input_rep_type,
- const Operator* op) {
- DCHECK_EQ(2, node->InputCount());
- Node** rep_left = GetReplacementsWithType(node->InputAt(0), input_rep_type);
- Node** rep_right = GetReplacementsWithType(node->InputAt(1), input_rep_type);
- int num_lanes = NumLanes(input_rep_type);
- Node** rep_node = zone()->NewArray<Node*>(num_lanes);
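- // The equality comparison is inverted by the Phi: equal lanes become 0 and
- // unequal lanes become -1 (all bits set), matching SIMD comparison results.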
- for (int i = 0; i < num_lanes; ++i) {
- Diamond d(graph(), common(),
- graph()->NewNode(op, rep_left[i], rep_right[i]));
- rep_node[i] = ConstructPhiForComparison(d, input_rep_type, 0, -1);
- }
- ReplaceNode(node, rep_node, num_lanes);
-}
-
-void SimdScalarLowering::LowerBitMaskOp(Node* node, SimdType rep_type,
- int msb_index) {
- Node** reps = GetReplacementsWithType(node->InputAt(0), rep_type);
- int num_lanes = NumLanes(rep_type);
- Node** rep_node = zone()->NewArray<Node*>(1);
- Node* result = mcgraph_->Int32Constant(0);
- uint32_t mask = 1 << msb_index;
-
- for (int i = 0; i < num_lanes; ++i) {
- // Lane i should end up at bit i in the final result.
- // +-----------------------------------------------------------------+
- // | | msb_index | (i < msb_index) | (i > msb_index) |
- // +-------+-----------+----------------------+----------------------+
- // | i8x16 | 7 | msb >> (msb_index-i) | msb << (i-msb_index) |
- // | i16x8 | 15 | msb >> (msb_index-i) | n/a |
- // | i32x4 | 31 | msb >> (msb_index-i) | n/a |
- // +-------+-----------+----------------------+----------------------+
- Node* msb = Mask(reps[i], mask);
-
- if (i < msb_index) {
- int shift = msb_index - i;
- Node* shifted = graph()->NewNode(machine()->Word32Shr(), msb,
- mcgraph_->Int32Constant(shift));
- result = graph()->NewNode(machine()->Word32Or(), shifted, result);
- } else if (i > msb_index) {
- int shift = i - msb_index;
- Node* shifted = graph()->NewNode(machine()->Word32Shl(), msb,
- mcgraph_->Int32Constant(shift));
- result = graph()->NewNode(machine()->Word32Or(), shifted, result);
- } else {
- result = graph()->NewNode(machine()->Word32Or(), msb, result);
- }
- }
-
- rep_node[0] = result;
- ReplaceNode(node, rep_node, 1);
-}
-
-void SimdScalarLowering::LowerAllTrueOp(Node* node, SimdType rep_type) {
- // AllTrue ops require the input to be of a particular SimdType, but the op
- // itself is always replaced by a single Int32x4 node.
- int num_lanes = NumLanes(rep_type);
- DCHECK_EQ(1, node->InputCount());
- Node** rep = GetReplacementsWithType(node->InputAt(0), rep_type);
- Node* zero;
- Node* tmp_result;
- MachineRepresentation result_rep = MachineRepresentation::kWord32;
- const Operator* equals;
-
- if (SimdType::kInt64x2 == rep_type) {
- zero = mcgraph_->Int64Constant(0);
- tmp_result = mcgraph_->Int64Constant(1);
- result_rep = MachineRepresentation::kWord64;
- equals = machine()->Word64Equal();
- } else {
- zero = mcgraph_->Int32Constant(0);
- tmp_result = mcgraph_->Int32Constant(1);
- equals = machine()->Word32Equal();
- }
-
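- // tmp_result starts at 1 and is forced to 0 as soon as any lane compares
- // equal to zero, so the final result is 1 only if all lanes are non-zero.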
- Node** rep_node = zone()->NewArray<Node*>(num_lanes);
- for (int i = 0; i < num_lanes; ++i) {
- Diamond d(graph(), common(), graph()->NewNode(equals, rep[i], zero));
- tmp_result = d.Phi(result_rep, zero, tmp_result);
- }
-
- if (SimdType::kInt64x2 == rep_type) {
- tmp_result =
- graph()->NewNode(machine()->TruncateInt64ToInt32(), tmp_result);
- }
-
- rep_node[0] = tmp_result;
- ReplaceNode(node, rep_node, 1);
-}
-
-void SimdScalarLowering::LowerFloatPseudoMinMax(Node* node, const Operator* op,
- bool is_max, SimdType type) {
- DCHECK_EQ(2, node->InputCount());
- Node** rep_left = GetReplacementsWithType(node->InputAt(0), type);
- Node** rep_right = GetReplacementsWithType(node->InputAt(1), type);
- int num_lanes = NumLanes(type);
- Node** rep_node = zone()->NewArray<Node*>(num_lanes);
- MachineRepresentation rep = MachineTypeFrom(type).representation();
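- // Pseudo min/max follow the wasm semantics pmin(a, b) = b < a ? b : a and
- // pmax(a, b) = a < b ? b : a, so only the comparison operand order differs.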
- for (int i = 0; i < num_lanes; ++i) {
- Node* cmp = is_max ? graph()->NewNode(op, rep_left[i], rep_right[i])
- : graph()->NewNode(op, rep_right[i], rep_left[i]);
- Diamond d(graph(), common(), cmp);
- rep_node[i] = d.Phi(rep, rep_right[i], rep_left[i]);
- }
- ReplaceNode(node, rep_node, num_lanes);
-}
-
-void SimdScalarLowering::LowerNode(Node* node) {
- SimdType rep_type = ReplacementType(node);
- int num_lanes = NumLanes(rep_type);
- switch (node->opcode()) {
- case IrOpcode::kS128Const: {
- // We could use GetReplacementsWithType for all this, but it adds a lot of
- // nodes, so sign extend the immediates ourselves here.
- DCHECK_EQ(0, node->InputCount());
- Node** rep_node = zone()->NewArray<Node*>(num_lanes);
- S128ImmediateParameter params = S128ImmediateParameterOf(node->op());
-
- // For all the small ints below, we have a choice of static_cast or bit
- // twiddling; clang seems to be able to optimize either
- // (https://godbolt.org/z/9c65o8), so use static_cast for clarity.
- switch (rep_type) {
- case SimdType::kInt8x16: {
- for (int i = 0; i < num_lanes; ++i) {
- Address data_address = reinterpret_cast<Address>(params.data() + i);
- rep_node[i] = mcgraph_->Int32Constant(static_cast<int32_t>(
- base::ReadLittleEndianValue<int8_t>(data_address)));
- }
- break;
- }
- case SimdType::kInt16x8: {
- int16_t val[kNumLanes16];
- base::Memcpy(val, params.data(), kSimd128Size);
- for (int i = 0; i < num_lanes; ++i) {
- rep_node[i] = mcgraph_->Int32Constant(static_cast<int32_t>(
- base::ReadLittleEndianValue<int16_t>(&val[i])));
- }
- break;
- }
- case SimdType::kInt32x4: {
- uint32_t val[kNumLanes32];
- base::Memcpy(val, params.data(), kSimd128Size);
- for (int i = 0; i < num_lanes; ++i) {
- rep_node[i] = mcgraph_->Int32Constant(
- base::ReadLittleEndianValue<uint32_t>(&val[i]));
- }
- break;
- }
- case SimdType::kInt64x2: {
- uint64_t val[kNumLanes64];
- base::Memcpy(val, params.data(), kSimd128Size);
- for (int i = 0; i < num_lanes; ++i) {
- rep_node[i] = mcgraph_->Int64Constant(
- base::ReadLittleEndianValue<uint64_t>(&val[i]));
- }
- break;
- }
- case SimdType::kFloat32x4: {
- float val[kNumLanes32];
- base::Memcpy(val, params.data(), kSimd128Size);
- for (int i = 0; i < num_lanes; ++i) {
- rep_node[i] = mcgraph_->Float32Constant(
- base::ReadLittleEndianValue<float>(&val[i]));
- }
- break;
- }
- case SimdType::kFloat64x2: {
- double val[kNumLanes64];
- base::Memcpy(val, params.data(), kSimd128Size);
- for (int i = 0; i < num_lanes; ++i) {
- rep_node[i] = mcgraph_->Float64Constant(
- base::ReadLittleEndianValue<double>(&val[i]));
- }
- break;
- }
- }
- ReplaceNode(node, rep_node, num_lanes);
- break;
- }
- case IrOpcode::kStart: {
- int parameter_count = GetParameterCountAfterLowering();
- // Only exchange the node if the parameter count actually changed.
- if (parameter_count != static_cast<int>(signature()->parameter_count())) {
- int delta =
- parameter_count - static_cast<int>(signature()->parameter_count());
- int new_output_count = node->op()->ValueOutputCount() + delta;
- NodeProperties::ChangeOp(node, common()->Start(new_output_count));
- }
- break;
- }
- case IrOpcode::kParameter: {
- DCHECK_EQ(1, node->InputCount());
- int param_count = static_cast<int>(signature()->parameter_count());
- // Only exchange the node if the parameter count actually changed. We do
- // not even have to do the default lowering because the start node,
- // the only input of a parameter node, only changes if the parameter count
- // changes.
- if (GetParameterCountAfterLowering() != param_count) {
- int old_index = ParameterIndexOf(node->op());
- // Parameter index 0 is the instance parameter; we will use old_index to
- // index into the function signature, so we need to decrease it by 1.
- --old_index;
- int new_index =
- GetParameterIndexAfterLoweringSimd128(signature(), old_index);
- // Similarly, the index into the function signature needs to account for
- // the instance parameter, so increase it by 1.
- ++new_index;
- NodeProperties::ChangeOp(node, common()->Parameter(new_index));
-
- if (old_index < 0) {
- break;
- }
-
- DCHECK(old_index < param_count);
-
- if (signature()->GetParam(old_index) ==
- MachineRepresentation::kSimd128) {
- Node* new_node[kNumLanes32];
- new_node[0] = node;
- for (int i = 1; i < kNumLanes32; ++i) {
- new_node[i] = graph()->NewNode(common()->Parameter(new_index + i),
- graph()->start());
- }
- ReplaceNode(node, new_node, kNumLanes32);
- }
- }
- break;
- }
- case IrOpcode::kSimd128ReverseBytes: {
- DCHECK_EQ(1, node->InputCount());
- SimdType input_type = ReplacementType(node->InputAt(0));
- bool is_float = input_type == SimdType::kFloat32x4 ||
- input_type == SimdType::kFloat64x2;
- replacements_[node->id()].type =
- is_float ? SimdType::kFloat32x4 : SimdType::kInt32x4;
- Node** rep = GetReplacementsWithType(
- node->InputAt(0),
- is_float ? SimdType::kFloat32x4 : SimdType::kInt32x4);
- Node* rep_node[kNumLanes32];
- for (int i = 0; i < kNumLanes32; ++i) {
- Node* temp = is_float ? graph()->NewNode(
- machine()->BitcastFloat32ToInt32(), rep[i])
- : rep[i];
- temp = graph()->NewNode(machine()->Word32ReverseBytes(), temp);
- rep_node[kNumLanes32 - 1 - i] =
- is_float
- ? graph()->NewNode(machine()->BitcastInt32ToFloat32(), temp)
- : temp;
- }
- ReplaceNode(node, rep_node, kNumLanes32);
- break;
- }
- case IrOpcode::kLoad:
- case IrOpcode::kLoadFromObject:
- case IrOpcode::kUnalignedLoad:
- case IrOpcode::kProtectedLoad:
- case IrOpcode::kLoadImmutable: {
- LowerLoadOp(node, rep_type);
- break;
- }
- case IrOpcode::kLoadTransform: {
- LowerLoadTransformOp(node, rep_type);
- break;
- }
- case IrOpcode::kStore:
- case IrOpcode::kStoreToObject:
- case IrOpcode::kUnalignedStore:
- case IrOpcode::kProtectedStore: {
- LowerStoreOp(node);
- break;
- }
- case IrOpcode::kReturn: {
- int old_input_count = node->InputCount();
- int return_arity = static_cast<int>(signature()->return_count());
- for (int i = 0; i < return_arity; i++) {
- if (signature()->GetReturn(i) != MachineRepresentation::kSimd128) {
- continue;
- }
-
- // Return nodes have a hidden input at value 0.
- Node* input = node->InputAt(i + 1);
- if (!HasReplacement(0, input)) {
- continue;
- }
-
- // V128 return types are lowered to i32x4.
- Node** reps = GetReplacementsWithType(input, rep_type);
- ReplaceNode(input, reps, NumLanes(rep_type));
- }
-
- DefaultLowering(node);
- // Nothing needs to be done here since inputs did not change.
- if (old_input_count == node->InputCount()) {
- break;
- }
-
- int new_return_count = GetReturnCountAfterLoweringSimd128(signature());
- if (static_cast<int>(signature()->return_count()) != new_return_count) {
- NodeProperties::ChangeOp(node, common()->Return(new_return_count));
- }
- break;
- }
- case IrOpcode::kCall: {
- // TODO(turbofan): Make wasm code const-correct wrt. CallDescriptor.
- auto call_descriptor =
- const_cast<CallDescriptor*>(CallDescriptorOf(node->op()));
- bool returns_require_lowering =
- GetReturnCountAfterLoweringSimd128(call_descriptor) !=
- static_cast<int>(call_descriptor->ReturnCount());
-
- // All call arguments are lowered to i32x4 in the call descriptor, so the
- // arguments need to be converted to i32x4 as well.
- for (int i = NodeProperties::PastValueIndex(node) - 1; i >= 0; i--) {
- Node* input = node->InputAt(i);
- if (ReplacementCount(input) == 1) {
- // Special case for extract lanes
- Node** reps = GetReplacements(input);
- ReplaceNode(input, reps, 1);
- } else if (HasReplacement(0, input)) {
- Node** reps = GetReplacementsWithType(input, SimdType::kInt32x4);
- ReplaceNode(input, reps, NumLanes(SimdType::kInt32x4));
- }
- }
-
- if (DefaultLowering(node) || returns_require_lowering) {
- // We have to adjust the call descriptor.
- const Operator* op = common()->Call(
- GetI32WasmCallDescriptorForSimd(zone(), call_descriptor));
- NodeProperties::ChangeOp(node, op);
- }
-
- if (!returns_require_lowering) {
- break;
- }
-
- size_t return_arity = call_descriptor->ReturnCount();
-
- if (return_arity == 1) {
- // Special case for return_arity 1: we access the additional return values
- // through projections created here. With multi-returns, projections have
- // already been built for each return value and are handled by the code
- // below.
- Node* rep_node[kNumLanes32];
- for (int i = 0; i < kNumLanes32; ++i) {
- rep_node[i] =
- graph()->NewNode(common()->Projection(i), node, graph()->start());
- }
- ReplaceNode(node, rep_node, kNumLanes32);
- break;
- }
-
- ZoneVector<Node*> projections(return_arity, zone());
- NodeProperties::CollectValueProjections(node, projections.data(),
- return_arity);
-
- for (size_t old_index = 0, new_index = 0; old_index < return_arity;
- ++old_index, ++new_index) {
- Node* use_node = projections[old_index];
- DCHECK_EQ(ProjectionIndexOf(use_node->op()), old_index);
- DCHECK_EQ(GetReturnIndexAfterLowering(call_descriptor,
- static_cast<int>(old_index)),
- static_cast<int>(new_index));
- if (new_index != old_index) {
- NodeProperties::ChangeOp(use_node, common()->Projection(new_index));
- }
- if (call_descriptor->GetReturnType(old_index).representation() ==
- MachineRepresentation::kSimd128) {
- Node* rep_node[kNumLanes32];
- for (int i = 0; i < kNumLanes32; ++i) {
- rep_node[i] = graph()->NewNode(common()->Projection(new_index + i),
- node, graph()->start());
- }
- ReplaceNode(use_node, rep_node, kNumLanes32);
- new_index += kNumLanes32 - 1;
- }
- }
- break;
- }
- case IrOpcode::kPhi: {
- MachineRepresentation rep = PhiRepresentationOf(node->op());
- if (rep == MachineRepresentation::kSimd128) {
- // The replacement nodes have already been created, we only have to
- // replace placeholder nodes.
- Node** rep_node = GetReplacements(node);
- for (int i = 0; i < node->op()->ValueInputCount(); ++i) {
- Node** rep_input =
- GetReplacementsWithType(node->InputAt(i), rep_type);
- for (int j = 0; j < num_lanes; j++) {
- rep_node[j]->ReplaceInput(i, rep_input[j]);
- }
- }
- } else {
- DefaultLowering(node);
- }
- break;
- }
- case IrOpcode::kLoopExitValue: {
- if (!HasReplacement(0, node->InputAt(0))) break;
- Node* control = node->InputAt(NodeProperties::FirstControlIndex(node));
- Node** inputs = GetReplacementsWithType(node->InputAt(0), rep_type);
- Node** rep_nodes = zone()->NewArray<Node*>(num_lanes);
- for (int i = 0; i < num_lanes; i++) {
- auto op =
- common()->LoopExitValue(MachineTypeFrom(rep_type).representation());
- rep_nodes[i] = graph()->NewNode(op, inputs[i], control);
- }
- ReplaceNode(node, rep_nodes, num_lanes);
- break;
- }
- case IrOpcode::kI64x2Add: {
- LowerBinaryOp(node, rep_type, machine()->Int64Add());
- break;
- }
- case IrOpcode::kI64x2Sub: {
- LowerBinaryOp(node, rep_type, machine()->Int64Sub());
- break;
- }
- case IrOpcode::kI64x2Mul: {
- LowerBinaryOp(node, rep_type, machine()->Int64Mul());
- break;
- }
-#define I32X4_BINOP_CASE(opcode, instruction) \
- case IrOpcode::opcode: { \
- LowerBinaryOp(node, rep_type, machine()->instruction()); \
- break; \
- }
- I32X4_BINOP_CASE(kI32x4Add, Int32Add)
- I32X4_BINOP_CASE(kI32x4Sub, Int32Sub)
- I32X4_BINOP_CASE(kI32x4Mul, Int32Mul)
- I32X4_BINOP_CASE(kS128And, Word32And)
- I32X4_BINOP_CASE(kS128Or, Word32Or)
- I32X4_BINOP_CASE(kS128Xor, Word32Xor)
-#undef I32X4_BINOP_CASE
- case IrOpcode::kI16x8Add:
- case IrOpcode::kI8x16Add: {
- LowerBinaryOpForSmallInt(node, rep_type, machine()->Int32Add());
- break;
- }
- case IrOpcode::kI16x8Sub:
- case IrOpcode::kI8x16Sub: {
- LowerBinaryOpForSmallInt(node, rep_type, machine()->Int32Sub());
- break;
- }
- case IrOpcode::kI16x8Mul: {
- LowerBinaryOpForSmallInt(node, rep_type, machine()->Int32Mul());
- break;
- }
- case IrOpcode::kI16x8AddSatS:
- case IrOpcode::kI8x16AddSatS: {
- LowerSaturateBinaryOp(node, rep_type, machine()->Int32Add(), true);
- break;
- }
- case IrOpcode::kI16x8SubSatS:
- case IrOpcode::kI8x16SubSatS: {
- LowerSaturateBinaryOp(node, rep_type, machine()->Int32Sub(), true);
- break;
- }
- case IrOpcode::kI16x8AddSatU:
- case IrOpcode::kI8x16AddSatU: {
- LowerSaturateBinaryOp(node, rep_type, machine()->Int32Add(), false);
- break;
- }
- case IrOpcode::kI16x8SubSatU:
- case IrOpcode::kI8x16SubSatU: {
- LowerSaturateBinaryOp(node, rep_type, machine()->Int32Sub(), false);
- break;
- }
- case IrOpcode::kI32x4MaxS:
- case IrOpcode::kI16x8MaxS:
- case IrOpcode::kI8x16MaxS: {
- LowerIntMinMax(node, machine()->Int32LessThan(), true, rep_type);
- break;
- }
- case IrOpcode::kI32x4MinS:
- case IrOpcode::kI16x8MinS:
- case IrOpcode::kI8x16MinS: {
- LowerIntMinMax(node, machine()->Int32LessThan(), false, rep_type);
- break;
- }
- case IrOpcode::kI32x4MaxU:
- case IrOpcode::kI16x8MaxU:
- case IrOpcode::kI8x16MaxU: {
- LowerIntMinMax(node, machine()->Uint32LessThan(), true, rep_type);
- break;
- }
- case IrOpcode::kI32x4MinU:
- case IrOpcode::kI16x8MinU:
- case IrOpcode::kI8x16MinU: {
- LowerIntMinMax(node, machine()->Uint32LessThan(), false, rep_type);
- break;
- }
- case IrOpcode::kI32x4DotI16x8S: {
- // i32x4.dot_i16x8_s takes i16x8 inputs but produces an i32x4 output.
- DCHECK_EQ(2, node->InputCount());
- Node** rep_left =
- GetReplacementsWithType(node->InputAt(0), SimdType::kInt16x8);
- Node** rep_right =
- GetReplacementsWithType(node->InputAt(1), SimdType::kInt16x8);
- int num_lanes = NumLanes(rep_type);
- Node** rep_node = zone()->NewArray<Node*>(num_lanes);
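- // Each output lane is the sum of the products of two adjacent input lanes.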
- for (int i = 0; i < num_lanes; ++i) {
- Node* lo = graph()->NewNode(machine()->Int32Mul(), rep_left[i * 2],
- rep_right[i * 2]);
- Node* hi = graph()->NewNode(machine()->Int32Mul(), rep_left[i * 2 + 1],
- rep_right[i * 2 + 1]);
- rep_node[i] = graph()->NewNode(machine()->Int32Add(), lo, hi);
- }
- ReplaceNode(node, rep_node, num_lanes);
- break;
- }
- case IrOpcode::kI64x2Neg: {
- DCHECK_EQ(1, node->InputCount());
- Node** rep = GetReplacementsWithType(node->InputAt(0), rep_type);
- int num_lanes = NumLanes(rep_type);
- Node** rep_node = zone()->NewArray<Node*>(num_lanes);
- Node* zero = graph()->NewNode(common()->Int64Constant(0));
- for (int i = 0; i < num_lanes; ++i) {
- rep_node[i] = graph()->NewNode(machine()->Int64Sub(), zero, rep[i]);
- }
- ReplaceNode(node, rep_node, num_lanes);
- break;
- }
- case IrOpcode::kI32x4Neg:
- case IrOpcode::kI16x8Neg:
- case IrOpcode::kI8x16Neg: {
- DCHECK_EQ(1, node->InputCount());
- Node** rep = GetReplacementsWithType(node->InputAt(0), rep_type);
- int num_lanes = NumLanes(rep_type);
- Node** rep_node = zone()->NewArray<Node*>(num_lanes);
- Node* zero = graph()->NewNode(common()->Int32Constant(0));
- for (int i = 0; i < num_lanes; ++i) {
- rep_node[i] = graph()->NewNode(machine()->Int32Sub(), zero, rep[i]);
- if (node->opcode() == IrOpcode::kI16x8Neg) {
- rep_node[i] = FixUpperBits(rep_node[i], kShift16);
- } else if (node->opcode() == IrOpcode::kI8x16Neg) {
- rep_node[i] = FixUpperBits(rep_node[i], kShift8);
- }
- }
- ReplaceNode(node, rep_node, num_lanes);
- break;
- }
- case IrOpcode::kI32x4Abs:
- case IrOpcode::kI16x8Abs:
- case IrOpcode::kI8x16Abs: {
- // From https://stackoverflow.com/a/14194764
- // abs(x) = (x XOR y) - y
- Node** rep = GetReplacementsWithType(node->InputAt(0), rep_type);
- Node** rep_node = zone()->NewArray<Node*>(num_lanes);
- for (int i = 0; i < num_lanes; ++i) {
- // It's fine to shift by 31 even for i8x16 since each node is
- // effectively expanded to 32 bits.
- Node* y = graph()->NewNode(machine()->Word32Sar(), rep[i],
- mcgraph_->Int32Constant(31));
- rep_node[i] = graph()->NewNode(
- machine()->Int32Sub(),
- graph()->NewNode(machine()->Word32Xor(), rep[i], y), y);
- if (node->opcode() == IrOpcode::kI16x8Abs) {
- rep_node[i] = FixUpperBits(rep_node[i], kShift16);
- } else if (node->opcode() == IrOpcode::kI8x16Abs) {
- rep_node[i] = FixUpperBits(rep_node[i], kShift8);
- }
- }
- ReplaceNode(node, rep_node, num_lanes);
- break;
- }
- case IrOpcode::kS128Zero: {
- DCHECK_EQ(0, node->InputCount());
- Node* rep_node[kNumLanes32];
- for (int i = 0; i < kNumLanes32; ++i) {
- rep_node[i] = mcgraph_->Int32Constant(0);
- }
- ReplaceNode(node, rep_node, kNumLanes32);
- break;
- }
- case IrOpcode::kS128Not: {
- DCHECK_EQ(1, node->InputCount());
- Node** rep = GetReplacementsWithType(node->InputAt(0), rep_type);
- Node* rep_node[kNumLanes32];
- Node* mask = graph()->NewNode(common()->Int32Constant(0xFFFFFFFF));
- for (int i = 0; i < kNumLanes32; ++i) {
- rep_node[i] = graph()->NewNode(machine()->Word32Xor(), rep[i], mask);
- }
- ReplaceNode(node, rep_node, kNumLanes32);
- break;
- }
- case IrOpcode::kS128AndNot: {
- DCHECK_EQ(2, node->InputCount());
- Node** rep_left = GetReplacementsWithType(node->InputAt(0), rep_type);
- Node** rep_right = GetReplacementsWithType(node->InputAt(1), rep_type);
- int num_lanes = NumLanes(rep_type);
- Node** rep_node = zone()->NewArray<Node*>(num_lanes);
- Node* mask = graph()->NewNode(common()->Int32Constant(0xFFFFFFFF));
- for (int i = 0; i < num_lanes; ++i) {
- Node* not_rep_right =
- graph()->NewNode(machine()->Word32Xor(), rep_right[i], mask);
- rep_node[i] = graph()->NewNode(machine()->Word32And(), rep_left[i],
- not_rep_right);
- }
- ReplaceNode(node, rep_node, num_lanes);
- break;
- }
- case IrOpcode::kI32x4SConvertF32x4: {
- LowerConvertFromFloat(node, true);
- break;
- }
- case IrOpcode::kI32x4UConvertF32x4: {
- LowerConvertFromFloat(node, false);
- break;
- }
- case IrOpcode::kI32x4SConvertI16x8Low: {
- LowerConvertFromInt(node, SimdType::kInt16x8, SimdType::kInt32x4, true,
- 0);
- break;
- }
- case IrOpcode::kI32x4SConvertI16x8High: {
- LowerConvertFromInt(node, SimdType::kInt16x8, SimdType::kInt32x4, true,
- 4);
- break;
- }
- case IrOpcode::kI32x4UConvertI16x8Low: {
- LowerConvertFromInt(node, SimdType::kInt16x8, SimdType::kInt32x4, false,
- 0);
- break;
- }
- case IrOpcode::kI32x4UConvertI16x8High: {
- LowerConvertFromInt(node, SimdType::kInt16x8, SimdType::kInt32x4, false,
- 4);
- break;
- }
- case IrOpcode::kI16x8SConvertI8x16Low: {
- LowerConvertFromInt(node, SimdType::kInt8x16, SimdType::kInt16x8, true,
- 0);
- break;
- }
- case IrOpcode::kI16x8SConvertI8x16High: {
- LowerConvertFromInt(node, SimdType::kInt8x16, SimdType::kInt16x8, true,
- 8);
- break;
- }
- case IrOpcode::kI16x8UConvertI8x16Low: {
- LowerConvertFromInt(node, SimdType::kInt8x16, SimdType::kInt16x8, false,
- 0);
- break;
- }
- case IrOpcode::kI16x8UConvertI8x16High: {
- LowerConvertFromInt(node, SimdType::kInt8x16, SimdType::kInt16x8, false,
- 8);
- break;
- }
- case IrOpcode::kI16x8SConvertI32x4: {
- LowerPack(node, SimdType::kInt32x4, SimdType::kInt16x8, true);
- break;
- }
- case IrOpcode::kI16x8UConvertI32x4: {
- LowerPack(node, SimdType::kInt32x4, SimdType::kInt16x8, false);
- break;
- }
- case IrOpcode::kI8x16SConvertI16x8: {
- LowerPack(node, SimdType::kInt16x8, SimdType::kInt8x16, true);
- break;
- }
- case IrOpcode::kI8x16UConvertI16x8: {
- LowerPack(node, SimdType::kInt16x8, SimdType::kInt8x16, false);
- break;
- }
- case IrOpcode::kI64x2Shl:
- case IrOpcode::kI32x4Shl:
- case IrOpcode::kI16x8Shl:
- case IrOpcode::kI8x16Shl:
- case IrOpcode::kI64x2ShrS:
- case IrOpcode::kI32x4ShrS:
- case IrOpcode::kI16x8ShrS:
- case IrOpcode::kI8x16ShrS:
- case IrOpcode::kI64x2ShrU:
- case IrOpcode::kI32x4ShrU:
- case IrOpcode::kI16x8ShrU:
- case IrOpcode::kI8x16ShrU: {
- LowerShiftOp(node, rep_type);
- break;
- }
-#define F32X4_BINOP_CASE(name) \
- case IrOpcode::kF32x4##name: { \
- LowerBinaryOp(node, rep_type, machine()->Float32##name()); \
- break; \
- }
- F32X4_BINOP_CASE(Add)
- F32X4_BINOP_CASE(Sub)
- F32X4_BINOP_CASE(Mul)
- F32X4_BINOP_CASE(Div)
- F32X4_BINOP_CASE(Min)
- F32X4_BINOP_CASE(Max)
- case IrOpcode::kF32x4Pmin: {
- LowerFloatPseudoMinMax(node, machine()->Float32LessThan(), false,
- rep_type);
- break;
- }
- case IrOpcode::kF32x4Pmax: {
- LowerFloatPseudoMinMax(node, machine()->Float32LessThan(), true,
- rep_type);
- break;
- }
-#undef F32X4_BINOP_CASE
-#define F32X4_UNOP_CASE(name) \
- case IrOpcode::kF32x4##name: { \
- LowerUnaryOp(node, rep_type, machine()->Float32##name()); \
- break; \
- }
- F32X4_UNOP_CASE(Abs)
- F32X4_UNOP_CASE(Neg)
- F32X4_UNOP_CASE(Sqrt)
-#undef F32X4_UNOP_CASE
- case IrOpcode::kF32x4Ceil: {
- LowerUnaryOp(node, rep_type, machine()->Float32RoundUp().op());
- break;
- }
- case IrOpcode::kF32x4Floor: {
- LowerUnaryOp(node, rep_type, machine()->Float32RoundDown().op());
- break;
- }
- case IrOpcode::kF32x4Trunc: {
- LowerUnaryOp(node, rep_type, machine()->Float32RoundTruncate().op());
- break;
- }
- case IrOpcode::kF32x4NearestInt: {
- LowerUnaryOp(node, rep_type, machine()->Float32RoundTiesEven().op());
- break;
- }
- case IrOpcode::kF32x4RecipApprox:
- case IrOpcode::kF32x4RecipSqrtApprox: {
- DCHECK_EQ(1, node->InputCount());
- Node** rep = GetReplacementsWithType(node->InputAt(0), rep_type);
- Node** rep_node = zone()->NewArray<Node*>(num_lanes);
- Node* float_one = graph()->NewNode(common()->Float32Constant(1.0));
- for (int i = 0; i < num_lanes; ++i) {
- Node* tmp = rep[i];
- if (node->opcode() == IrOpcode::kF32x4RecipSqrtApprox) {
- tmp = graph()->NewNode(machine()->Float32Sqrt(), rep[i]);
- }
- rep_node[i] = graph()->NewNode(machine()->Float32Div(), float_one, tmp);
- }
- ReplaceNode(node, rep_node, num_lanes);
- break;
- }
- case IrOpcode::kF32x4SConvertI32x4: {
- LowerUnaryOp(node, SimdType::kInt32x4, machine()->RoundInt32ToFloat32());
- break;
- }
- case IrOpcode::kF32x4UConvertI32x4: {
- LowerUnaryOp(node, SimdType::kInt32x4, machine()->RoundUint32ToFloat32());
- break;
- }
- case IrOpcode::kF64x2Abs: {
- LowerUnaryOp(node, rep_type, machine()->Float64Abs());
- break;
- }
- case IrOpcode::kF64x2Neg: {
- LowerUnaryOp(node, rep_type, machine()->Float64Neg());
- break;
- }
- case IrOpcode::kF64x2Sqrt: {
- LowerUnaryOp(node, rep_type, machine()->Float64Sqrt());
- break;
- }
- case IrOpcode::kF64x2Add: {
- LowerBinaryOp(node, rep_type, machine()->Float64Add());
- break;
- }
- case IrOpcode::kF64x2Sub: {
- LowerBinaryOp(node, rep_type, machine()->Float64Sub());
- break;
- }
- case IrOpcode::kF64x2Mul: {
- LowerBinaryOp(node, rep_type, machine()->Float64Mul());
- break;
- }
- case IrOpcode::kF64x2Div: {
- LowerBinaryOp(node, rep_type, machine()->Float64Div());
- break;
- }
- case IrOpcode::kF64x2Min: {
- LowerBinaryOp(node, rep_type, machine()->Float64Min());
- break;
- }
- case IrOpcode::kF64x2Max: {
- LowerBinaryOp(node, rep_type, machine()->Float64Max());
- break;
- }
- case IrOpcode::kF64x2Pmin: {
- LowerFloatPseudoMinMax(node, machine()->Float64LessThan(), false,
- rep_type);
- break;
- }
- case IrOpcode::kF64x2Pmax: {
- LowerFloatPseudoMinMax(node, machine()->Float64LessThan(), true,
- rep_type);
- break;
- }
- case IrOpcode::kF64x2Ceil: {
- LowerUnaryOp(node, rep_type, machine()->Float64RoundUp().op());
- break;
- }
- case IrOpcode::kF64x2Floor: {
- LowerUnaryOp(node, rep_type, machine()->Float64RoundDown().op());
- break;
- }
- case IrOpcode::kF64x2Trunc: {
- LowerUnaryOp(node, rep_type, machine()->Float64RoundTruncate().op());
- break;
- }
- case IrOpcode::kF64x2NearestInt: {
- LowerUnaryOp(node, rep_type, machine()->Float64RoundTiesEven().op());
- break;
- }
- case IrOpcode::kF64x2Splat:
- case IrOpcode::kF32x4Splat:
- case IrOpcode::kI64x2Splat:
- case IrOpcode::kI32x4Splat:
- case IrOpcode::kI16x8Splat:
- case IrOpcode::kI8x16Splat: {
- Node** rep_node = zone()->NewArray<Node*>(num_lanes);
- Node* val = (HasReplacement(0, node->InputAt(0)))
- ? GetReplacements(node->InputAt(0))[0]
- : node->InputAt(0);
-
- // I16 and I8 values are placed in Word32 nodes, so we need to mask them
- // to account for overflows and then sign-extend them.
- if (node->opcode() == IrOpcode::kI16x8Splat) {
- val = graph()->NewNode(machine()->SignExtendWord16ToInt32(),
- Mask(val, kMask16));
- } else if (node->opcode() == IrOpcode::kI8x16Splat) {
- val = graph()->NewNode(machine()->SignExtendWord8ToInt32(),
- Mask(val, kMask8));
- }
-
- for (int i = 0; i < num_lanes; ++i) {
- rep_node[i] = val;
- }
- ReplaceNode(node, rep_node, num_lanes);
- break;
- }
- case IrOpcode::kF64x2ExtractLane:
- case IrOpcode::kF32x4ExtractLane:
- case IrOpcode::kI64x2ExtractLane:
- case IrOpcode::kI32x4ExtractLane:
- case IrOpcode::kI16x8ExtractLaneU:
- case IrOpcode::kI16x8ExtractLaneS:
- case IrOpcode::kI8x16ExtractLaneU:
- case IrOpcode::kI8x16ExtractLaneS: {
- int32_t lane = OpParameter<int32_t>(node->op());
- Node** rep_node = zone()->NewArray<Node*>(1);
- rep_node[0] = GetReplacementsWithType(node->InputAt(0), rep_type)[lane];
-
- // If unsigned, mask the top bits.
- if (node->opcode() == IrOpcode::kI16x8ExtractLaneU) {
- rep_node[0] = Mask(rep_node[0], kMask16);
- } else if (node->opcode() == IrOpcode::kI8x16ExtractLaneU) {
- rep_node[0] = Mask(rep_node[0], kMask8);
- }
-
- ReplaceNode(node, rep_node, 1);
- break;
- }
- case IrOpcode::kF64x2ReplaceLane:
- case IrOpcode::kF32x4ReplaceLane:
- case IrOpcode::kI64x2ReplaceLane:
- case IrOpcode::kI32x4ReplaceLane:
- case IrOpcode::kI16x8ReplaceLane:
- case IrOpcode::kI8x16ReplaceLane: {
- DCHECK_EQ(2, node->InputCount());
- Node* repNode = node->InputAt(1);
- int32_t lane = OpParameter<int32_t>(node->op());
- Node** old_rep_node = GetReplacementsWithType(node->InputAt(0), rep_type);
- Node** rep_node = zone()->NewArray<Node*>(num_lanes);
- for (int i = 0; i < num_lanes; ++i) {
- rep_node[i] = old_rep_node[i];
- }
- if (HasReplacement(0, repNode)) {
- rep_node[lane] = GetReplacements(repNode)[0];
- } else {
- rep_node[lane] = repNode;
- }
-
- // The replacement nodes for these opcodes are in Word32, and we always
- // store nodes in sign-extended form (and mask to account for overflows).
- if (node->opcode() == IrOpcode::kI16x8ReplaceLane) {
- rep_node[lane] = graph()->NewNode(machine()->SignExtendWord16ToInt32(),
- Mask(rep_node[lane], kMask16));
- } else if (node->opcode() == IrOpcode::kI8x16ReplaceLane) {
- rep_node[lane] = graph()->NewNode(machine()->SignExtendWord8ToInt32(),
- Mask(rep_node[lane], kMask8));
- }
-
- ReplaceNode(node, rep_node, num_lanes);
- break;
- }
-#define COMPARISON_CASE(type, simd_op, lowering_op, invert) \
- case IrOpcode::simd_op: { \
- LowerCompareOp(node, SimdType::k##type, machine()->lowering_op(), invert); \
- break; \
- }
- COMPARISON_CASE(Float64x2, kF64x2Eq, Float64Equal, false)
- COMPARISON_CASE(Float64x2, kF64x2Lt, Float64LessThan, false)
- COMPARISON_CASE(Float64x2, kF64x2Le, Float64LessThanOrEqual, false)
- COMPARISON_CASE(Float32x4, kF32x4Eq, Float32Equal, false)
- COMPARISON_CASE(Float32x4, kF32x4Lt, Float32LessThan, false)
- COMPARISON_CASE(Float32x4, kF32x4Le, Float32LessThanOrEqual, false)
- COMPARISON_CASE(Float32x4, kF32x4Gt, Float32LessThan, true)
- COMPARISON_CASE(Float32x4, kF32x4Ge, Float32LessThanOrEqual, true)
- COMPARISON_CASE(Int64x2, kI64x2Eq, Word64Equal, false)
- COMPARISON_CASE(Int32x4, kI32x4Eq, Word32Equal, false)
- COMPARISON_CASE(Int32x4, kI32x4LtS, Int32LessThan, false)
- COMPARISON_CASE(Int32x4, kI32x4LeS, Int32LessThanOrEqual, false)
- COMPARISON_CASE(Int32x4, kI32x4GtS, Int32LessThan, true)
- COMPARISON_CASE(Int32x4, kI32x4GeS, Int32LessThanOrEqual, true)
- COMPARISON_CASE(Int32x4, kI32x4LtU, Uint32LessThan, false)
- COMPARISON_CASE(Int32x4, kI32x4LeU, Uint32LessThanOrEqual, false)
- COMPARISON_CASE(Int32x4, kI32x4GtU, Uint32LessThan, true)
- COMPARISON_CASE(Int32x4, kI32x4GeU, Uint32LessThanOrEqual, true)
- COMPARISON_CASE(Int16x8, kI16x8Eq, Word32Equal, false)
- COMPARISON_CASE(Int16x8, kI16x8LtS, Int32LessThan, false)
- COMPARISON_CASE(Int16x8, kI16x8LeS, Int32LessThanOrEqual, false)
- COMPARISON_CASE(Int16x8, kI16x8GtS, Int32LessThan, true)
- COMPARISON_CASE(Int16x8, kI16x8GeS, Int32LessThanOrEqual, true)
- COMPARISON_CASE(Int16x8, kI16x8LtU, Uint32LessThan, false)
- COMPARISON_CASE(Int16x8, kI16x8LeU, Uint32LessThanOrEqual, false)
- COMPARISON_CASE(Int16x8, kI16x8GtU, Uint32LessThan, true)
- COMPARISON_CASE(Int16x8, kI16x8GeU, Uint32LessThanOrEqual, true)
- COMPARISON_CASE(Int8x16, kI8x16Eq, Word32Equal, false)
- COMPARISON_CASE(Int8x16, kI8x16LtS, Int32LessThan, false)
- COMPARISON_CASE(Int8x16, kI8x16LeS, Int32LessThanOrEqual, false)
- COMPARISON_CASE(Int8x16, kI8x16GtS, Int32LessThan, true)
- COMPARISON_CASE(Int8x16, kI8x16GeS, Int32LessThanOrEqual, true)
- COMPARISON_CASE(Int8x16, kI8x16LtU, Uint32LessThan, false)
- COMPARISON_CASE(Int8x16, kI8x16LeU, Uint32LessThanOrEqual, false)
- COMPARISON_CASE(Int8x16, kI8x16GtU, Uint32LessThan, true)
- COMPARISON_CASE(Int8x16, kI8x16GeU, Uint32LessThanOrEqual, true)
-#undef COMPARISON_CASE
- case IrOpcode::kF64x2Ne: {
- LowerNotEqual(node, SimdType::kFloat64x2, machine()->Float64Equal());
- break;
- }
- case IrOpcode::kF32x4Ne: {
- LowerNotEqual(node, SimdType::kFloat32x4, machine()->Float32Equal());
- break;
- }
- case IrOpcode::kI64x2Ne: {
- LowerNotEqual(node, SimdType::kInt64x2, machine()->Word64Equal());
- break;
- }
- case IrOpcode::kI32x4Ne: {
- LowerNotEqual(node, SimdType::kInt32x4, machine()->Word32Equal());
- break;
- }
- case IrOpcode::kI16x8Ne: {
- LowerNotEqual(node, SimdType::kInt16x8, machine()->Word32Equal());
- break;
- }
- case IrOpcode::kI8x16Ne: {
- LowerNotEqual(node, SimdType::kInt8x16, machine()->Word32Equal());
- break;
- }
- case IrOpcode::kS128Select: {
- DCHECK_EQ(3, node->InputCount());
- Node** boolean_input =
- GetReplacementsWithType(node->InputAt(0), rep_type);
- Node** rep_left = GetReplacementsWithType(node->InputAt(1), rep_type);
- Node** rep_right = GetReplacementsWithType(node->InputAt(2), rep_type);
- Node** rep_node = zone()->NewArray<Node*>(num_lanes);
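- // Branchless select: right ^ (mask & (left ^ right)) picks the left lane
- // where mask bits are set and the right lane elsewhere.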
- for (int i = 0; i < num_lanes; ++i) {
- Node* tmp1 =
- graph()->NewNode(machine()->Word32Xor(), rep_left[i], rep_right[i]);
- Node* tmp2 =
- graph()->NewNode(machine()->Word32And(), boolean_input[i], tmp1);
- rep_node[i] =
- graph()->NewNode(machine()->Word32Xor(), rep_right[i], tmp2);
- }
- ReplaceNode(node, rep_node, num_lanes);
- break;
- }
- case IrOpcode::kI8x16Swizzle: {
- DCHECK_EQ(2, node->InputCount());
- Node** rep_left = GetReplacementsWithType(node->InputAt(0), rep_type);
- Node** indices = GetReplacementsWithType(node->InputAt(1), rep_type);
- Node** rep_nodes = zone()->NewArray<Node*>(num_lanes);
- Node* stack_slot = graph()->NewNode(
- machine()->StackSlot(MachineRepresentation::kSimd128));
-
- // Push all num_lanes values into stack slot.
- const Operator* store_op = machine()->Store(
- StoreRepresentation(MachineRepresentation::kWord8, kNoWriteBarrier));
- Node* effect_input = graph()->start();
- for (int i = num_lanes - 1; i >= 0; i--) {
- // We want all the stores to happen before any of the loads below, so
- // chain them together via effect edges.
- Node* store =
- graph()->NewNode(store_op, stack_slot, mcgraph_->Int32Constant(i),
- rep_left[i], effect_input, graph()->start());
- effect_input = store;
- }
-
- for (int i = num_lanes - 1; i >= 0; i--) {
- // Only select a lane when its index is < num_lanes, otherwise write 0 to
- // the lane. Use an unsigned comparison to take care of negative indices.
- Diamond d(graph(), common(),
- graph()->NewNode(machine()->Uint32LessThan(), indices[i],
- mcgraph_->Int32Constant(num_lanes)));
-
- Node* load =
- graph()->NewNode(machine()->Load(LoadRepresentation::Uint8()),
- stack_slot, indices[i], effect_input, d.if_true);
-
- rep_nodes[i] = d.Phi(MachineRepresentation::kWord8, load,
- mcgraph_->Int32Constant(0));
- }
-
- ReplaceNode(node, rep_nodes, num_lanes);
- break;
- }
- case IrOpcode::kI8x16Shuffle: {
- DCHECK_EQ(2, node->InputCount());
- S128ImmediateParameter shuffle = S128ImmediateParameterOf(node->op());
- Node** rep_left = GetReplacementsWithType(node->InputAt(0), rep_type);
- Node** rep_right = GetReplacementsWithType(node->InputAt(1), rep_type);
- Node** rep_node = zone()->NewArray<Node*>(16);
- for (int i = 0; i < 16; i++) {
- int lane = shuffle[i];
- rep_node[i] = lane < 16 ? rep_left[lane] : rep_right[lane - 16];
- }
- ReplaceNode(node, rep_node, 16);
- break;
- }
- case IrOpcode::kV128AnyTrue: {
- DCHECK_EQ(1, node->InputCount());
- // AnyTrue always returns an I32x4, and can work with inputs of any shape,
- // but we still need GetReplacementsWithType if the input is a float type.
- DCHECK_EQ(ReplacementType(node), SimdType::kInt32x4);
- Node** reps = GetReplacementsWithType(node->InputAt(0), rep_type);
- Node** rep_node = zone()->NewArray<Node*>(1);
- Node* true_node = mcgraph_->Int32Constant(1);
- Node* zero = mcgraph_->Int32Constant(0);
- Node* tmp_result = zero;
- for (int i = 0; i < num_lanes; ++i) {
- Diamond d(graph(), common(),
- graph()->NewNode(machine()->Word32Equal(), reps[i], zero));
- tmp_result =
- d.Phi(MachineRepresentation::kWord32, tmp_result, true_node);
- }
- rep_node[0] = tmp_result;
- ReplaceNode(node, rep_node, 1);
- break;
- }
- case IrOpcode::kI64x2AllTrue: {
- LowerAllTrueOp(node, SimdType::kInt64x2);
- break;
- }
- case IrOpcode::kI32x4AllTrue: {
- LowerAllTrueOp(node, SimdType::kInt32x4);
- break;
- }
- case IrOpcode::kI16x8AllTrue: {
- LowerAllTrueOp(node, SimdType::kInt16x8);
- break;
- }
- case IrOpcode::kI8x16AllTrue: {
- LowerAllTrueOp(node, SimdType::kInt8x16);
- break;
- }
- case IrOpcode::kI8x16BitMask: {
- LowerBitMaskOp(node, rep_type, 7);
- break;
- }
- case IrOpcode::kI16x8BitMask: {
- LowerBitMaskOp(node, rep_type, 15);
- break;
- }
- case IrOpcode::kI32x4BitMask: {
- LowerBitMaskOp(node, rep_type, 31);
- break;
- }
- case IrOpcode::kI8x16RoundingAverageU:
- case IrOpcode::kI16x8RoundingAverageU: {
- DCHECK_EQ(2, node->InputCount());
- Node** rep_left = GetReplacementsWithType(node->InputAt(0), rep_type);
- Node** rep_right = GetReplacementsWithType(node->InputAt(1), rep_type);
- int num_lanes = NumLanes(rep_type);
- Node** rep_node = zone()->NewArray<Node*>(num_lanes);
- // Nodes are stored signed, so mask away the top bits.
- // rounding_average(left, right) = (left + right + 1) >> 1
- const int bit_mask = num_lanes == 16 ? kMask8 : kMask16;
- for (int i = 0; i < num_lanes; ++i) {
- Node* mask_left = graph()->NewNode(machine()->Word32And(), rep_left[i],
- mcgraph_->Int32Constant(bit_mask));
- Node* mask_right =
- graph()->NewNode(machine()->Word32And(), rep_right[i],
- mcgraph_->Int32Constant(bit_mask));
- Node* left_plus_right_plus_one = graph()->NewNode(
- machine()->Int32Add(),
- graph()->NewNode(machine()->Int32Add(), mask_left, mask_right),
- mcgraph_->Int32Constant(1));
- rep_node[i] =
- graph()->NewNode(machine()->Word32Shr(), left_plus_right_plus_one,
- mcgraph_->Int32Constant(1));
- }
- ReplaceNode(node, rep_node, num_lanes);
- break;
- }
-#define LOWER_EXT_MUL(OP, MULTIPLY, INPUT_TYPE, LOW, SIGNED) \
- case IrOpcode::OP: { \
- LowerExtMul(node, machine()->MULTIPLY(), rep_type, SimdType::INPUT_TYPE, \
- LOW, SIGNED); \
- break; \
- }
- LOWER_EXT_MUL(kI16x8ExtMulLowI8x16S, Int32Mul, kInt8x16, true, true)
- LOWER_EXT_MUL(kI16x8ExtMulLowI8x16U, Int32Mul, kInt8x16, true, false)
- LOWER_EXT_MUL(kI16x8ExtMulHighI8x16S, Int32Mul, kInt8x16, false, true)
- LOWER_EXT_MUL(kI16x8ExtMulHighI8x16U, Int32Mul, kInt8x16, false, false)
- LOWER_EXT_MUL(kI32x4ExtMulLowI16x8S, Int32Mul, kInt16x8, true, true)
- LOWER_EXT_MUL(kI32x4ExtMulLowI16x8U, Int32Mul, kInt16x8, true, false)
- LOWER_EXT_MUL(kI32x4ExtMulHighI16x8S, Int32Mul, kInt16x8, false, true)
- LOWER_EXT_MUL(kI32x4ExtMulHighI16x8U, Int32Mul, kInt16x8, false, false)
- LOWER_EXT_MUL(kI64x2ExtMulLowI32x4S, Int64Mul, kInt32x4, true, true)
- LOWER_EXT_MUL(kI64x2ExtMulLowI32x4U, Int64Mul, kInt32x4, true, false)
- LOWER_EXT_MUL(kI64x2ExtMulHighI32x4S, Int64Mul, kInt32x4, false, true)
- LOWER_EXT_MUL(kI64x2ExtMulHighI32x4U, Int64Mul, kInt32x4, false, false)
- default: {
- DefaultLowering(node);
- }
- }
-}
-
-Node* SimdScalarLowering::ExtendNode(Node* node, SimdType rep_type,
- bool is_signed) {
- if (rep_type == SimdType::kInt8x16 && !is_signed) {
- node = Mask(node, kMask8);
- } else if (rep_type == SimdType::kInt16x8 && !is_signed) {
- node = Mask(node, kMask16);
- } else if (rep_type == SimdType::kInt32x4) {
- if (is_signed) {
- node = graph()->NewNode(machine()->SignExtendWord32ToInt64(), node);
- } else {
- node = graph()->NewNode(machine()->ChangeUint32ToUint64(), node);
- }
- }
- return node;
-}
-
-void SimdScalarLowering::LowerExtMul(Node* node, const Operator* multiply,
- SimdType output_type, SimdType input_type,
- bool low, bool is_signed) {
- DCHECK_EQ(2, node->InputCount());
- int num_lanes = NumLanes(output_type);
- Node** rep_left = GetReplacementsWithType(node->InputAt(0), input_type);
- Node** rep_right = GetReplacementsWithType(node->InputAt(1), input_type);
- int start_index = low ? 0 : num_lanes;
- Node** rep_node = zone()->NewArray<Node*>(num_lanes);
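- // Widen the selected half of the input lanes (see ExtendNode above: mask
- // unsigned narrow lanes, extend 32-bit lanes to 64 bits), then multiply.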
- for (int i = 0; i < num_lanes; i++) {
- Node* left = ExtendNode(rep_left[start_index + i], input_type, is_signed);
- Node* right = ExtendNode(rep_right[start_index + i], input_type, is_signed);
- rep_node[i] = graph()->NewNode(multiply, left, right);
- }
- ReplaceNode(node, rep_node, num_lanes);
-}
-
-bool SimdScalarLowering::DefaultLowering(Node* node) {
- bool something_changed = false;
- for (int i = NodeProperties::PastValueIndex(node) - 1; i >= 0; i--) {
- Node* input = node->InputAt(i);
- if (HasReplacement(0, input)) {
- something_changed = true;
- node->ReplaceInput(i, GetReplacements(input)[0]);
- }
- if (ReplacementCount(input) > 1 && HasReplacement(1, input)) {
- something_changed = true;
- for (int j = 1; j < ReplacementCount(input); ++j) {
- node->InsertInput(zone(), i + j, GetReplacements(input)[j]);
- }
- }
- }
- return something_changed;
-}
-
-void SimdScalarLowering::ReplaceNode(Node* old, Node** new_nodes, int count) {
- replacements_[old->id()].node = zone()->NewArray<Node*>(count);
- for (int i = 0; i < count; ++i) {
- replacements_[old->id()].node[i] = new_nodes[i];
- }
- replacements_[old->id()].num_replacements = count;
-}
-
-bool SimdScalarLowering::HasReplacement(size_t index, Node* node) {
- return replacements_[node->id()].node != nullptr &&
- replacements_[node->id()].node[index] != nullptr;
-}
-
-SimdScalarLowering::SimdType SimdScalarLowering::ReplacementType(Node* node) {
- return replacements_[node->id()].type;
-}
-
-Node** SimdScalarLowering::GetReplacements(Node* node) {
- Node** result = replacements_[node->id()].node;
- DCHECK(result);
- return result;
-}
-
-int SimdScalarLowering::ReplacementCount(Node* node) {
- return replacements_[node->id()].num_replacements;
-}
-
-void SimdScalarLowering::Int32ToFloat32(Node** replacements, Node** result) {
- for (int i = 0; i < kNumLanes32; ++i) {
- if (replacements[i] != nullptr) {
- result[i] =
- graph()->NewNode(machine()->BitcastInt32ToFloat32(), replacements[i]);
- } else {
- result[i] = nullptr;
- }
- }
-}
-
-void SimdScalarLowering::Int64ToFloat64(Node** replacements, Node** result) {
- for (int i = 0; i < kNumLanes64; ++i) {
- if (replacements[i] != nullptr) {
- result[i] =
- graph()->NewNode(machine()->BitcastInt64ToFloat64(), replacements[i]);
- } else {
- result[i] = nullptr;
- }
- }
-}
-
-void SimdScalarLowering::Float64ToInt64(Node** replacements, Node** result) {
- for (int i = 0; i < kNumLanes64; ++i) {
- if (replacements[i] != nullptr) {
- result[i] =
- graph()->NewNode(machine()->BitcastFloat64ToInt64(), replacements[i]);
- } else {
- result[i] = nullptr;
- }
- }
-}
-
-void SimdScalarLowering::Float32ToInt32(Node** replacements, Node** result) {
- for (int i = 0; i < kNumLanes32; ++i) {
- if (replacements[i] != nullptr) {
- result[i] =
- graph()->NewNode(machine()->BitcastFloat32ToInt32(), replacements[i]);
- } else {
- result[i] = nullptr;
- }
- }
-}
-
-void SimdScalarLowering::Int64ToInt32(Node** replacements, Node** result) {
- const int num_ints = sizeof(int64_t) / sizeof(int32_t);
- const int bit_size = sizeof(int32_t) * 8;
- const Operator* truncate = machine()->TruncateInt64ToInt32();
-
- for (int i = 0; i < kNumLanes64; i++) {
- if (replacements[i] != nullptr) {
- for (int j = 0; j < num_ints; j++) {
- result[num_ints * i + j] = graph()->NewNode(
- truncate, graph()->NewNode(machine()->Word64Sar(), replacements[i],
- mcgraph_->Int32Constant(j * bit_size)));
- }
- } else {
- for (int j = 0; j < num_ints; j++) {
- result[num_ints * i + j] = nullptr;
- }
- }
- }
-}
-
-template <typename T>
-void SimdScalarLowering::Int32ToSmallerInt(Node** replacements, Node** result) {
- const int num_ints = sizeof(int32_t) / sizeof(T);
- const int bit_size = sizeof(T) * 8;
- const Operator* sign_extend;
- switch (sizeof(T)) {
- case 1:
- sign_extend = machine()->SignExtendWord8ToInt32();
- break;
- case 2:
- sign_extend = machine()->SignExtendWord16ToInt32();
- break;
- default:
- UNREACHABLE();
- }
-
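- // Split each 32-bit lane into num_ints narrower lanes by shifting and
- // sign-extending, so the narrow lanes stay in canonical Word32 form.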
- for (int i = 0; i < kNumLanes32; i++) {
- if (replacements[i] != nullptr) {
- for (int j = 0; j < num_ints; j++) {
- result[num_ints * i + j] = graph()->NewNode(
- sign_extend,
- graph()->NewNode(machine()->Word32Shr(), replacements[i],
- mcgraph_->Int32Constant(j * bit_size)));
- }
- } else {
- for (int j = 0; j < num_ints; j++) {
- result[num_ints * i + j] = nullptr;
- }
- }
- }
-}
-
-template <typename T>
-void SimdScalarLowering::SmallerIntToInt32(Node** replacements, Node** result) {
- const int num_ints = sizeof(int32_t) / sizeof(T);
- const int bit_size = sizeof(T) * 8;
- const int bit_mask = (1 << bit_size) - 1;
-
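- // Pack num_ints narrow lanes into each 32-bit word: mask each lane to its
- // width and shift it into position (lane 0 ends up in the low bits).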
- for (int i = 0; i < kNumLanes32; ++i) {
- result[i] = mcgraph_->Int32Constant(0);
- for (int j = 0; j < num_ints; j++) {
- if (replacements[num_ints * i + j] != nullptr) {
- Node* clean_bits = graph()->NewNode(machine()->Word32And(),
- replacements[num_ints * i + j],
- mcgraph_->Int32Constant(bit_mask));
- Node* shift = graph()->NewNode(machine()->Word32Shl(), clean_bits,
- mcgraph_->Int32Constant(j * bit_size));
- result[i] = graph()->NewNode(machine()->Word32Or(), result[i], shift);
- }
- }
- }
-}
-
-void SimdScalarLowering::Int32ToInt64(Node** replacements, Node** result) {
- const int num_ints = sizeof(int64_t) / sizeof(int32_t);
-
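- // Recombine two adjacent 32-bit lanes into one 64-bit lane: the lane at
- // index 2 * i is the low word and the next lane is the high word.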
- for (int i = 0; i < kNumLanes64; i++) {
- Node* i64 = graph()->NewNode(machine()->ChangeUint32ToUint64(),
- replacements[num_ints * i + 1]);
- Node* high = graph()->NewNode(machine()->Word64Shl(), i64,
- mcgraph_->Int32Constant(32));
- Node* i64_low = graph()->NewNode(machine()->ChangeUint32ToUint64(),
- replacements[num_ints * i]);
- result[i] = graph()->NewNode(machine()->Word64Or(), high, i64_low);
- }
-}
-
-Node** SimdScalarLowering::GetReplacementsWithType(Node* node, SimdType type) {
- // Operations like extract lane, bitmask, any_true, and all_true replace a
- // SIMD node with a scalar. Those won't be correctly handled here; they
- // should be special-cased and replaced with the appropriate scalar.
- DCHECK_LT(1, ReplacementCount(node));
-
- Node** replacements = GetReplacements(node);
- if (type == ReplacementType(node)) {
- return replacements;
- }
-
- int num_lanes = NumLanes(type);
- Node** result = zone()->NewArray<Node*>(num_lanes);
-
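- // Conversions between representations route through Int32 lanes (and Int64
- // lanes for the 2-lane shapes): floats are bitcast to integers, wide lanes
- // are split with shifts, and narrow lanes are packed with masks and shifts
- // using the helpers above.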
- switch (type) {
- case SimdType::kInt64x2: {
- switch (ReplacementType(node)) {
- case SimdType::kInt64x2: {
- UNREACHABLE();
- }
- case SimdType::kInt32x4: {
- Int32ToInt64(replacements, result);
- break;
- }
- case SimdType::kInt16x8: {
- Node** to_int32 = zone()->NewArray<Node*>(kNumLanes32);
- SmallerIntToInt32<int16_t>(replacements, to_int32);
- Int32ToInt64(to_int32, result);
- break;
- }
- case SimdType::kInt8x16: {
- Node** to_int32 = zone()->NewArray<Node*>(kNumLanes32);
- SmallerIntToInt32<int8_t>(replacements, to_int32);
- Int32ToInt64(to_int32, result);
- break;
- }
- case SimdType::kFloat64x2: {
- Float64ToInt64(replacements, result);
- break;
- }
- case SimdType::kFloat32x4: {
- Node** to_int32 = zone()->NewArray<Node*>(kNumLanes32);
- Float32ToInt32(replacements, to_int32);
- Int32ToInt64(to_int32, result);
- break;
- }
- }
- break;
- }
- case SimdType::kInt32x4: {
- switch (ReplacementType(node)) {
- case SimdType::kInt64x2: {
- Int64ToInt32(replacements, result);
- break;
- }
- case SimdType::kInt32x4: {
- UNREACHABLE();
- }
- case SimdType::kInt16x8: {
- SmallerIntToInt32<int16_t>(replacements, result);
- break;
- }
- case SimdType::kInt8x16: {
- SmallerIntToInt32<int8_t>(replacements, result);
- break;
- }
- case SimdType::kFloat64x2: {
- Node** float64_to_int64 = zone()->NewArray<Node*>(kNumLanes64);
- Float64ToInt64(replacements, float64_to_int64);
- Int64ToInt32(float64_to_int64, result);
- break;
- }
- case SimdType::kFloat32x4: {
- Float32ToInt32(replacements, result);
- break;
- }
- }
- break;
- }
- case SimdType::kInt16x8: {
- switch (ReplacementType(node)) {
- case SimdType::kInt64x2: {
- Node** to_int32 = zone()->NewArray<Node*>(kNumLanes32);
- Int64ToInt32(replacements, to_int32);
- Int32ToSmallerInt<int16_t>(to_int32, result);
- break;
- }
- case SimdType::kInt32x4: {
- Int32ToSmallerInt<int16_t>(replacements, result);
- break;
- }
- case SimdType::kInt16x8: {
- UNREACHABLE();
- }
- case SimdType::kInt8x16: {
- Node** to_int32 = zone()->NewArray<Node*>(kNumLanes32);
- SmallerIntToInt32<int8_t>(replacements, to_int32);
- Int32ToSmallerInt<int16_t>(to_int32, result);
- break;
- }
- case SimdType::kFloat64x2: {
- Node** to_int64 = zone()->NewArray<Node*>(kNumLanes64);
- Node** to_int32 = zone()->NewArray<Node*>(kNumLanes32);
- Float64ToInt64(replacements, to_int64);
- Int64ToInt32(to_int64, to_int32);
- Int32ToSmallerInt<int16_t>(to_int32, result);
- break;
- }
- case SimdType::kFloat32x4: {
- Node** float32_to_int32 = zone()->NewArray<Node*>(kNumLanes32);
- Float32ToInt32(replacements, float32_to_int32);
- Int32ToSmallerInt<int16_t>(float32_to_int32, result);
- break;
- }
- }
- break;
- }
- case SimdType::kInt8x16: {
- switch (ReplacementType(node)) {
- case SimdType::kInt64x2: {
- Node** int64_to_int32 = zone()->NewArray<Node*>(kNumLanes32);
- Int64ToInt32(replacements, int64_to_int32);
- Int32ToSmallerInt<int8_t>(int64_to_int32, result);
- break;
- }
- case SimdType::kInt32x4: {
- Int32ToSmallerInt<int8_t>(replacements, result);
- break;
- }
- case SimdType::kInt16x8: {
- Node** int16_to_int32 = zone()->NewArray<Node*>(kNumLanes32);
- SmallerIntToInt32<int16_t>(replacements, int16_to_int32);
- Int32ToSmallerInt<int8_t>(int16_to_int32, result);
- break;
- }
- case SimdType::kInt8x16: {
- UNREACHABLE();
- }
- case SimdType::kFloat64x2: {
- Node** to_int64 = zone()->NewArray<Node*>(kNumLanes64);
- Node** to_int32 = zone()->NewArray<Node*>(kNumLanes32);
- Float64ToInt64(replacements, to_int64);
- Int64ToInt32(to_int64, to_int32);
- Int32ToSmallerInt<int8_t>(to_int32, result);
- break;
- }
- case SimdType::kFloat32x4: {
- Node** float32_to_int32 = zone()->NewArray<Node*>(kNumLanes32);
- Float32ToInt32(replacements, float32_to_int32);
- Int32ToSmallerInt<int8_t>(float32_to_int32, result);
- break;
- }
- }
- break;
- }
- case SimdType::kFloat64x2: {
- switch (ReplacementType(node)) {
- case SimdType::kInt64x2: {
- Int64ToFloat64(replacements, result);
- break;
- }
- case SimdType::kInt32x4: {
- Node** int32_to_int64 = zone()->NewArray<Node*>(kNumLanes64);
- Int32ToInt64(replacements, int32_to_int64);
- Int64ToFloat64(int32_to_int64, result);
- break;
- }
- case SimdType::kInt16x8: {
- Node** to_int32 = zone()->NewArray<Node*>(kNumLanes32);
- Node** to_int64 = zone()->NewArray<Node*>(kNumLanes64);
- SmallerIntToInt32<int16_t>(replacements, to_int32);
- Int32ToInt64(to_int32, to_int64);
- Int64ToFloat64(to_int64, result);
- break;
- }
- case SimdType::kInt8x16: {
- Node** to_int32 = zone()->NewArray<Node*>(kNumLanes32);
- Node** to_int64 = zone()->NewArray<Node*>(kNumLanes64);
- SmallerIntToInt32<int8_t>(replacements, to_int32);
- Int32ToInt64(to_int32, to_int64);
- Int64ToFloat64(to_int64, result);
- break;
- }
- case SimdType::kFloat64x2: {
- UNREACHABLE();
- }
- case SimdType::kFloat32x4: {
- Node** to_int32 = zone()->NewArray<Node*>(kNumLanes32);
- Node** to_int64 = zone()->NewArray<Node*>(kNumLanes64);
- Float32ToInt32(replacements, to_int32);
- Int32ToInt64(to_int32, to_int64);
- Int64ToFloat64(to_int64, result);
- break;
- }
- }
- break;
- }
- case SimdType::kFloat32x4: {
- switch (ReplacementType(node)) {
- case SimdType::kInt64x2: {
- Node** to_int32 = zone()->NewArray<Node*>(kNumLanes32);
- Int64ToInt32(replacements, to_int32);
- Int32ToFloat32(to_int32, result);
- break;
- }
- case SimdType::kInt32x4: {
- Int32ToFloat32(replacements, result);
- break;
- }
- case SimdType::kInt16x8: {
- Node** to_int32 = zone()->NewArray<Node*>(kNumLanes32);
- SmallerIntToInt32<int16_t>(replacements, to_int32);
- Int32ToFloat32(to_int32, result);
- break;
- }
- case SimdType::kInt8x16: {
- SmallerIntToInt32<int8_t>(replacements, result);
- Int32ToFloat32(result, result);
- break;
- }
- case SimdType::kFloat64x2: {
- Node** float64_to_int64 = zone()->NewArray<Node*>(kNumLanes64);
- Node** int64_to_int32 = zone()->NewArray<Node*>(kNumLanes32);
- Float64ToInt64(replacements, float64_to_int64);
- Int64ToInt32(float64_to_int64, int64_to_int32);
- Int32ToFloat32(int64_to_int32, result);
- break;
- }
- case SimdType::kFloat32x4: {
- UNREACHABLE();
- }
- }
- break;
- }
- }
- return result;
-}
-
-void SimdScalarLowering::PreparePhiReplacement(Node* phi) {
- MachineRepresentation rep = PhiRepresentationOf(phi->op());
- if (rep == MachineRepresentation::kSimd128) {
- // We have to create the replacements for a phi node before we actually
- // lower the phi to break potential cycles in the graph. The replacements of
- // input nodes do not exist yet, so we use a placeholder node to pass the
- // graph verifier.
- int value_count = phi->op()->ValueInputCount();
- SimdType type = ReplacementType(phi);
- int num_lanes = NumLanes(type);
- Node*** inputs_rep = zone()->NewArray<Node**>(num_lanes);
- for (int i = 0; i < num_lanes; ++i) {
- inputs_rep[i] = zone()->NewArray<Node*>(value_count + 1);
- inputs_rep[i][value_count] = NodeProperties::GetControlInput(phi, 0);
- }
- for (int i = 0; i < value_count; ++i) {
- for (int j = 0; j < num_lanes; ++j) {
- inputs_rep[j][i] = placeholder_;
- }
- }
- Node** rep_nodes = zone()->NewArray<Node*>(num_lanes);
- for (int i = 0; i < num_lanes; ++i) {
- rep_nodes[i] = graph()->NewNode(
- common()->Phi(MachineTypeFrom(type).representation(), value_count),
- value_count + 1, inputs_rep[i], false);
- }
- ReplaceNode(phi, rep_nodes, num_lanes);
- }
-}
-} // namespace compiler
-} // namespace internal
-} // namespace v8
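
Illustrative note on the deleted Int32ToInt64 helper above: it assembles 64-bit lane i
from 32-bit replacement lanes 2*i (low half) and 2*i + 1 (high half). A minimal
value-level sketch of that pairing, outside the TurboFan graph (a sketch, not V8 code):

    #include <cstdint>
    #include <cstdio>

    // 64-bit lane i = (lane[2*i + 1] << 32) | lane[2*i], matching the
    // little-endian lane order used by the deleted lowering.
    void Int32LanesToInt64Lanes(const uint32_t in[4], uint64_t out[2]) {
      for (int i = 0; i < 2; ++i) {
        const uint64_t low = static_cast<uint64_t>(in[2 * i]);
        const uint64_t high = static_cast<uint64_t>(in[2 * i + 1]) << 32;
        out[i] = high | low;
      }
    }

    int main() {
      const uint32_t lanes32[4] = {0x11111111u, 0x22222222u,
                                   0x33333333u, 0x44444444u};
      uint64_t lanes64[2];
      Int32LanesToInt64Lanes(lanes32, lanes64);
      std::printf("%016llx %016llx\n",
                  static_cast<unsigned long long>(lanes64[0]),
                  static_cast<unsigned long long>(lanes64[1]));
      // Prints: 2222222211111111 4444444433333333
      return 0;
    }
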
diff --git a/chromium/v8/src/compiler/simd-scalar-lowering.h b/chromium/v8/src/compiler/simd-scalar-lowering.h
deleted file mode 100644
index d67c389d16d..00000000000
--- a/chromium/v8/src/compiler/simd-scalar-lowering.h
+++ /dev/null
@@ -1,150 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_SIMD_SCALAR_LOWERING_H_
-#define V8_COMPILER_SIMD_SCALAR_LOWERING_H_
-
-#include "src/compiler/common-operator.h"
-#include "src/compiler/diamond.h"
-#include "src/compiler/graph.h"
-#include "src/compiler/machine-graph.h"
-#include "src/compiler/machine-operator.h"
-#include "src/compiler/node-marker.h"
-#include "src/compiler/simplified-operator.h"
-#include "src/zone/zone-containers.h"
-
-namespace v8 {
-namespace internal {
-
-template <typename T>
-class Signature;
-
-namespace compiler {
-
-class SimdScalarLowering {
- public:
- SimdScalarLowering(MachineGraph* mcgraph,
- SimplifiedOperatorBuilder* simplified,
- Signature<MachineRepresentation>* signature);
-
- void LowerGraph();
-
- int GetParameterCountAfterLowering();
-
- private:
- enum class State : uint8_t { kUnvisited, kOnStack, kVisited };
-
- enum class SimdType : uint8_t {
- kFloat64x2,
- kFloat32x4,
- kInt64x2,
- kInt32x4,
- kInt16x8,
- kInt8x16
- };
-
-#if defined(V8_TARGET_BIG_ENDIAN)
- static constexpr int kLaneOffsets[16] = {15, 14, 13, 12, 11, 10, 9, 8,
- 7, 6, 5, 4, 3, 2, 1, 0};
-#else
- static constexpr int kLaneOffsets[16] = {0, 1, 2, 3, 4, 5, 6, 7,
- 8, 9, 10, 11, 12, 13, 14, 15};
-#endif
- struct Replacement {
- Node** node = nullptr;
- SimdType type; // represents output type
- int num_replacements = 0;
- };
-
- struct NodeState {
- Node* node;
- int input_index;
- };
-
- Zone* zone() const { return mcgraph_->zone(); }
- Graph* graph() const { return mcgraph_->graph(); }
- MachineOperatorBuilder* machine() const { return mcgraph_->machine(); }
- CommonOperatorBuilder* common() const { return mcgraph_->common(); }
- SimplifiedOperatorBuilder* simplified() const { return simplified_; }
- Signature<MachineRepresentation>* signature() const { return signature_; }
-
- void LowerNode(Node* node);
- bool DefaultLowering(Node* node);
-
- int NumLanes(SimdType type);
- void ReplaceNode(Node* old, Node** new_nodes, int count);
- bool HasReplacement(size_t index, Node* node);
- Node** GetReplacements(Node* node);
- int ReplacementCount(Node* node);
- void Float64ToInt64(Node** replacements, Node** result);
- void Float32ToInt32(Node** replacements, Node** result);
- void Int32ToFloat32(Node** replacements, Node** result);
- void Int64ToFloat64(Node** replacements, Node** result);
- void Int64ToInt32(Node** replacements, Node** result);
- template <typename T>
- void Int32ToSmallerInt(Node** replacements, Node** result);
- template <typename T>
- void SmallerIntToInt32(Node** replacements, Node** result);
- void Int32ToInt64(Node** replacements, Node** result);
- Node** GetReplacementsWithType(Node* node, SimdType type);
- SimdType ReplacementType(Node* node);
- void PreparePhiReplacement(Node* phi);
- void SetLoweredType(Node* node, Node* output);
- void GetIndexNodes(Node* index, Node** new_indices, SimdType type);
- void LowerLoadOp(Node* node, SimdType type);
- void LowerLoadTransformOp(Node* node, SimdType type);
- void LowerStoreOp(Node* node);
- void LowerBinaryOp(Node* node, SimdType input_rep_type, const Operator* op,
- bool not_horizontal = true);
- Node* ConstructPhiForComparison(Diamond d, SimdType rep_type, int true_value,
- int false_value);
- void LowerCompareOp(Node* node, SimdType input_rep_type, const Operator* op,
- bool invert_inputs = false);
- Node* FixUpperBits(Node* input, int32_t shift);
- void LowerBinaryOpForSmallInt(Node* node, SimdType input_rep_type,
- const Operator* op, bool not_horizontal = true);
- Node* Mask(Node* input, int32_t mask);
- void LowerSaturateBinaryOp(Node* node, SimdType input_rep_type,
- const Operator* op, bool is_signed);
- void LowerUnaryOp(Node* node, SimdType input_rep_type, const Operator* op);
- void LowerIntMinMax(Node* node, const Operator* op, bool is_max,
- SimdType type);
- void LowerConvertFromFloat(Node* node, bool is_signed);
- void LowerConvertFromInt(Node* node, SimdType input_rep_type,
- SimdType output_rep_type, bool is_signed,
- int start_index);
- void LowerPack(Node* node, SimdType input_rep_type, SimdType output_rep_type,
- bool is_signed);
- void LowerShiftOp(Node* node, SimdType type);
- Node* BuildF64Trunc(Node* input);
- void LowerNotEqual(Node* node, SimdType input_rep_type, const Operator* op);
- MachineType MachineTypeFrom(SimdType simdType);
- void LowerBitMaskOp(Node* node, SimdType rep_type, int msb_index);
- void LowerAllTrueOp(Node* node, SimdType rep_type);
- void LowerFloatPseudoMinMax(Node* node, const Operator* op, bool is_max,
- SimdType type);
- void LowerExtMul(Node* node, const Operator* op, SimdType output_type,
- SimdType input_type, bool low, bool is_signed);
-
- // Extends node, which is a lowered node of type rep_type (e.g. int8, int16,
- // int32), to a 32-bit or 64-bit node. node should be a lowered node (i.e. not
- // a SIMD node). The assumption here is that small ints are stored
- // sign-extended.
- Node* ExtendNode(Node* node, SimdType rep_type, bool is_signed);
-
- MachineGraph* const mcgraph_;
- SimplifiedOperatorBuilder* const simplified_;
- NodeMarker<State> state_;
- ZoneDeque<NodeState> stack_;
- Replacement* replacements_;
- Signature<MachineRepresentation>* signature_;
- Node* placeholder_;
- int parameter_count_after_lowering_;
-};
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
-
-#endif // V8_COMPILER_SIMD_SCALAR_LOWERING_H_
diff --git a/chromium/v8/src/compiler/simplified-lowering.cc b/chromium/v8/src/compiler/simplified-lowering.cc
index d5389df6ac8..75de8ecc421 100644
--- a/chromium/v8/src/compiler/simplified-lowering.cc
+++ b/chromium/v8/src/compiler/simplified-lowering.cc
@@ -101,18 +101,20 @@ MachineRepresentation MachineRepresentationFromArrayType(
}
UseInfo CheckedUseInfoAsWord32FromHint(
- NumberOperationHint hint, const FeedbackSource& feedback = FeedbackSource(),
- IdentifyZeros identify_zeros = kDistinguishZeros) {
+ NumberOperationHint hint, IdentifyZeros identify_zeros = kDistinguishZeros,
+ const FeedbackSource& feedback = FeedbackSource()) {
switch (hint) {
case NumberOperationHint::kSignedSmall:
case NumberOperationHint::kSignedSmallInputs:
return UseInfo::CheckedSignedSmallAsWord32(identify_zeros, feedback);
case NumberOperationHint::kNumber:
+ DCHECK_EQ(identify_zeros, kIdentifyZeros);
return UseInfo::CheckedNumberAsWord32(feedback);
case NumberOperationHint::kNumberOrBoolean:
// Not used currently.
UNREACHABLE();
case NumberOperationHint::kNumberOrOddball:
+ DCHECK_EQ(identify_zeros, kIdentifyZeros);
return UseInfo::CheckedNumberOrOddballAsWord32(feedback);
}
UNREACHABLE();
@@ -142,6 +144,7 @@ UseInfo TruncatingUseInfoFromRepresentation(MachineRepresentation rep) {
return UseInfo::TaggedSigned();
case MachineRepresentation::kTaggedPointer:
case MachineRepresentation::kTagged:
+ case MachineRepresentation::kMapWord:
return UseInfo::AnyTagged();
case MachineRepresentation::kFloat64:
return UseInfo::TruncatingFloat64();
@@ -215,7 +218,7 @@ bool CanOverflowSigned32(const Operator* op, Type left, Type right,
}
bool IsSomePositiveOrderedNumber(Type type) {
- return type.Is(Type::OrderedNumber()) && !type.IsNone() && type.Min() > 0;
+ return type.Is(Type::OrderedNumber()) && (type.IsNone() || type.Min() > 0);
}
} // namespace
@@ -291,15 +294,17 @@ class RepresentationSelector {
Type restriction_type() const { return restriction_type_; }
private:
+ // Fields are ordered to avoid mixing byte and word size fields to minimize
+ // padding.
enum State : uint8_t { kUnvisited, kPushed, kVisited, kQueued };
State state_ = kUnvisited;
MachineRepresentation representation_ =
MachineRepresentation::kNone; // Output representation.
Truncation truncation_ = Truncation::None(); // Information about uses.
+ bool weakened_ = false;
Type restriction_type_ = Type::Any();
Type feedback_type_;
- bool weakened_ = false;
};
RepresentationSelector(JSGraph* jsgraph, JSHeapBroker* broker, Zone* zone,
@@ -717,7 +722,7 @@ class RepresentationSelector {
void EnqueueInput(Node* use_node, int index,
UseInfo use_info = UseInfo::None()) {
static_assert(retype<T>() || lower<T>(),
- "This version of ProcessRemainingInputs has to be called in "
+ "This version of EnqueueInput has to be called in "
"the Retype or Lower phase.");
}
@@ -784,9 +789,9 @@ class RepresentationSelector {
// TODO(jarin,turbofan) Find a way to unify/merge this insertion with
// InsertUnreachableIfNecessary.
Node* unreachable = effect =
- graph()->NewNode(jsgraph_->common()->Unreachable(), effect, control);
+ graph()->NewNode(common()->Unreachable(), effect, control);
const Operator* dead_value =
- jsgraph_->common()->DeadValue(GetInfo(node)->representation());
+ common()->DeadValue(GetInfo(node)->representation());
node->ReplaceInput(0, unreachable);
node->TrimInputCount(dead_value->ValueInputCount());
ReplaceEffectControlUses(node, effect, control);
@@ -833,7 +838,13 @@ class RepresentationSelector {
} else {
DCHECK_EQ(0, node->op()->ControlInputCount());
}
- node->InsertInput(jsgraph_->zone(), new_input_index, new_input);
+ if (new_input_index == 0) {
+ node->InsertInput(jsgraph_->zone(), 0, new_input);
+ } else {
+ DCHECK_EQ(new_input_index, 1);
+ DCHECK_EQ(node->InputCount(), 1);
+ node->AppendInput(jsgraph_->zone(), new_input);
+ }
ChangeOp(node, new_op);
}
@@ -924,7 +935,15 @@ class RepresentationSelector {
ProcessInput<T>(node, i, UseInfo::None());
}
ProcessRemainingInputs<T>(node, first_effect_index);
- if (lower<T>()) Kill(node);
+
+ if (lower<T>()) {
+ TRACE("disconnecting unused #%d:%s\n", node->id(),
+ node->op()->mnemonic());
+ DisconnectFromEffectAndControl(node);
+ node->NullAllInputs();
+ // We still keep the partial node connected to its uses, knowing that
+ // lowering these operators is going to eliminate the uses.
+ }
}
// Helper for no-op node.
@@ -966,7 +985,8 @@ class RepresentationSelector {
MachineRepresentation::kWord32);
}
NumberOperationHint hint = NumberOperationHintOf(node->op());
- return VisitBinop<T>(node, CheckedUseInfoAsWord32FromHint(hint),
+ return VisitBinop<T>(node,
+ CheckedUseInfoAsWord32FromHint(hint, kIdentifyZeros),
MachineRepresentation::kWord32);
}
@@ -1233,7 +1253,7 @@ class RepresentationSelector {
DeoptMachineTypeOf(GetInfo(input)->representation(), TypeOf(input));
}
SparseInputMask mask = SparseInputMaskOf(node->op());
- ChangeOp(node, jsgraph_->common()->TypedStateValues(types, mask));
+ ChangeOp(node, common()->TypedStateValues(types, mask));
}
SetOutput<T>(node, MachineRepresentation::kTagged);
}
@@ -1282,9 +1302,9 @@ class RepresentationSelector {
node->ReplaceInput(
FrameState::kFrameStateStackInput,
- jsgraph_->graph()->NewNode(jsgraph_->common()->TypedStateValues(
- types, SparseInputMask::Dense()),
- node.stack()));
+ jsgraph_->graph()->NewNode(
+ common()->TypedStateValues(types, SparseInputMask::Dense()),
+ node.stack()));
}
}
@@ -1323,8 +1343,7 @@ class RepresentationSelector {
ConvertInput(node, i, UseInfo::AnyTagged());
}
}
- ChangeOp(node, jsgraph_->common()->TypedObjectState(
- ObjectIdOf(node->op()), types));
+ ChangeOp(node, common()->TypedObjectState(ObjectIdOf(node->op()), types));
}
SetOutput<T>(node, MachineRepresentation::kTagged);
}
@@ -1484,8 +1503,8 @@ class RepresentationSelector {
}
// Try to use type feedback.
- NumberOperationHint hint = NumberOperationHintOf(node->op());
- DCHECK_EQ(hint, NumberOperationHint::kSignedSmall);
+ NumberOperationHint const hint = NumberOperationHint::kSignedSmall;
+ DCHECK_EQ(hint, NumberOperationHintOf(node->op()));
Type left_feedback_type = TypeOf(node->InputAt(0));
Type right_feedback_type = TypeOf(node->InputAt(1));
@@ -1525,14 +1544,13 @@ class RepresentationSelector {
!right_feedback_type.Maybe(Type::MinusZero())) {
left_identify_zeros = kIdentifyZeros;
}
- UseInfo left_use = CheckedUseInfoAsWord32FromHint(hint, FeedbackSource(),
- left_identify_zeros);
+ UseInfo left_use =
+ CheckedUseInfoAsWord32FromHint(hint, left_identify_zeros);
// For CheckedInt32Add and CheckedInt32Sub, we don't need to do
// a minus zero check for the right hand side, since we already
// know that the left hand side is a proper Signed32 value,
// potentially guarded by a check.
- UseInfo right_use = CheckedUseInfoAsWord32FromHint(hint, FeedbackSource(),
- kIdentifyZeros);
+ UseInfo right_use = CheckedUseInfoAsWord32FromHint(hint, kIdentifyZeros);
VisitBinop<T>(node, left_use, right_use, MachineRepresentation::kWord32,
restriction);
}
@@ -1543,7 +1561,6 @@ class RepresentationSelector {
right_feedback_type, type_cache_,
graph_zone())) {
ChangeToPureOp(node, Int32Op(node));
-
} else {
ChangeToInt32OverflowOp(node);
}
@@ -1627,10 +1644,10 @@ class RepresentationSelector {
// mode of the {truncation}; and for modulus the sign of the
// right hand side doesn't matter anyways, so in particular there's
// no observable difference between a 0 and a -0 then.
- UseInfo const lhs_use = CheckedUseInfoAsWord32FromHint(
- hint, FeedbackSource(), truncation.identify_zeros());
- UseInfo const rhs_use = CheckedUseInfoAsWord32FromHint(
- hint, FeedbackSource(), kIdentifyZeros);
+ UseInfo const lhs_use =
+ CheckedUseInfoAsWord32FromHint(hint, truncation.identify_zeros());
+ UseInfo const rhs_use =
+ CheckedUseInfoAsWord32FromHint(hint, kIdentifyZeros);
if (truncation.IsUsedAsWord32()) {
VisitBinop<T>(node, lhs_use, rhs_use, MachineRepresentation::kWord32);
if (lower<T>()) DeferReplacement(node, lowering->Int32Mod(node));
@@ -1788,6 +1805,7 @@ class RepresentationSelector {
case CTypeInfo::Type::kFloat64:
return UseInfo::CheckedNumberAsFloat64(kDistinguishZeros, feedback);
case CTypeInfo::Type::kV8Value:
+ case CTypeInfo::Type::kApiObject:
return UseInfo::AnyTagged();
}
}
@@ -1944,6 +1962,33 @@ class RepresentationSelector {
SimplifiedLowering* lowering) {
tick_counter_->TickAndMaybeEnterSafepoint();
+ if (lower<T>()) {
+ // Kill non-effectful operations that have a None-type input and are thus
+ // dead code. Otherwise we might end up lowering the operation in a way,
+ // e.g. by replacing it with a constant, that cuts the dependency on a
+ // deopting operation (the producer of the None type), possibly resulting
+ // in a nonsense schedule.
+ if (node->op()->EffectOutputCount() == 0 &&
+ node->op()->ControlOutputCount() == 0 &&
+ node->opcode() != IrOpcode::kDeadValue &&
+ node->opcode() != IrOpcode::kStateValues &&
+ node->opcode() != IrOpcode::kFrameState &&
+ node->opcode() != IrOpcode::kPhi) {
+ for (int i = 0; i < node->op()->ValueInputCount(); i++) {
+ Node* input = node->InputAt(i);
+ if (TypeOf(input).IsNone()) {
+ node->ReplaceInput(0, input);
+ node->TrimInputCount(1);
+ ChangeOp(node,
+ common()->DeadValue(GetInfo(node)->representation()));
+ return;
+ }
+ }
+ } else {
+ InsertUnreachableIfNecessary<T>(node);
+ }
+ }
+
// Unconditionally eliminate unused pure nodes (only relevant if there's
// a pure operation in between two effectful ones, where the last one
// is unused).
@@ -1957,8 +2002,6 @@ class RepresentationSelector {
return VisitUnused<T>(node);
}
- if (lower<T>()) InsertUnreachableIfNecessary<T>(node);
-
switch (node->opcode()) {
//------------------------------------------------------------------
// Common operators.
@@ -2182,10 +2225,9 @@ class RepresentationSelector {
switch (hint) {
case NumberOperationHint::kSignedSmall:
if (propagate<T>()) {
- VisitBinop<T>(node,
- CheckedUseInfoAsWord32FromHint(
- hint, FeedbackSource(), kIdentifyZeros),
- MachineRepresentation::kBit);
+ VisitBinop<T>(
+ node, CheckedUseInfoAsWord32FromHint(hint, kIdentifyZeros),
+ MachineRepresentation::kBit);
} else if (retype<T>()) {
SetOutput<T>(node, MachineRepresentation::kBit, Type::Any());
} else {
@@ -2202,10 +2244,9 @@ class RepresentationSelector {
node, changer_->TaggedSignedOperatorFor(node->opcode()));
} else {
- VisitBinop<T>(node,
- CheckedUseInfoAsWord32FromHint(
- hint, FeedbackSource(), kIdentifyZeros),
- MachineRepresentation::kBit);
+ VisitBinop<T>(
+ node, CheckedUseInfoAsWord32FromHint(hint, kIdentifyZeros),
+ MachineRepresentation::kBit);
ChangeToPureOp(node, Int32Op(node));
}
}
@@ -2497,7 +2538,8 @@ class RepresentationSelector {
}
NumberOperationHint hint = NumberOperationHintOf(node->op());
Type rhs_type = GetUpperBound(node->InputAt(1));
- VisitBinop<T>(node, CheckedUseInfoAsWord32FromHint(hint),
+ VisitBinop<T>(node,
+ CheckedUseInfoAsWord32FromHint(hint, kIdentifyZeros),
MachineRepresentation::kWord32, Type::Signed32());
if (lower<T>()) {
MaskShiftOperand(node, rhs_type);
@@ -2530,7 +2572,8 @@ class RepresentationSelector {
}
NumberOperationHint hint = NumberOperationHintOf(node->op());
Type rhs_type = GetUpperBound(node->InputAt(1));
- VisitBinop<T>(node, CheckedUseInfoAsWord32FromHint(hint),
+ VisitBinop<T>(node,
+ CheckedUseInfoAsWord32FromHint(hint, kIdentifyZeros),
MachineRepresentation::kWord32, Type::Signed32());
if (lower<T>()) {
MaskShiftOperand(node, rhs_type);
@@ -2559,7 +2602,8 @@ class RepresentationSelector {
// have seen so far were of type Unsigned31. We speculate that this
// will continue to hold. Moreover, since the RHS is 0, the result
// will just be the (converted) LHS.
- VisitBinop<T>(node, CheckedUseInfoAsWord32FromHint(hint),
+ VisitBinop<T>(node,
+ CheckedUseInfoAsWord32FromHint(hint, kIdentifyZeros),
MachineRepresentation::kWord32, Type::Unsigned31());
if (lower<T>()) {
node->RemoveInput(1);
@@ -2578,7 +2622,8 @@ class RepresentationSelector {
}
return;
}
- VisitBinop<T>(node, CheckedUseInfoAsWord32FromHint(hint),
+ VisitBinop<T>(node,
+ CheckedUseInfoAsWord32FromHint(hint, kIdentifyZeros),
MachineRepresentation::kWord32, Type::Unsigned32());
if (lower<T>()) {
MaskShiftOperand(node, rhs_type);
@@ -2785,9 +2830,25 @@ class RepresentationSelector {
}
return;
}
- case IrOpcode::kBigIntAsUintN: {
- ProcessInput<T>(node, 0, UseInfo::TruncatingWord64());
+ case IrOpcode::kSpeculativeBigIntAsUintN: {
+ const auto p = SpeculativeBigIntAsUintNParametersOf(node->op());
+ DCHECK_LE(0, p.bits());
+ DCHECK_LE(p.bits(), 64);
+
+ ProcessInput<T>(node, 0,
+ UseInfo::CheckedBigIntTruncatingWord64(p.feedback()));
SetOutput<T>(node, MachineRepresentation::kWord64, Type::BigInt());
+ if (lower<T>()) {
+ if (p.bits() == 0) {
+ DeferReplacement(node, jsgraph_->ZeroConstant());
+ } else if (p.bits() == 64) {
+ DeferReplacement(node, node->InputAt(0));
+ } else {
+ const uint64_t mask = (1ULL << p.bits()) - 1ULL;
+ ChangeUnaryToPureBinaryOp(node, lowering->machine()->Word64And(), 1,
+ jsgraph_->Int64Constant(mask));
+ }
+ }
return;
}
case IrOpcode::kNumberAcos:
@@ -3437,7 +3498,8 @@ class RepresentationSelector {
case NumberOperationHint::kSignedSmall:
case NumberOperationHint::kSignedSmallInputs:
VisitUnop<T>(node,
- CheckedUseInfoAsWord32FromHint(p.hint(), p.feedback()),
+ CheckedUseInfoAsWord32FromHint(
+ p.hint(), kDistinguishZeros, p.feedback()),
MachineRepresentation::kWord32, Type::Signed32());
break;
case NumberOperationHint::kNumber:
@@ -3876,6 +3938,21 @@ class RepresentationSelector {
case IrOpcode::kAssertType:
return VisitUnop<T>(node, UseInfo::AnyTagged(),
MachineRepresentation::kTagged);
+ case IrOpcode::kVerifyType: {
+ Type inputType = TypeOf(node->InputAt(0));
+ VisitUnop<T>(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged,
+ inputType);
+ if (lower<T>()) {
+ CHECK_IMPLIES(!FLAG_fuzzing, inputType.CanBeAsserted());
+ if (inputType.CanBeAsserted()) {
+ ChangeOp(node, simplified()->AssertType(inputType));
+ } else {
+ DeferReplacement(node, node->InputAt(0));
+ }
+ }
+ return;
+ }
+
default:
FATAL(
"Representation inference: unsupported opcode %i (%s), node #%i\n.",
@@ -3885,34 +3962,8 @@ class RepresentationSelector {
UNREACHABLE();
}
- void DeferReplacement(Node* node, Node* replacement) {
- TRACE("defer replacement #%d:%s with #%d:%s\n", node->id(),
- node->op()->mnemonic(), replacement->id(),
- replacement->op()->mnemonic());
-
- // Disconnect the node from effect and control chains, if necessary.
- if (node->op()->EffectInputCount() > 0) {
- DCHECK_LT(0, node->op()->ControlInputCount());
- // Disconnect the node from effect and control chains.
- Node* control = NodeProperties::GetControlInput(node);
- Node* effect = NodeProperties::GetEffectInput(node);
- ReplaceEffectControlUses(node, effect, control);
- }
-
- replacements_.push_back(node);
- replacements_.push_back(replacement);
-
- node->NullAllInputs(); // Node is now dead.
-
- NotifyNodeReplaced(node, replacement);
- }
-
- void Kill(Node* node) {
- TRACE("killing #%d:%s\n", node->id(), node->op()->mnemonic());
-
+ void DisconnectFromEffectAndControl(Node* node) {
if (node->op()->EffectInputCount() == 1) {
- DCHECK_LT(0, node->op()->ControlInputCount());
- // Disconnect the node from effect and control chains.
Node* control = NodeProperties::GetControlInput(node);
Node* effect = NodeProperties::GetEffectInput(node);
ReplaceEffectControlUses(node, effect, control);
@@ -3921,10 +3972,20 @@ class RepresentationSelector {
DCHECK_EQ(0, node->op()->ControlOutputCount());
DCHECK_EQ(0, node->op()->EffectOutputCount());
}
+ }
+
+ void DeferReplacement(Node* node, Node* replacement) {
+ TRACE("defer replacement #%d:%s with #%d:%s\n", node->id(),
+ node->op()->mnemonic(), replacement->id(),
+ replacement->op()->mnemonic());
- node->ReplaceUses(jsgraph_->Dead());
+ DisconnectFromEffectAndControl(node);
+ node->NullAllInputs(); // Node is now dead.
- node->NullAllInputs(); // The {node} is now dead.
+ replacements_.push_back(node);
+ replacements_.push_back(replacement);
+
+ NotifyNodeReplaced(node, replacement);
}
private:
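
Illustrative note on the SpeculativeBigIntAsUintN lowering above: bits == 0 folds to the
constant 0, bits == 64 is the identity, and any other width becomes a Word64And with the
mask (1 << bits) - 1. A value-level model of that behavior (a sketch, not V8 code):

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    uint64_t AsUintN(int bits, uint64_t value) {
      assert(bits >= 0 && bits <= 64);
      if (bits == 0) return 0;                          // DeferReplacement with ZeroConstant
      if (bits == 64) return value;                     // DeferReplacement with the input
      const uint64_t mask = (uint64_t{1} << bits) - 1;  // Word64And mask
      return value & mask;
    }

    int main() {
      std::printf("%llu\n", static_cast<unsigned long long>(AsUintN(8, 0x1ffu)));  // 255
      std::printf("%llu\n", static_cast<unsigned long long>(AsUintN(0, 123u)));    // 0
      std::printf("%llu\n", static_cast<unsigned long long>(AsUintN(64, ~0ull)));  // 18446744073709551615
      return 0;
    }
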
diff --git a/chromium/v8/src/compiler/simplified-operator-reducer.cc b/chromium/v8/src/compiler/simplified-operator-reducer.cc
index b1d3f8b2f3a..6bc984008db 100644
--- a/chromium/v8/src/compiler/simplified-operator-reducer.cc
+++ b/chromium/v8/src/compiler/simplified-operator-reducer.cc
@@ -5,6 +5,7 @@
#include "src/compiler/simplified-operator-reducer.h"
#include "src/compiler/js-graph.h"
+#include "src/compiler/js-heap-broker.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/operator-properties.h"
@@ -41,7 +42,7 @@ SimplifiedOperatorReducer::~SimplifiedOperatorReducer() = default;
Reduction SimplifiedOperatorReducer::Reduce(Node* node) {
- DisallowHeapAccessIf no_heap_access(!FLAG_turbo_direct_heap_access);
+ DisallowHeapAccessIf no_heap_access(!broker()->is_concurrent_inlining());
switch (node->opcode()) {
case IrOpcode::kBooleanNot: {
HeapObjectMatcher m(node->InputAt(0));
@@ -60,7 +61,9 @@ Reduction SimplifiedOperatorReducer::Reduce(Node* node) {
case IrOpcode::kChangeTaggedToBit: {
HeapObjectMatcher m(node->InputAt(0));
if (m.HasResolvedValue()) {
- return ReplaceInt32(m.Ref(broker()).BooleanValue());
+ base::Optional<bool> maybe_result =
+ m.Ref(broker()).TryGetBooleanValue();
+ if (maybe_result.has_value()) return ReplaceInt32(*maybe_result);
}
if (m.IsChangeBitToTagged()) return Replace(m.InputAt(0));
break;
diff --git a/chromium/v8/src/compiler/simplified-operator.cc b/chromium/v8/src/compiler/simplified-operator.cc
index 09e3a80ec48..32cc9eea950 100644
--- a/chromium/v8/src/compiler/simplified-operator.cc
+++ b/chromium/v8/src/compiler/simplified-operator.cc
@@ -615,6 +615,27 @@ NumberOperationParameters const& NumberOperationParametersOf(
return OpParameter<NumberOperationParameters>(op);
}
+bool operator==(SpeculativeBigIntAsUintNParameters const& lhs,
+ SpeculativeBigIntAsUintNParameters const& rhs) {
+ return lhs.bits() == rhs.bits() && lhs.feedback() == rhs.feedback();
+}
+
+size_t hash_value(SpeculativeBigIntAsUintNParameters const& p) {
+ FeedbackSource::Hash feedback_hash;
+ return base::hash_combine(p.bits(), feedback_hash(p.feedback()));
+}
+
+std::ostream& operator<<(std::ostream& os,
+ SpeculativeBigIntAsUintNParameters const& p) {
+ return os << p.bits() << ", " << p.feedback();
+}
+
+SpeculativeBigIntAsUintNParameters const& SpeculativeBigIntAsUintNParametersOf(
+ Operator const* op) {
+ DCHECK_EQ(IrOpcode::kSpeculativeBigIntAsUintN, op->opcode());
+ return OpParameter<SpeculativeBigIntAsUintNParameters>(op);
+}
+
size_t hash_value(AllocateParameters info) {
return base::hash_combine(info.type(),
static_cast<int>(info.allocation_type()));
@@ -1296,11 +1317,14 @@ const Operator* SimplifiedOperatorBuilder::RuntimeAbort(AbortReason reason) {
static_cast<int>(reason)); // parameter
}
-const Operator* SimplifiedOperatorBuilder::BigIntAsUintN(int bits) {
+const Operator* SimplifiedOperatorBuilder::SpeculativeBigIntAsUintN(
+ int bits, const FeedbackSource& feedback) {
CHECK(0 <= bits && bits <= 64);
- return zone()->New<Operator1<int>>(IrOpcode::kBigIntAsUintN, Operator::kPure,
- "BigIntAsUintN", 1, 0, 0, 1, 0, 0, bits);
+ return zone()->New<Operator1<SpeculativeBigIntAsUintNParameters>>(
+ IrOpcode::kSpeculativeBigIntAsUintN, Operator::kNoProperties,
+ "SpeculativeBigIntAsUintN", 1, 1, 1, 1, 1, 0,
+ SpeculativeBigIntAsUintNParameters(bits, feedback));
}
const Operator* SimplifiedOperatorBuilder::UpdateInterruptBudget(int delta) {
@@ -1316,12 +1340,18 @@ const Operator* SimplifiedOperatorBuilder::TierUpCheck() {
}
const Operator* SimplifiedOperatorBuilder::AssertType(Type type) {
- DCHECK(type.IsRange());
+ DCHECK(type.CanBeAsserted());
return zone()->New<Operator1<Type>>(IrOpcode::kAssertType,
Operator::kNoThrow | Operator::kNoDeopt,
"AssertType", 1, 0, 0, 1, 0, 0, type);
}
+const Operator* SimplifiedOperatorBuilder::VerifyType() {
+ return zone()->New<Operator>(IrOpcode::kVerifyType,
+ Operator::kNoThrow | Operator::kNoDeopt,
+ "VerifyType", 1, 0, 0, 1, 0, 0);
+}
+
const Operator* SimplifiedOperatorBuilder::CheckIf(
DeoptimizeReason reason, const FeedbackSource& feedback) {
if (!feedback.IsValid()) {
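
Illustrative note on the parameter plumbing added above: SpeculativeBigIntAsUintNParameters
follows the usual TurboFan operator-parameter pattern, i.e. a small value class plus free
operator==, hash_value and operator<< so it can be embedded in an Operator1<> and read back
via SpeculativeBigIntAsUintNParametersOf. A stripped-down sketch of the pattern with a
made-up ToyParams type (field names and the hash mix are illustrative, not V8's
base::hash_combine):

    #include <cstddef>
    #include <functional>
    #include <ostream>

    struct ToyParams {
      int bits;
      int feedback_slot;
    };

    bool operator==(const ToyParams& lhs, const ToyParams& rhs) {
      return lhs.bits == rhs.bits && lhs.feedback_slot == rhs.feedback_slot;
    }

    size_t hash_value(const ToyParams& p) {
      // A simple mix; V8 uses base::hash_combine here.
      return std::hash<int>{}(p.bits) * 31 + std::hash<int>{}(p.feedback_slot);
    }

    std::ostream& operator<<(std::ostream& os, const ToyParams& p) {
      return os << p.bits << ", " << p.feedback_slot;
    }
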
diff --git a/chromium/v8/src/compiler/simplified-operator.h b/chromium/v8/src/compiler/simplified-operator.h
index cd66b89ea44..03ca88bc9af 100644
--- a/chromium/v8/src/compiler/simplified-operator.h
+++ b/chromium/v8/src/compiler/simplified-operator.h
@@ -127,6 +127,13 @@ struct FieldAccess {
#endif
{
DCHECK_GE(offset, 0);
+ DCHECK_IMPLIES(
+ machine_type.IsMapWord(),
+ offset == HeapObject::kMapOffset && base_is_tagged != kUntaggedBase);
+ DCHECK_IMPLIES(machine_type.IsMapWord(),
+ (write_barrier_kind == kMapWriteBarrier ||
+ write_barrier_kind == kNoWriteBarrier ||
+ write_barrier_kind == kAssertNoWriteBarrier));
}
int tag() const { return base_is_tagged == kTaggedBase ? kHeapObjectTag : 0; }
@@ -599,6 +606,30 @@ bool operator==(NumberOperationParameters const&,
const NumberOperationParameters& NumberOperationParametersOf(const Operator* op)
V8_WARN_UNUSED_RESULT;
+class SpeculativeBigIntAsUintNParameters {
+ public:
+ SpeculativeBigIntAsUintNParameters(int bits, const FeedbackSource& feedback)
+ : bits_(bits), feedback_(feedback) {
+ DCHECK_GE(bits_, 0);
+ DCHECK_LE(bits_, 64);
+ }
+
+ int bits() const { return bits_; }
+ const FeedbackSource& feedback() const { return feedback_; }
+
+ private:
+ int bits_;
+ FeedbackSource feedback_;
+};
+
+size_t hash_value(SpeculativeBigIntAsUintNParameters const&);
+V8_EXPORT_PRIVATE std::ostream& operator<<(
+ std::ostream&, const SpeculativeBigIntAsUintNParameters&);
+bool operator==(SpeculativeBigIntAsUintNParameters const&,
+ SpeculativeBigIntAsUintNParameters const&);
+const SpeculativeBigIntAsUintNParameters& SpeculativeBigIntAsUintNParametersOf(
+ const Operator* op) V8_WARN_UNUSED_RESULT;
+
int FormalParameterCountOf(const Operator* op) V8_WARN_UNUSED_RESULT;
class AllocateParameters {
@@ -806,7 +837,8 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* SpeculativeBigIntAdd(BigIntOperationHint hint);
const Operator* SpeculativeBigIntSubtract(BigIntOperationHint hint);
const Operator* SpeculativeBigIntNegate(BigIntOperationHint hint);
- const Operator* BigIntAsUintN(int bits);
+ const Operator* SpeculativeBigIntAsUintN(int bits,
+ const FeedbackSource& feedback);
const Operator* ReferenceEqual();
const Operator* SameValue();
@@ -1054,6 +1086,10 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
// Abort if the value input does not inhabit the given type
const Operator* AssertType(Type type);
+ // Abort if the value does not match the node's computed type after
+ // SimplifiedLowering.
+ const Operator* VerifyType();
+
const Operator* DateNow();
// Represents the inputs necessary to construct a fast and a slow API call.
@@ -1112,7 +1148,7 @@ class FastApiCallNode final : public SimplifiedNodeWrapperBase {
public:
explicit constexpr FastApiCallNode(Node* node)
: SimplifiedNodeWrapperBase(node) {
- CONSTEXPR_DCHECK(node->opcode() == IrOpcode::kFastApiCall);
+ DCHECK_EQ(IrOpcode::kFastApiCall, node->opcode());
}
const FastApiCallParameters& Parameters() const {
@@ -1184,7 +1220,7 @@ class TierUpCheckNode final : public SimplifiedNodeWrapperBase {
public:
explicit constexpr TierUpCheckNode(Node* node)
: SimplifiedNodeWrapperBase(node) {
- CONSTEXPR_DCHECK(node->opcode() == IrOpcode::kTierUpCheck);
+ DCHECK_EQ(IrOpcode::kTierUpCheck, node->opcode());
}
#define INPUTS(V) \
@@ -1201,7 +1237,7 @@ class UpdateInterruptBudgetNode final : public SimplifiedNodeWrapperBase {
public:
explicit constexpr UpdateInterruptBudgetNode(Node* node)
: SimplifiedNodeWrapperBase(node) {
- CONSTEXPR_DCHECK(node->opcode() == IrOpcode::kUpdateInterruptBudget);
+ DCHECK_EQ(IrOpcode::kUpdateInterruptBudget, node->opcode());
}
int delta() const { return OpParameter<int>(node()->op()); }
diff --git a/chromium/v8/src/compiler/store-store-elimination.cc b/chromium/v8/src/compiler/store-store-elimination.cc
index 70dadd94417..3600ee1a41a 100644
--- a/chromium/v8/src/compiler/store-store-elimination.cc
+++ b/chromium/v8/src/compiler/store-store-elimination.cc
@@ -331,8 +331,8 @@ UnobservablesSet RedundantStoreFinder::RecomputeSet(
bool RedundantStoreFinder::CannotObserveStoreField(Node* node) {
IrOpcode::Value opcode = node->opcode();
return opcode == IrOpcode::kLoadElement || opcode == IrOpcode::kLoad ||
- opcode == IrOpcode::kStore || opcode == IrOpcode::kEffectPhi ||
- opcode == IrOpcode::kStoreElement ||
+ opcode == IrOpcode::kLoadImmutable || opcode == IrOpcode::kStore ||
+ opcode == IrOpcode::kEffectPhi || opcode == IrOpcode::kStoreElement ||
opcode == IrOpcode::kUnsafePointerAdd || opcode == IrOpcode::kRetain;
}
diff --git a/chromium/v8/src/compiler/type-cache.h b/chromium/v8/src/compiler/type-cache.h
index 2ade5f68a3b..6442b6f6b0e 100644
--- a/chromium/v8/src/compiler/type-cache.h
+++ b/chromium/v8/src/compiler/type-cache.h
@@ -36,9 +36,10 @@ class V8_EXPORT_PRIVATE TypeCache final {
Type const kUnsigned31 = Type::Unsigned31();
Type const kInt32 = Type::Signed32();
Type const kUint32 = Type::Unsigned32();
- Type const kInt64 = CreateRange<int64_t>();
- Type const kUint64 = CreateRange<uint64_t>();
- Type const kIntPtr = CreateRange<intptr_t>();
+ Type const kDoubleRepresentableInt64 = CreateRange(
+ std::numeric_limits<int64_t>::min(), kMaxDoubleRepresentableInt64);
+ Type const kDoubleRepresentableUint64 = CreateRange(
+ std::numeric_limits<uint64_t>::min(), kMaxDoubleRepresentableUint64);
Type const kFloat32 = Type::Number();
Type const kFloat64 = Type::Number();
Type const kBigInt64 = Type::BigInt();
@@ -190,8 +191,11 @@ class V8_EXPORT_PRIVATE TypeCache final {
private:
template <typename T>
Type CreateRange() {
- return CreateRange(std::numeric_limits<T>::min(),
- std::numeric_limits<T>::max());
+ T min = std::numeric_limits<T>::min();
+ T max = std::numeric_limits<T>::max();
+ DCHECK_EQ(min, static_cast<T>(static_cast<double>(min)));
+ DCHECK_EQ(max, static_cast<T>(static_cast<double>(max)));
+ return CreateRange(min, max);
}
Type CreateRange(double min, double max) {
@@ -199,6 +203,10 @@ class V8_EXPORT_PRIVATE TypeCache final {
}
Zone* zone() { return &zone_; }
+
+ static constexpr double kMaxDoubleRepresentableInt64 = 9223372036854774784.0;
+ static constexpr double kMaxDoubleRepresentableUint64 =
+ 18446744073709549568.0;
};
} // namespace compiler
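
Illustrative note on the new TypeCache constants above: they are the largest doubles that
still lie below 2^63 and 2^64. Near 2^63 adjacent doubles are 2^(62-52) = 1024 apart and
near 2^64 they are 2048 apart, so the maxima are 2^63 - 1024 = 9223372036854774784 and
2^64 - 2048 = 18446744073709549568. A small round-trip check (a sketch, not V8 code):

    #include <cstdint>
    #include <cstdio>

    int main() {
      const double kMaxDoubleRepresentableInt64 = 9223372036854774784.0;    // 2^63 - 1024
      const double kMaxDoubleRepresentableUint64 = 18446744073709549568.0;  // 2^64 - 2048
      // Both constants are exact integers, so they survive a round trip through the
      // corresponding integer type; INT64_MAX itself would not, since its nearest
      // doubles are 2^63 - 1024 and 2^63.
      const int64_t i = static_cast<int64_t>(kMaxDoubleRepresentableInt64);
      const uint64_t u = static_cast<uint64_t>(kMaxDoubleRepresentableUint64);
      std::printf("%d %d\n",
                  static_cast<double>(i) == kMaxDoubleRepresentableInt64,
                  static_cast<double>(u) == kMaxDoubleRepresentableUint64);  // 1 1
      return 0;
    }
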
diff --git a/chromium/v8/src/compiler/type-narrowing-reducer.cc b/chromium/v8/src/compiler/type-narrowing-reducer.cc
index b24e82c53b8..996b0883762 100644
--- a/chromium/v8/src/compiler/type-narrowing-reducer.cc
+++ b/chromium/v8/src/compiler/type-narrowing-reducer.cc
@@ -5,6 +5,7 @@
#include "src/compiler/type-narrowing-reducer.h"
#include "src/compiler/js-graph.h"
+#include "src/compiler/js-heap-broker.h"
namespace v8 {
namespace internal {
@@ -12,12 +13,15 @@ namespace compiler {
TypeNarrowingReducer::TypeNarrowingReducer(Editor* editor, JSGraph* jsgraph,
JSHeapBroker* broker)
- : AdvancedReducer(editor), jsgraph_(jsgraph), op_typer_(broker, zone()) {}
+ : AdvancedReducer(editor),
+ jsgraph_(jsgraph),
+ broker_(broker),
+ op_typer_(broker, zone()) {}
TypeNarrowingReducer::~TypeNarrowingReducer() = default;
Reduction TypeNarrowingReducer::Reduce(Node* node) {
- DisallowHeapAccessIf no_heap_access(!FLAG_turbo_direct_heap_access);
+ DisallowHeapAccessIf no_heap_access(!broker_->is_concurrent_inlining());
Type new_type = Type::Any();
diff --git a/chromium/v8/src/compiler/type-narrowing-reducer.h b/chromium/v8/src/compiler/type-narrowing-reducer.h
index ab8c4a483c3..9ec6883449d 100644
--- a/chromium/v8/src/compiler/type-narrowing-reducer.h
+++ b/chromium/v8/src/compiler/type-narrowing-reducer.h
@@ -34,6 +34,7 @@ class V8_EXPORT_PRIVATE TypeNarrowingReducer final
Zone* zone() const;
JSGraph* const jsgraph_;
+ const JSHeapBroker* const broker_;
OperationTyper op_typer_;
};
diff --git a/chromium/v8/src/compiler/typed-optimization.cc b/chromium/v8/src/compiler/typed-optimization.cc
index d5bc845f7c5..3c16c7317c5 100644
--- a/chromium/v8/src/compiler/typed-optimization.cc
+++ b/chromium/v8/src/compiler/typed-optimization.cc
@@ -34,7 +34,7 @@ TypedOptimization::TypedOptimization(Editor* editor,
TypedOptimization::~TypedOptimization() = default;
Reduction TypedOptimization::Reduce(Node* node) {
- DisallowHeapAccessIf no_heap_access(!FLAG_turbo_direct_heap_access);
+ DisallowHeapAccessIf no_heap_access(!broker()->is_concurrent_inlining());
switch (node->opcode()) {
case IrOpcode::kConvertReceiver:
return ReduceConvertReceiver(node);
diff --git a/chromium/v8/src/compiler/typer.cc b/chromium/v8/src/compiler/typer.cc
index 0f182222364..09daaa3864c 100644
--- a/chromium/v8/src/compiler/typer.cc
+++ b/chromium/v8/src/compiler/typer.cc
@@ -1839,8 +1839,6 @@ Type Typer::Visitor::TypeJSCallRuntime(Node* node) {
return TypeUnaryOp(node, ToNumber);
case Runtime::kInlineToObject:
return TypeUnaryOp(node, ToObject);
- case Runtime::kInlineToString:
- return TypeUnaryOp(node, ToString);
case Runtime::kHasInPrototypeChain:
return Type::Boolean();
default:
@@ -2361,6 +2359,10 @@ Type Typer::Visitor::TypeRuntimeAbort(Node* node) { UNREACHABLE(); }
Type Typer::Visitor::TypeAssertType(Node* node) { UNREACHABLE(); }
+Type Typer::Visitor::TypeVerifyType(Node* node) {
+ return TypeOrNone(node->InputAt(0));
+}
+
// Heap constants.
Type Typer::Visitor::TypeConstant(Handle<Object> value) {
diff --git a/chromium/v8/src/compiler/types.cc b/chromium/v8/src/compiler/types.cc
index 236cff3cc68..5c6a4f7db94 100644
--- a/chromium/v8/src/compiler/types.cc
+++ b/chromium/v8/src/compiler/types.cc
@@ -6,6 +6,7 @@
#include <iomanip>
+#include "src/compiler/js-heap-broker.h"
#include "src/handles/handles-inl.h"
#include "src/objects/instance-type.h"
#include "src/objects/objects-inl.h"
@@ -837,7 +838,14 @@ Type Type::Constant(double value, Zone* zone) {
}
Type Type::Constant(JSHeapBroker* broker, Handle<i::Object> value, Zone* zone) {
- ObjectRef ref(broker, value);
+ // TODO(jgruber,chromium:1209798): Using kAssumeMemoryFence works around
+ // the fact that the graph stores handles (and not refs). The assumption is
+ // that any handle inserted into the graph is safe to read; but we don't
+ // preserve the reason why it is safe to read. Thus we must over-approximate
+ // here and assume the existence of a memory fence. In the future, we should
+ // consider having the graph store ObjectRefs or ObjectData pointers instead,
+ // which would make new ref construction here unnecessary.
+ ObjectRef ref = MakeRefAssumeMemoryFence(broker, value);
if (ref.IsSmi()) {
return Constant(static_cast<double>(ref.AsSmi()), zone);
}
@@ -969,8 +977,7 @@ const char* BitsetType::Name(bitset bits) {
}
}
-void BitsetType::Print(std::ostream& os, // NOLINT
- bitset bits) {
+void BitsetType::Print(std::ostream& os, bitset bits) {
DisallowGarbageCollection no_gc;
const char* name = Name(bits);
if (name != nullptr) {
diff --git a/chromium/v8/src/compiler/types.h b/chromium/v8/src/compiler/types.h
index b5692298ab0..a28a28c59e9 100644
--- a/chromium/v8/src/compiler/types.h
+++ b/chromium/v8/src/compiler/types.h
@@ -267,7 +267,7 @@ class V8_EXPORT_PRIVATE BitsetType {
static bitset ExpandInternals(bitset bits);
static const char* Name(bitset);
- static void Print(std::ostream& os, bitset); // NOLINT
+ static void Print(std::ostream& os, bitset);
#ifdef DEBUG
static void Print(bitset);
#endif
@@ -413,6 +413,10 @@ class V8_EXPORT_PRIVATE Type {
(Is(Type::PlainNumber()) && Min() == Max());
}
+ bool CanBeAsserted() const {
+ return IsRange() || (Is(Type::Integral32()) && !IsNone());
+ }
+
const HeapConstantType* AsHeapConstant() const;
const OtherNumberConstantType* AsOtherNumberConstant() const;
const RangeType* AsRange() const;
diff --git a/chromium/v8/src/compiler/verifier.cc b/chromium/v8/src/compiler/verifier.cc
index b5b3ab5a981..8b7e22459b9 100644
--- a/chromium/v8/src/compiler/verifier.cc
+++ b/chromium/v8/src/compiler/verifier.cc
@@ -482,8 +482,8 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
Node* control = NodeProperties::GetControlInput(node, 0);
CHECK_EQ(effect_count, control->op()->ControlInputCount());
CHECK_EQ(input_count, 1 + effect_count);
- // If the control input is a Merge, then make sure that at least one
- // of it's usages is non-phi.
+ // If the control input is a Merge, then make sure that at least one of
+ // its usages is non-phi.
if (control->opcode() == IrOpcode::kMerge) {
bool non_phi_use_found = false;
for (Node* use : control->uses()) {
@@ -954,8 +954,8 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
case IrOpcode::kSpeculativeBigIntNegate:
CheckTypeIs(node, Type::BigInt());
break;
- case IrOpcode::kBigIntAsUintN:
- CheckValueInputIs(node, 0, Type::BigInt());
+ case IrOpcode::kSpeculativeBigIntAsUintN:
+ CheckValueInputIs(node, 0, Type::Any());
CheckTypeIs(node, Type::BigInt());
break;
case IrOpcode::kBigIntAdd:
@@ -1500,6 +1500,15 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
case IrOpcode::kAssertType:
break;
+ case IrOpcode::kVerifyType:
+ if (NodeProperties::IsTyped(node)) {
+ Node* input = NodeProperties::GetValueInput(node, 0);
+ DCHECK(NodeProperties::IsTyped(input));
+ CHECK(NodeProperties::GetType(node).Equals(
+ NodeProperties::GetType(input)));
+ }
+ break;
+
case IrOpcode::kCheckFloat64Hole:
CheckValueInputIs(node, 0, Type::NumberOrHole());
CheckTypeIs(node, Type::NumberOrUndefined());
@@ -1658,8 +1667,12 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
case IrOpcode::kWord64Rol:
case IrOpcode::kWord64Ror:
case IrOpcode::kWord64Clz:
- case IrOpcode::kWord64Popcnt:
case IrOpcode::kWord64Ctz:
+ case IrOpcode::kWord64RolLowerable:
+ case IrOpcode::kWord64RorLowerable:
+ case IrOpcode::kWord64ClzLowerable:
+ case IrOpcode::kWord64CtzLowerable:
+ case IrOpcode::kWord64Popcnt:
case IrOpcode::kWord64ReverseBits:
case IrOpcode::kWord64ReverseBytes:
case IrOpcode::kSimd128ReverseBytes:
@@ -1791,6 +1804,8 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
case IrOpcode::kFloat64ExtractHighWord32:
case IrOpcode::kFloat64InsertLowWord32:
case IrOpcode::kFloat64InsertHighWord32:
+ case IrOpcode::kWord32Select:
+ case IrOpcode::kWord64Select:
case IrOpcode::kFloat32Select:
case IrOpcode::kFloat64Select:
case IrOpcode::kInt32PairAdd:
@@ -1850,7 +1865,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
// TODO(rossberg): Check.
break;
}
-} // NOLINT(readability/fn_size)
+}
void Verifier::Run(Graph* graph, Typing typing, CheckInputs check_inputs,
CodeType code_type) {
diff --git a/chromium/v8/src/compiler/wasm-compiler.cc b/chromium/v8/src/compiler/wasm-compiler.cc
index 1b858f5dfcb..6e433def702 100644
--- a/chromium/v8/src/compiler/wasm-compiler.cc
+++ b/chromium/v8/src/compiler/wasm-compiler.cc
@@ -16,7 +16,7 @@
#include "src/codegen/assembler.h"
#include "src/codegen/code-factory.h"
#include "src/codegen/compiler.h"
-#include "src/codegen/interface-descriptors.h"
+#include "src/codegen/interface-descriptors-inl.h"
#include "src/codegen/machine-type.h"
#include "src/codegen/optimized-compilation-info.h"
#include "src/compiler/backend/code-generator.h"
@@ -34,7 +34,6 @@
#include "src/compiler/node-origin-table.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/pipeline.h"
-#include "src/compiler/simd-scalar-lowering.h"
#include "src/compiler/zone-stats.h"
#include "src/execution/isolate-inl.h"
#include "src/heap/factory.h"
@@ -131,11 +130,7 @@ constexpr Builtins::Name WasmRuntimeStubIdToBuiltinName(
#undef DEF_CASE
#undef DEF_TRAP_CASE
default:
-#if V8_HAS_CXX14_CONSTEXPR
UNREACHABLE();
-#else
- return Builtins::kAbort;
-#endif
}
}
@@ -291,10 +286,6 @@ class WasmGraphAssembler : public GraphAssembler {
// Maps and their contents.
- Node* LoadMap(Node* heap_object) {
- return LoadFromObject(MachineType::TaggedPointer(), heap_object,
- wasm::ObjectAccess::ToTagged(HeapObject::kMapOffset));
- }
Node* LoadInstanceType(Node* map) {
return LoadFromObject(
MachineType::Uint16(), map,
@@ -710,8 +701,23 @@ void WasmGraphBuilder::PatchInStackCheckIfNeeded() {
if (effect() == dummy) return;
// Now patch all control uses of {start} to use {control} and all effect uses
- // to use {effect} instead. Then rewire the dummy node to use start instead.
+ // to use {effect} instead. We exclude Projection nodes: Projections pointing
+ // to start are floating control, and we want them to point directly to start
+ // because of restrictions later in the pipeline (specifically, loop
+ // unrolling).
+ // Then rewire the dummy node to use start instead.
NodeProperties::ReplaceUses(start, start, effect(), control());
+ {
+ // We need an intermediate vector because we are not allowed to modify a use
+ // while traversing uses().
+ std::vector<Node*> projections;
+ for (Node* use : control()->uses()) {
+ if (use->opcode() == IrOpcode::kProjection) projections.emplace_back(use);
+ }
+ for (Node* use : projections) {
+ use->ReplaceInput(NodeProperties::FirstControlIndex(use), start);
+ }
+ }
NodeProperties::ReplaceUses(dummy, nullptr, start, start);
}
@@ -874,17 +880,19 @@ Node* WasmGraphBuilder::Binop(wasm::WasmOpcode opcode, Node* left, Node* right,
std::swap(left, right);
break;
case wasm::kExprI64Ror:
- op = m->Word64Ror();
right = MaskShiftCount64(right);
- break;
+ return m->Is64() ? graph()->NewNode(m->Word64Ror(), left, right)
+ : graph()->NewNode(m->Word64RorLowerable(), left, right,
+ control());
case wasm::kExprI64Rol:
if (m->Word64Rol().IsSupported()) {
- op = m->Word64Rol().op();
- right = MaskShiftCount64(right);
- break;
+ return m->Is64() ? graph()->NewNode(m->Word64Rol().op(), left,
+ MaskShiftCount64(right))
+ : graph()->NewNode(m->Word64RolLowerable().op(), left,
+ MaskShiftCount64(right), control());
} else if (m->Word32Rol().IsSupported()) {
- op = m->Word64Rol().placeholder();
- break;
+ return graph()->NewNode(m->Word64RolLowerable().placeholder(), left,
+ right, control());
}
return BuildI64Rol(left, right);
case wasm::kExprF32CopySign:
@@ -1175,19 +1183,22 @@ Node* WasmGraphBuilder::Unop(wasm::WasmOpcode opcode, Node* input,
op = m->BitcastFloat64ToInt64();
break;
case wasm::kExprI64Clz:
- op = m->Word64Clz();
- break;
+ return m->Is64()
+ ? graph()->NewNode(m->Word64Clz(), input)
+ : graph()->NewNode(m->Word64ClzLowerable(), input, control());
case wasm::kExprI64Ctz: {
- OptionalOperator ctz64 = m->Word64Ctz();
- if (ctz64.IsSupported()) {
- op = ctz64.op();
- break;
+ if (m->Word64Ctz().IsSupported()) {
+ return m->Is64() ? graph()->NewNode(m->Word64Ctz().op(), input)
+ : graph()->NewNode(m->Word64CtzLowerable().op(), input,
+ control());
} else if (m->Is32() && m->Word32Ctz().IsSupported()) {
- op = ctz64.placeholder();
- break;
+ return graph()->NewNode(m->Word64CtzLowerable().placeholder(), input,
+ control());
} else if (m->Word64ReverseBits().IsSupported()) {
Node* reversed = graph()->NewNode(m->Word64ReverseBits().op(), input);
- Node* result = graph()->NewNode(m->Word64Clz(), reversed);
+ Node* result = m->Is64() ? graph()->NewNode(m->Word64Clz(), reversed)
+ : graph()->NewNode(m->Word64ClzLowerable(),
+ reversed, control());
return result;
} else {
return BuildI64Ctz(input);
@@ -1301,6 +1312,11 @@ Node* WasmGraphBuilder::BranchExpectFalse(Node* cond, Node** true_node,
return gasm_->Branch(cond, true_node, false_node, BranchHint::kFalse);
}
+Node* WasmGraphBuilder::BranchExpectTrue(Node* cond, Node** true_node,
+ Node** false_node) {
+ return gasm_->Branch(cond, true_node, false_node, BranchHint::kTrue);
+}
+
Node* WasmGraphBuilder::Select(Node *cond, Node* true_node,
Node* false_node, wasm::ValueType type) {
MachineOperatorBuilder* m = mcgraph()->machine();
@@ -1314,6 +1330,14 @@ Node* WasmGraphBuilder::Select(Node *cond, Node* true_node,
return mcgraph()->graph()->NewNode(m->Float64Select().op(), cond,
true_node, false_node);
}
+ if (kind == wasm::kI32 && m->Word32Select().IsSupported()) {
+ return mcgraph()->graph()->NewNode(m->Word32Select().op(), cond, true_node,
+ false_node);
+ }
+ if (kind == wasm::kI64 && m->Word64Select().IsSupported()) {
+ return mcgraph()->graph()->NewNode(m->Word64Select().op(), cond, true_node,
+ false_node);
+ }
// Default to control-flow.
Node* controls[2];
BranchNoHint(cond, &controls[0], &controls[1]);
@@ -2944,7 +2968,7 @@ Node* WasmGraphBuilder::BuildImportCall(const wasm::FunctionSig* sig,
Node* imported_function_refs =
LOAD_INSTANCE_FIELD(ImportedFunctionRefs, MachineType::TaggedPointer());
// Access fixed array at {header_size - tag + func_index * kTaggedSize}.
- Node* func_index_intptr = Uint32ToUintptr(func_index);
+ Node* func_index_intptr = BuildChangeUint32ToUintPtr(func_index);
Node* ref_node = gasm_->LoadFixedArrayElement(
imported_function_refs, func_index_intptr, MachineType::TaggedPointer());
@@ -3070,36 +3094,28 @@ Node* WasmGraphBuilder::BuildIndirectCall(uint32_t table_index,
const wasm::ValueType table_type = env_->module->tables[table_index].type;
// Check that the table entry is not null and that the type of the function is
- // a subtype of the function type declared at the call site. In the absence of
- // function subtyping, the latter can only happen if the table type is (ref
- // null? func). Also, subtyping reduces to normalized signature equality
- // checking.
- // TODO(7748): Expand this with function subtyping once we have that.
+ // **identical with** the function type declared at the call site (no
+ // subtyping of functions is allowed).
+ // Note: Since null entries are identified by having ift_sig_id (-1), we only
+ // need one comparison.
+ // TODO(9495): Change this if we should do full function subtyping instead.
const bool needs_signature_check =
+ FLAG_experimental_wasm_gc ||
table_type.is_reference_to(wasm::HeapType::kFunc) ||
table_type.is_nullable();
if (needs_signature_check) {
Node* int32_scaled_key =
- Uint32ToUintptr(gasm_->Word32Shl(key, Int32Constant(2)));
+ BuildChangeUint32ToUintPtr(gasm_->Word32Shl(key, Int32Constant(2)));
Node* loaded_sig = gasm_->LoadFromObject(MachineType::Int32(), ift_sig_ids,
int32_scaled_key);
-
- if (table_type.is_reference_to(wasm::HeapType::kFunc)) {
- int32_t expected_sig_id = env_->module->canonicalized_type_ids[sig_index];
- Node* sig_match =
- gasm_->Word32Equal(loaded_sig, Int32Constant(expected_sig_id));
- TrapIfFalse(wasm::kTrapFuncSigMismatch, sig_match, position);
- } else {
- // If the table entries are nullable, we still have to check that the
- // entry is initialized.
- Node* function_is_null =
- gasm_->Word32Equal(loaded_sig, Int32Constant(-1));
- TrapIfTrue(wasm::kTrapNullDereference, function_is_null, position);
- }
+ int32_t expected_sig_id = env_->module->canonicalized_type_ids[sig_index];
+ Node* sig_match =
+ gasm_->Word32Equal(loaded_sig, Int32Constant(expected_sig_id));
+ TrapIfFalse(wasm::kTrapFuncSigMismatch, sig_match, position);
}
- Node* key_intptr = Uint32ToUintptr(key);
+ Node* key_intptr = BuildChangeUint32ToUintPtr(key);
Node* target_instance = gasm_->LoadFixedArrayElement(
ift_instances, key_intptr, MachineType::TaggedPointer());
@@ -3124,13 +3140,28 @@ Node* WasmGraphBuilder::BuildIndirectCall(uint32_t table_index,
}
}
-Node* WasmGraphBuilder::BuildLoadJumpTableOffsetFromExportedFunctionData(
+Node* WasmGraphBuilder::BuildLoadCallTargetFromExportedFunctionData(
Node* function_data) {
- Node* jump_table_offset_smi = gasm_->LoadFromObject(
- MachineType::TaggedSigned(), function_data,
- wasm::ObjectAccess::ToTagged(
- WasmExportedFunctionData::kJumpTableOffsetOffset));
- return BuildChangeSmiToIntPtr(jump_table_offset_smi);
+ // TODO(saelo) move this code into a common LoadExternalPointer routine?
+#ifdef V8_HEAP_SANDBOX
+ Node* index = gasm_->LoadFromObject(
+ MachineType::Pointer(), function_data,
+ wasm::ObjectAccess::ToTagged(WasmFunctionData::kForeignAddressOffset));
+
+ Node* isolate_root = BuildLoadIsolateRoot();
+ Node* table =
+ gasm_->LoadFromObject(MachineType::Pointer(), isolate_root,
+ IsolateData::external_pointer_table_offset() +
+ Internals::kExternalPointerTableBufferOffset);
+ Node* offset = gasm_->Int32Mul(index, gasm_->Int32Constant(8));
+ Node* decoded_ptr = gasm_->Load(MachineType::Pointer(), table, offset);
+ Node* tag = gasm_->IntPtrConstant(~kForeignForeignAddressTag);
+ return gasm_->WordAnd(decoded_ptr, tag);
+#else
+ return gasm_->LoadFromObject(
+ MachineType::Pointer(), function_data,
+ wasm::ObjectAccess::ToTagged(WasmFunctionData::kForeignAddressOffset));
+#endif
}
// TODO(9495): Support CAPI function refs.
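
Illustrative note on the V8_HEAP_SANDBOX branch above: with the sandbox enabled, the
function data holds an index into the isolate's external pointer table rather than a raw
code address; the code loads the 8-byte table entry at index * 8 and masks off the type
tag to recover the pointer. A value-level sketch of that decode (kExampleTag stands in
for the real kForeignForeignAddressTag, whose value is not part of this diff):

    #include <cstdint>
    #include <vector>

    // Made-up tag occupying the upper 16 bits of a 64-bit table entry.
    constexpr uint64_t kExampleTag = uint64_t{0xffff} << 48;

    uint64_t DecodeExternalPointer(const std::vector<uint64_t>& table, uint32_t index) {
      const uint64_t entry = table[index];  // one 8-byte slot per index
      return entry & ~kExampleTag;          // strip the tag bits
    }

    int main() {
      const std::vector<uint64_t> table = {0, kExampleTag | 0x0000112233445566ull};
      // Entry 1 decodes back to the untagged address.
      return DecodeExternalPointer(table, 1) == 0x0000112233445566ull ? 0 : 1;
    }
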
@@ -3148,77 +3179,33 @@ Node* WasmGraphBuilder::BuildCallRef(uint32_t sig_index, Vector<Node*> args,
Node* function_data = gasm_->LoadFunctionDataFromJSFunction(args[0]);
- Node* is_js_function =
- gasm_->HasInstanceType(function_data, WASM_JS_FUNCTION_DATA_TYPE);
-
- auto js_label = gasm_->MakeLabel();
- auto end_label = gasm_->MakeLabel(MachineRepresentation::kTaggedPointer,
- MachineRepresentation::kTaggedPointer);
+ auto load_target = gasm_->MakeLabel();
+ auto end_label = gasm_->MakeLabel(MachineType::PointerRepresentation());
- gasm_->GotoIf(is_js_function, &js_label);
+ Node* instance_node = gasm_->LoadFromObject(
+ MachineType::TaggedPointer(), function_data,
+ wasm::ObjectAccess::ToTagged(WasmFunctionData::kRefOffset));
+ Node* is_pair = gasm_->HasInstanceType(instance_node, TUPLE2_TYPE);
+ gasm_->GotoIfNot(is_pair, &load_target);
{
- // Call to a WasmExportedFunction.
- // Load instance object corresponding to module where callee is defined.
- Node* callee_instance = gasm_->LoadExportedFunctionInstance(function_data);
- Node* function_index = gasm_->LoadExportedFunctionIndexAsSmi(function_data);
-
- auto imported_label = gasm_->MakeLabel();
-
- // Check if callee is a locally defined or imported function in its module.
- Node* imported_function_refs = gasm_->LoadFromObject(
- MachineType::TaggedPointer(), callee_instance,
- wasm::ObjectAccess::ToTagged(
- WasmInstanceObject::kImportedFunctionRefsOffset));
- Node* imported_functions_num =
- gasm_->LoadFixedArrayLengthAsSmi(imported_function_refs);
- gasm_->GotoIf(gasm_->SmiLessThan(function_index, imported_functions_num),
- &imported_label);
- {
- // Function locally defined in module.
- Node* jump_table_start =
- gasm_->LoadFromObject(MachineType::Pointer(), callee_instance,
- wasm::ObjectAccess::ToTagged(
- WasmInstanceObject::kJumpTableStartOffset));
- Node* jump_table_offset =
- BuildLoadJumpTableOffsetFromExportedFunctionData(function_data);
- Node* jump_table_slot =
- gasm_->IntAdd(jump_table_start, jump_table_offset);
-
- gasm_->Goto(&end_label, jump_table_slot,
- callee_instance /* Unused, dummy value */);
- }
-
- {
- // Function imported to module.
- gasm_->Bind(&imported_label);
- Node* function_index_intptr = BuildChangeSmiToIntPtr(function_index);
-
- Node* imported_instance = gasm_->LoadFixedArrayElement(
- imported_function_refs, function_index_intptr,
- MachineType::TaggedPointer());
-
- Node* imported_function_targets = gasm_->LoadFromObject(
- MachineType::Pointer(), callee_instance,
- wasm::ObjectAccess::ToTagged(
- WasmInstanceObject::kImportedFunctionTargetsOffset));
-
- Node* target_node = gasm_->LoadFromObject(
- MachineType::Pointer(), imported_function_targets,
- gasm_->IntMul(function_index_intptr,
- gasm_->IntPtrConstant(kSystemPointerSize)));
-
- gasm_->Goto(&end_label, target_node, imported_instance);
- }
- }
-
+ // Overwrite the tuple's "instance" entry with the current instance.
+ // TODO(jkummerow): Can we avoid this, by guaranteeing that it's always
+ // pre-populated?
+ gasm_->StoreToObject(
+ ObjectAccess(MachineType::TaggedPointer(), kFullWriteBarrier),
+ instance_node, wasm::ObjectAccess::ToTagged(Tuple2::kValue1Offset),
+ GetInstance());
+ gasm_->Goto(&load_target);
+ }
+
+ gasm_->Bind(&load_target);
+ Node* target = BuildLoadCallTargetFromExportedFunctionData(function_data);
+ Node* is_null_target = gasm_->WordEqual(target, gasm_->IntPtrConstant(0));
+ gasm_->GotoIfNot(is_null_target, &end_label, target);
{
- // Call to a WasmJSFunction. The call target is
- // function_data->wasm_to_js_wrapper_code()->instruction_start().
- // The instance_node is the pair
- // (current WasmInstanceObject, function_data->callable()).
- gasm_->Bind(&js_label);
-
+ // Compute the call target from the (on-heap) wrapper code. The cached
+ // target can only be null for WasmJSFunctions.
Node* wrapper_code = gasm_->LoadFromObject(
MachineType::TaggedPointer(), function_data,
wasm::ObjectAccess::ToTagged(
@@ -3226,23 +3213,12 @@ Node* WasmGraphBuilder::BuildCallRef(uint32_t sig_index, Vector<Node*> args,
Node* call_target = gasm_->IntAdd(
wrapper_code,
gasm_->IntPtrConstant(wasm::ObjectAccess::ToTagged(Code::kHeaderSize)));
-
- Node* callable = gasm_->LoadFromObject(
- MachineType::TaggedPointer(), function_data,
- wasm::ObjectAccess::ToTagged(WasmJSFunctionData::kCallableOffset));
- // TODO(manoskouk): Find an elegant way to avoid allocating this pair for
- // every call.
- Node* function_instance_node =
- gasm_->CallBuiltin(Builtins::kWasmAllocatePair, Operator::kEliminatable,
- GetInstance(), callable);
-
- gasm_->Goto(&end_label, call_target, function_instance_node);
+ gasm_->Goto(&end_label, call_target);
}
gasm_->Bind(&end_label);
args[0] = end_label.PhiAt(0);
- Node* instance_node = end_label.PhiAt(1);
const UseRetpoline use_retpoline =
untrusted_code_mitigations_ ? kRetpoline : kNoRetpoline;
@@ -3345,6 +3321,25 @@ Node* WasmGraphBuilder::BuildChangeIntPtrToInt64(Node* value) {
: value;
}
+Node* WasmGraphBuilder::BuildChangeUint32ToUintPtr(Node* node) {
+ if (mcgraph()->machine()->Is32()) return node;
+ // Fold instances of ChangeUint32ToUint64(IntConstant) directly.
+ Uint32Matcher matcher(node);
+ if (matcher.HasResolvedValue()) {
+ uintptr_t value = matcher.ResolvedValue();
+ return mcgraph()->IntPtrConstant(bit_cast<intptr_t>(value));
+ }
+ return gasm_->ChangeUint32ToUint64(node);
+}
+
+Node* WasmGraphBuilder::BuildSmiShiftBitsConstant() {
+ return gasm_->IntPtrConstant(kSmiShiftSize + kSmiTagSize);
+}
+
+Node* WasmGraphBuilder::BuildSmiShiftBitsConstant32() {
+ return Int32Constant(kSmiShiftSize + kSmiTagSize);
+}
+
Node* WasmGraphBuilder::BuildChangeInt32ToSmi(Node* value) {
// With pointer compression, only the lower 32 bits are used.
return COMPRESS_POINTERS_BOOL
@@ -3356,31 +3351,22 @@ Node* WasmGraphBuilder::BuildChangeInt32ToSmi(Node* value) {
Node* WasmGraphBuilder::BuildChangeUint31ToSmi(Node* value) {
return COMPRESS_POINTERS_BOOL
? gasm_->Word32Shl(value, BuildSmiShiftBitsConstant32())
- : gasm_->WordShl(Uint32ToUintptr(value),
+ : gasm_->WordShl(BuildChangeUint32ToUintPtr(value),
BuildSmiShiftBitsConstant());
}
-Node* WasmGraphBuilder::BuildSmiShiftBitsConstant() {
- return gasm_->IntPtrConstant(kSmiShiftSize + kSmiTagSize);
-}
-
-Node* WasmGraphBuilder::BuildSmiShiftBitsConstant32() {
- return Int32Constant(kSmiShiftSize + kSmiTagSize);
-}
-
Node* WasmGraphBuilder::BuildChangeSmiToInt32(Node* value) {
return COMPRESS_POINTERS_BOOL
- ? gasm_->Word32Sar(gasm_->TruncateInt64ToInt32(value),
- BuildSmiShiftBitsConstant32())
- : BuildTruncateIntPtrToInt32(BuildChangeSmiToIntPtr(value));
+ ? gasm_->Word32Sar(value, BuildSmiShiftBitsConstant32())
+ : BuildTruncateIntPtrToInt32(
+ gasm_->WordSar(value, BuildSmiShiftBitsConstant()));
}
Node* WasmGraphBuilder::BuildChangeSmiToIntPtr(Node* value) {
- if (COMPRESS_POINTERS_BOOL) {
- value = BuildChangeSmiToInt32(value);
- return BuildChangeInt32ToIntPtr(value);
- }
- return gasm_->WordSar(value, BuildSmiShiftBitsConstant());
+ return COMPRESS_POINTERS_BOOL
+ ? BuildChangeInt32ToIntPtr(
+ gasm_->Word32Sar(value, BuildSmiShiftBitsConstant32()))
+ : gasm_->WordSar(value, BuildSmiShiftBitsConstant());
}
Node* WasmGraphBuilder::BuildConvertUint32ToSmiWithSaturation(Node* value,
@@ -3565,8 +3551,8 @@ void WasmGraphBuilder::GetBaseAndOffsetForImportedMutableExternRefGlobal(
// From the index, calculate the actual offset in the FixedArray. This
// is kHeaderSize + (index * kTaggedSize). kHeaderSize can be acquired with
// wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(0).
- Node* index_times_tagged_size =
- gasm_->IntMul(Uint32ToUintptr(index), Int32Constant(kTaggedSize));
+ Node* index_times_tagged_size = gasm_->IntMul(
+ BuildChangeUint32ToUintPtr(index), Int32Constant(kTaggedSize));
*offset = gasm_->IntAdd(
index_times_tagged_size,
mcgraph()->IntPtrConstant(
@@ -3763,7 +3749,7 @@ Node* WasmGraphBuilder::BoundsCheckMem(uint8_t access_size, Node* index,
wasm::WasmCodePosition position,
EnforceBoundsCheck enforce_check) {
DCHECK_LE(1, access_size);
- if (!env_->module->is_memory64) index = Uint32ToUintptr(index);
+ if (!env_->module->is_memory64) index = BuildChangeUint32ToUintPtr(index);
if (!FLAG_wasm_bounds_checks) return index;
if (use_trap_handler() && enforce_check == kCanOmitBoundsCheck) {
@@ -4317,7 +4303,7 @@ Node* WasmGraphBuilder::BuildAsmjsLoadMem(MachineType type, Node* index) {
// Note that we check against the memory size ignoring the size of the
// stored value, which is conservative if misaligned. Technically, asm.js
// should never have misaligned accesses.
- index = Uint32ToUintptr(index);
+ index = BuildChangeUint32ToUintPtr(index);
Diamond bounds_check(graph(), mcgraph()->common(),
gasm_->UintLessThan(index, mem_size), BranchHint::kTrue);
bounds_check.Chain(control());
@@ -4356,17 +4342,6 @@ Node* WasmGraphBuilder::BuildAsmjsLoadMem(MachineType type, Node* index) {
return bounds_check.Phi(type.representation(), load, oob_value);
}
-Node* WasmGraphBuilder::Uint32ToUintptr(Node* node) {
- if (mcgraph()->machine()->Is32()) return node;
- // Fold instances of ChangeUint32ToUint64(IntConstant) directly.
- Uint32Matcher matcher(node);
- if (matcher.HasResolvedValue()) {
- uintptr_t value = matcher.ResolvedValue();
- return mcgraph()->IntPtrConstant(bit_cast<intptr_t>(value));
- }
- return gasm_->ChangeUint32ToUint64(node);
-}
-
Node* WasmGraphBuilder::BuildAsmjsStoreMem(MachineType type, Node* index,
Node* val) {
DCHECK_NOT_NULL(instance_cache_);
@@ -4391,7 +4366,7 @@ Node* WasmGraphBuilder::BuildAsmjsStoreMem(MachineType type, Node* index,
index = gasm_->Word32And(index, mem_mask);
}
- index = Uint32ToUintptr(index);
+ index = BuildChangeUint32ToUintPtr(index);
const Operator* store_op = mcgraph()->machine()->Store(StoreRepresentation(
type.representation(), WriteBarrierKind::kNoWriteBarrier));
Node* store = graph()->NewNode(store_op, mem_start, index, val, effect(),
@@ -5335,12 +5310,7 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
case wasm::kExprI64AtomicWait:
return {kSpecial, MachineType::Int64(), OperatorByType{nullptr}};
default:
-#if V8_HAS_CXX14_CONSTEXPR
UNREACHABLE();
-#else
- // Return something for older GCC.
- return {kSpecial, MachineType::Int64(), OperatorByType{nullptr}};
-#endif
}
}
};
@@ -5572,6 +5542,16 @@ Node* WasmGraphBuilder::StructNewWithRtt(uint32_t struct_index,
for (uint32_t i = 0; i < type->field_count(); i++) {
gasm_->StoreStructField(s, type, i, fields[i]);
}
+ if (type->field_count() == 0) {
+ static_assert(Heap::kMinObjectSizeInTaggedWords == 2 &&
+ WasmStruct::kHeaderSize == kTaggedSize,
+ "empty structs need exactly one padding field");
+ wasm::ValueType fake_type = wasm::kWasmAnyRef;
+ Node* padding_offset = gasm_->IntPtrConstant(
+ wasm::ObjectAccess::ToTagged(WasmStruct::kHeaderSize));
+ gasm_->StoreToObject(ObjectAccessForGCStores(fake_type), s, padding_offset,
+ RefNull());
+ }
return s;
}
@@ -6560,7 +6540,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
Node* BuildCallAllocateJSArray(Node* array_length, Node* context) {
// Since we don't check that args will fit in an array,
// we make sure this is true based on statically known limits.
- STATIC_ASSERT(wasm::kV8MaxWasmFunctionMultiReturns <=
+ STATIC_ASSERT(wasm::kV8MaxWasmFunctionReturns <=
JSArray::kInitialMaxFastElementArray);
return gasm_->CallBuiltin(Builtins::kWasmAllocateJSArray,
Operator::kEliminatable, array_length, context);
@@ -6588,15 +6568,8 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
wasm::kNoCodePosition, function_index, kCallContinues);
} else {
// Call to a wasm function defined in this module.
- // The call target is the jump table slot for that function.
- Node* jump_table_start =
- LOAD_INSTANCE_FIELD(JumpTableStart, MachineType::Pointer());
- Node* jump_table_offset =
- BuildLoadJumpTableOffsetFromExportedFunctionData(function_data);
- Node* jump_table_slot =
- gasm_->IntAdd(jump_table_start, jump_table_offset);
- args[0] = jump_table_slot;
-
+ // The (cached) call target is the jump table slot for that function.
+ args[0] = BuildLoadCallTargetFromExportedFunctionData(function_data);
BuildWasmCall(sig_, VectorOf(args), VectorOf(rets),
wasm::kNoCodePosition, nullptr, kNoRetpoline,
frame_state);
@@ -6670,9 +6643,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
case wasm::kF64: {
auto done = gasm_->MakeLabel();
gasm_->GotoIf(IsSmi(input), &done);
- Node* map = gasm_->LoadFromObject(
- MachineType::TaggedPointer(), input,
- wasm::ObjectAccess::ToTagged(HeapObject::kMapOffset));
+ Node* map = gasm_->LoadMap(input);
Node* heap_number_map = LOAD_ROOT(HeapNumberMap, heap_number_map);
Node* is_heap_number = gasm_->WordEqual(heap_number_map, map);
gasm_->GotoIf(is_heap_number, &done);
@@ -7030,6 +7001,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
BuildModifyThreadInWasmFlag(true);
+ Node* old_effect = effect();
Node* exception_branch = graph()->NewNode(
mcgraph()->common()->Branch(BranchHint::kTrue),
gasm_->WordEqual(return_value, mcgraph()->IntPtrConstant(0)),
@@ -7046,10 +7018,9 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
gasm_->Call(call_descriptor, call_target, return_value);
TerminateThrow(effect(), control());
- SetEffectControl(
- return_value,
- graph()->NewNode(mcgraph()->common()->IfTrue(), exception_branch));
- DCHECK_LT(sig_->return_count(), wasm::kV8MaxWasmFunctionMultiReturns);
+ SetEffectControl(old_effect, graph()->NewNode(mcgraph()->common()->IfTrue(),
+ exception_branch));
+ DCHECK_LT(sig_->return_count(), wasm::kV8MaxWasmFunctionReturns);
size_t return_count = sig_->return_count();
if (return_count == 0) {
Return(Int32Constant(0));
@@ -7090,9 +7061,12 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
// Load the original callable from the closure.
Node* func_data = gasm_->LoadFunctionDataFromJSFunction(closure);
- Node* callable = gasm_->LoadFromObject(
+ Node* pair = gasm_->LoadFromObject(
MachineType::AnyTagged(), func_data,
- wasm::ObjectAccess::ToTagged(WasmJSFunctionData::kCallableOffset));
+ wasm::ObjectAccess::ToTagged(WasmJSFunctionData::kRefOffset));
+ Node* callable = gasm_->LoadFromObject(
+ MachineType::AnyTagged(), pair,
+ wasm::ObjectAccess::ToTagged(Tuple2::kValue2Offset));
// Call the underlying closure.
base::SmallVector<Node*, 16> args(wasm_count + 7);
@@ -7478,7 +7452,7 @@ wasm::WasmCompilationResult CompileWasmMathIntrinsic(
wasm::CompilationEnv env(
nullptr, wasm::UseTrapHandler::kNoTrapHandler,
wasm::RuntimeExceptionSupport::kNoRuntimeExceptionSupport,
- wasm::WasmFeatures::All(), wasm::LowerSimd::kNoLowerSimd);
+ wasm::WasmFeatures::All());
WasmGraphBuilder builder(&env, mcgraph->zone(), mcgraph, sig,
source_positions);
@@ -7644,7 +7618,7 @@ MaybeHandle<Code> CompileWasmToJSWrapper(Isolate* isolate,
MachineGraph* mcgraph = zone->New<MachineGraph>(graph, common, machine);
WasmWrapperGraphBuilder builder(zone.get(), mcgraph, sig, nullptr, nullptr,
- nullptr, StubCallMode::kCallWasmRuntimeStub,
+ nullptr, StubCallMode::kCallBuiltinPointer,
wasm::WasmFeatures::FromIsolate(isolate));
builder.BuildWasmToJSWrapper(kind, expected_arity);
@@ -7792,7 +7766,7 @@ bool BuildGraphForWasmFunction(AccountingAllocator* allocator,
source_positions);
wasm::VoidResult graph_construction_result = wasm::BuildTFGraph(
allocator, env->enabled_features, env->module, &builder, detected,
- func_body, loop_infos, node_origins);
+ func_body, loop_infos, node_origins, func_index);
if (graph_construction_result.failed()) {
if (FLAG_trace_wasm_compiler) {
StdoutStream{} << "Compilation failed: "
@@ -7802,49 +7776,8 @@ bool BuildGraphForWasmFunction(AccountingAllocator* allocator,
return false;
}
- // Lower SIMD first, i64x2 nodes will be lowered to int64 nodes, then int64
- // lowering will take care of them.
auto sig = CreateMachineSignature(mcgraph->zone(), func_body.sig,
WasmGraphBuilder::kCalledFromWasm);
- if (builder.has_simd() &&
- (!CpuFeatures::SupportsWasmSimd128() || env->lower_simd)) {
- SimplifiedOperatorBuilder simplified(mcgraph->zone());
- SimdScalarLowering(mcgraph, &simplified, sig).LowerGraph();
-
- // SimdScalarLowering changes all v128 to 4 i32, so update the machine
- // signature for the call to LowerInt64.
- size_t return_count = 0;
- size_t param_count = 0;
- for (auto ret : sig->returns()) {
- return_count += ret == MachineRepresentation::kSimd128 ? 4 : 1;
- }
- for (auto param : sig->parameters()) {
- param_count += param == MachineRepresentation::kSimd128 ? 4 : 1;
- }
-
- Signature<MachineRepresentation>::Builder sig_builder(
- mcgraph->zone(), return_count, param_count);
- for (auto ret : sig->returns()) {
- if (ret == MachineRepresentation::kSimd128) {
- for (int i = 0; i < 4; ++i) {
- sig_builder.AddReturn(MachineRepresentation::kWord32);
- }
- } else {
- sig_builder.AddReturn(ret);
- }
- }
- for (auto param : sig->parameters()) {
- if (param == MachineRepresentation::kSimd128) {
- for (int i = 0; i < 4; ++i) {
- sig_builder.AddParam(MachineRepresentation::kWord32);
- }
- } else {
- sig_builder.AddParam(param);
- }
- }
- sig = sig_builder.Build();
- }
-
builder.LowerInt64(sig);
if (func_index >= FLAG_trace_wasm_ast_start &&
@@ -7919,8 +7852,7 @@ wasm::WasmCompilationResult ExecuteTurbofanWasmCompilation(
call_descriptor = GetI32WasmCallDescriptor(&zone, call_descriptor);
}
- if (ContainsSimd(func_body.sig) &&
- (!CpuFeatures::SupportsWasmSimd128() || env->lower_simd)) {
+ if (ContainsSimd(func_body.sig) && !CpuFeatures::SupportsWasmSimd128()) {
call_descriptor = GetI32WasmCallDescriptorForSimd(&zone, call_descriptor);
}
diff --git a/chromium/v8/src/compiler/wasm-compiler.h b/chromium/v8/src/compiler/wasm-compiler.h
index a77e76a05d9..bb8f5f5efc0 100644
--- a/chromium/v8/src/compiler/wasm-compiler.h
+++ b/chromium/v8/src/compiler/wasm-compiler.h
@@ -281,6 +281,7 @@ class WasmGraphBuilder {
//-----------------------------------------------------------------------
Node* BranchNoHint(Node* cond, Node** true_node, Node** false_node);
Node* BranchExpectFalse(Node* cond, Node** true_node, Node** false_node);
+ Node* BranchExpectTrue(Node* cond, Node** true_node, Node** false_node);
void TrapIfTrue(wasm::TrapReason reason, Node* cond,
wasm::WasmCodePosition position);
@@ -530,7 +531,6 @@ class WasmGraphBuilder {
Node* CheckBoundsAndAlignment(int8_t access_size, Node* index,
uint64_t offset, wasm::WasmCodePosition);
- Node* Uint32ToUintptr(Node*);
const Operator* GetSafeLoadOperator(int offset, wasm::ValueType type);
const Operator* GetSafeStoreOperator(int offset, wasm::ValueType type);
Node* BuildChangeEndiannessStore(Node* node, MachineRepresentation rep,
@@ -631,6 +631,7 @@ class WasmGraphBuilder {
Node* BuildTruncateIntPtrToInt32(Node* value);
Node* BuildChangeInt32ToIntPtr(Node* value);
Node* BuildChangeIntPtrToInt64(Node* value);
+ Node* BuildChangeUint32ToUintPtr(Node*);
Node* BuildChangeInt32ToSmi(Node* value);
Node* BuildChangeUint31ToSmi(Node* value);
Node* BuildSmiShiftBitsConstant();
@@ -700,7 +701,7 @@ class WasmGraphBuilder {
Node* BuildMultiReturnFixedArrayFromIterable(const wasm::FunctionSig* sig,
Node* iterable, Node* context);
- Node* BuildLoadJumpTableOffsetFromExportedFunctionData(Node* function_data);
+ Node* BuildLoadCallTargetFromExportedFunctionData(Node* function_data);
//-----------------------------------------------------------------------
// Operations involving the CEntry, a dependency we want to remove
diff --git a/chromium/v8/src/d8/async-hooks-wrapper.cc b/chromium/v8/src/d8/async-hooks-wrapper.cc
index 2538e78acbc..84191b98154 100644
--- a/chromium/v8/src/d8/async-hooks-wrapper.cc
+++ b/chromium/v8/src/d8/async-hooks-wrapper.cc
@@ -47,8 +47,7 @@ static AsyncHooksWrap* UnwrapHook(
AsyncHooks* hooks = PerIsolateData::Get(isolate)->GetAsyncHooks();
if (!hooks->async_hook_ctor.Get(isolate)->HasInstance(hook)) {
- isolate->ThrowException(String::NewFromUtf8Literal(
- isolate, "Invalid 'this' passed instead of AsyncHooks instance"));
+ isolate->ThrowError("Invalid 'this' passed instead of AsyncHooks instance");
return nullptr;
}
@@ -87,8 +86,7 @@ Local<Object> AsyncHooks::CreateHook(
Local<Context> currentContext = isolate->GetCurrentContext();
if (args.Length() != 1 || !args[0]->IsObject()) {
- isolate->ThrowException(String::NewFromUtf8Literal(
- isolate, "Invalid arguments passed to createHook"));
+ isolate->ThrowError("Invalid arguments passed to createHook");
return Local<Object>();
}
diff --git a/chromium/v8/src/d8/d8-console.cc b/chromium/v8/src/d8/d8-console.cc
index 7f0904e343f..fc76bab7ab2 100644
--- a/chromium/v8/src/d8/d8-console.cc
+++ b/chromium/v8/src/d8/d8-console.cc
@@ -19,7 +19,7 @@ void WriteToFile(const char* prefix, FILE* file, Isolate* isolate,
Local<Value> arg = args[i];
Local<String> str_obj;
- if (arg->IsSymbol()) arg = Local<Symbol>::Cast(arg)->Description();
+ if (arg->IsSymbol()) arg = Local<Symbol>::Cast(arg)->Description(isolate);
if (!arg->ToString(isolate->GetCurrentContext()).ToLocal(&str_obj)) return;
v8::String::Utf8Value str(isolate, str_obj);
@@ -43,8 +43,7 @@ void D8Console::Assert(const debug::ConsoleCallArguments& args,
// false-ish.
if (args.Length() > 0 && args[0]->BooleanValue(isolate_)) return;
WriteToFile("console.assert", stdout, isolate_, args);
- isolate_->ThrowException(v8::Exception::Error(
- v8::String::NewFromUtf8Literal(isolate_, "console.assert failed")));
+ isolate_->ThrowError("console.assert failed");
}
void D8Console::Log(const debug::ConsoleCallArguments& args,
diff --git a/chromium/v8/src/d8/d8-posix.cc b/chromium/v8/src/d8/d8-posix.cc
index 047b62ef2c4..fa30b9153d4 100644
--- a/chromium/v8/src/d8/d8-posix.cc
+++ b/chromium/v8/src/d8/d8-posix.cc
@@ -159,8 +159,8 @@ class ExecArgs {
bool Init(Isolate* isolate, Local<Value> arg0, Local<Array> command_args) {
String::Utf8Value prog(isolate, arg0);
if (*prog == nullptr) {
- isolate->ThrowException(String::NewFromUtf8Literal(
- isolate, "os.system(): String conversion of program name failed"));
+ isolate->ThrowError(
+ "os.system(): String conversion of program name failed");
return false;
}
int len = prog.length() + 3;
@@ -176,8 +176,8 @@ class ExecArgs {
String::Utf8Value utf8_arg(isolate, arg);
if (*utf8_arg == nullptr) {
exec_args_[i] = nullptr; // Consistent state for destructor.
- isolate->ThrowException(String::NewFromUtf8Literal(
- isolate, "os.system(): String conversion of argument failed."));
+ isolate->ThrowError(
+ "os.system(): String conversion of argument failed.");
return false;
}
int len = utf8_arg.length() + 1;
@@ -214,8 +214,7 @@ static bool GetTimeouts(const v8::FunctionCallbackInfo<v8::Value>& args,
->Int32Value(args.GetIsolate()->GetCurrentContext())
.FromJust();
} else {
- args.GetIsolate()->ThrowException(String::NewFromUtf8Literal(
- args.GetIsolate(), "system: Argument 4 must be a number"));
+ args.GetIsolate()->ThrowError("system: Argument 4 must be a number");
return false;
}
}
@@ -225,14 +224,19 @@ static bool GetTimeouts(const v8::FunctionCallbackInfo<v8::Value>& args,
->Int32Value(args.GetIsolate()->GetCurrentContext())
.FromJust();
} else {
- args.GetIsolate()->ThrowException(String::NewFromUtf8Literal(
- args.GetIsolate(), "system: Argument 3 must be a number"));
+ args.GetIsolate()->ThrowError("system: Argument 3 must be a number");
return false;
}
}
return true;
}
+namespace {
+v8::Local<v8::String> v8_strerror(v8::Isolate* isolate, int err) {
+ return v8::String::NewFromUtf8(isolate, strerror(err)).ToLocalChecked();
+}
+} // namespace
+
static const int kReadFD = 0;
static const int kWriteFD = 1;
@@ -267,8 +271,7 @@ static bool ChildLaunchedOK(Isolate* isolate, int* exec_error_fds) {
bytes_read = read(exec_error_fds[kReadFD], &err, sizeof(err));
} while (bytes_read == -1 && errno == EINTR);
if (bytes_read != 0) {
- isolate->ThrowException(
- String::NewFromUtf8(isolate, strerror(err)).ToLocalChecked());
+ isolate->ThrowError(v8_strerror(isolate, err));
return false;
}
return true;
@@ -286,8 +289,7 @@ static Local<Value> GetStdout(Isolate* isolate, int child_fd,
char buffer[kStdoutReadBufferSize];
if (fcntl(child_fd, F_SETFL, O_NONBLOCK) != 0) {
- return isolate->ThrowException(
- String::NewFromUtf8(isolate, strerror(errno)).ToLocalChecked());
+ return isolate->ThrowError(v8_strerror(isolate, errno));
}
int bytes_read;
@@ -298,8 +300,7 @@ static Local<Value> GetStdout(Isolate* isolate, int child_fd,
if (errno == EAGAIN) {
if (!WaitOnFD(child_fd, read_timeout, total_timeout, start_time) ||
(TimeIsOut(start_time, total_timeout))) {
- return isolate->ThrowException(String::NewFromUtf8Literal(
- isolate, "Timed out waiting for output"));
+ return isolate->ThrowError("Timed out waiting for output");
}
continue;
} else if (errno == EINTR) {
@@ -357,8 +358,7 @@ static bool WaitForChild(Isolate* isolate, int pid,
if (useconds < 1000000) useconds <<= 1;
if ((read_timeout != -1 && useconds / 1000 > read_timeout) ||
(TimeIsOut(start_time, total_timeout))) {
- isolate->ThrowException(String::NewFromUtf8Literal(
- isolate, "Timed out waiting for process to terminate"));
+ isolate->ThrowError("Timed out waiting for process to terminate");
kill(pid, SIGINT);
return false;
}
@@ -367,16 +367,14 @@ static bool WaitForChild(Isolate* isolate, int pid,
char message[999];
snprintf(message, sizeof(message), "Child killed by signal %d",
child_info.si_status);
- isolate->ThrowException(
- String::NewFromUtf8(isolate, message).ToLocalChecked());
+ isolate->ThrowError(message);
return false;
}
if (child_info.si_code == CLD_EXITED && child_info.si_status != 0) {
char message[999];
snprintf(message, sizeof(message), "Child exited with status %d",
child_info.si_status);
- isolate->ThrowException(
- String::NewFromUtf8(isolate, message).ToLocalChecked());
+ isolate->ThrowError(message);
return false;
}
@@ -389,8 +387,7 @@ static bool WaitForChild(Isolate* isolate, int pid,
char message[999];
snprintf(message, sizeof(message), "Child killed by signal %d",
WTERMSIG(child_status));
- isolate->ThrowException(
- String::NewFromUtf8(isolate, message).ToLocalChecked());
+ isolate->ThrowError(message);
return false;
}
if (WEXITSTATUS(child_status) != 0) {
@@ -398,8 +395,7 @@ static bool WaitForChild(Isolate* isolate, int pid,
int exit_status = WEXITSTATUS(child_status);
snprintf(message, sizeof(message), "Child exited with status %d",
exit_status);
- isolate->ThrowException(
- String::NewFromUtf8(isolate, message).ToLocalChecked());
+ isolate->ThrowError(message);
return false;
}
@@ -419,8 +415,7 @@ void Shell::System(const v8::FunctionCallbackInfo<v8::Value>& args) {
Local<Array> command_args;
if (args.Length() > 1) {
if (!args[1]->IsArray()) {
- args.GetIsolate()->ThrowException(String::NewFromUtf8Literal(
- args.GetIsolate(), "system: Argument 2 must be an array"));
+ args.GetIsolate()->ThrowError("system: Argument 2 must be an array");
return;
}
command_args = args[1].As<Array>();
@@ -428,13 +423,11 @@ void Shell::System(const v8::FunctionCallbackInfo<v8::Value>& args) {
command_args = Array::New(args.GetIsolate(), 0);
}
if (command_args->Length() > ExecArgs::kMaxArgs) {
- args.GetIsolate()->ThrowException(String::NewFromUtf8Literal(
- args.GetIsolate(), "Too many arguments to system()"));
+ args.GetIsolate()->ThrowError("Too many arguments to system()");
return;
}
if (args.Length() < 1) {
- args.GetIsolate()->ThrowException(String::NewFromUtf8Literal(
- args.GetIsolate(), "Too few arguments to system()"));
+ args.GetIsolate()->ThrowError("Too few arguments to system()");
return;
}
@@ -449,13 +442,11 @@ void Shell::System(const v8::FunctionCallbackInfo<v8::Value>& args) {
int stdout_fds[2];
if (pipe(exec_error_fds) != 0) {
- args.GetIsolate()->ThrowException(
- String::NewFromUtf8Literal(args.GetIsolate(), "pipe syscall failed."));
+ args.GetIsolate()->ThrowError("pipe syscall failed.");
return;
}
if (pipe(stdout_fds) != 0) {
- args.GetIsolate()->ThrowException(
- String::NewFromUtf8Literal(args.GetIsolate(), "pipe syscall failed."));
+ args.GetIsolate()->ThrowError("pipe syscall failed.");
return;
}
@@ -493,29 +484,24 @@ void Shell::System(const v8::FunctionCallbackInfo<v8::Value>& args) {
void Shell::ChangeDirectory(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 1) {
- args.GetIsolate()->ThrowException(String::NewFromUtf8Literal(
- args.GetIsolate(), "chdir() takes one argument"));
+ args.GetIsolate()->ThrowError("chdir() takes one argument");
return;
}
String::Utf8Value directory(args.GetIsolate(), args[0]);
if (*directory == nullptr) {
- args.GetIsolate()->ThrowException(String::NewFromUtf8Literal(
- args.GetIsolate(),
- "os.chdir(): String conversion of argument failed."));
+ args.GetIsolate()->ThrowError(
+ "os.chdir(): String conversion of argument failed.");
return;
}
if (chdir(*directory) != 0) {
- args.GetIsolate()->ThrowException(
- String::NewFromUtf8(args.GetIsolate(), strerror(errno))
- .ToLocalChecked());
+ args.GetIsolate()->ThrowError(v8_strerror(args.GetIsolate(), errno));
return;
}
}
void Shell::SetUMask(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 1) {
- args.GetIsolate()->ThrowException(String::NewFromUtf8Literal(
- args.GetIsolate(), "umask() takes one argument"));
+ args.GetIsolate()->ThrowError("umask() takes one argument");
return;
}
if (args[0]->IsNumber()) {
@@ -524,8 +510,7 @@ void Shell::SetUMask(const v8::FunctionCallbackInfo<v8::Value>& args) {
args.GetReturnValue().Set(previous);
return;
} else {
- args.GetIsolate()->ThrowException(String::NewFromUtf8Literal(
- args.GetIsolate(), "umask() argument must be numeric"));
+ args.GetIsolate()->ThrowError("umask() argument must be numeric");
return;
}
}
@@ -534,13 +519,11 @@ static bool CheckItsADirectory(Isolate* isolate, char* directory) {
struct stat stat_buf;
int stat_result = stat(directory, &stat_buf);
if (stat_result != 0) {
- isolate->ThrowException(
- String::NewFromUtf8(isolate, strerror(errno)).ToLocalChecked());
+ isolate->ThrowError(v8_strerror(isolate, errno));
return false;
}
if ((stat_buf.st_mode & S_IFDIR) != 0) return true;
- isolate->ThrowException(
- String::NewFromUtf8(isolate, strerror(EEXIST)).ToLocalChecked());
+ isolate->ThrowError(v8_strerror(isolate, EEXIST));
return false;
}
@@ -554,8 +537,7 @@ static bool mkdirp(Isolate* isolate, char* directory, mode_t mask) {
} else if (errno == ENOENT) { // Intermediate path element is missing.
char* last_slash = strrchr(directory, '/');
if (last_slash == nullptr) {
- isolate->ThrowException(
- String::NewFromUtf8(isolate, strerror(errno)).ToLocalChecked());
+ isolate->ThrowError(v8_strerror(isolate, errno));
return false;
}
*last_slash = 0;
@@ -566,12 +548,10 @@ static bool mkdirp(Isolate* isolate, char* directory, mode_t mask) {
if (errno == EEXIST) {
return CheckItsADirectory(isolate, directory);
}
- isolate->ThrowException(
- String::NewFromUtf8(isolate, strerror(errno)).ToLocalChecked());
+ isolate->ThrowError(v8_strerror(isolate, errno));
return false;
} else {
- isolate->ThrowException(
- String::NewFromUtf8(isolate, strerror(errno)).ToLocalChecked());
+ isolate->ThrowError(v8_strerror(isolate, errno));
return false;
}
}
@@ -584,20 +564,17 @@ void Shell::MakeDirectory(const v8::FunctionCallbackInfo<v8::Value>& args) {
->Int32Value(args.GetIsolate()->GetCurrentContext())
.FromJust();
} else {
- args.GetIsolate()->ThrowException(String::NewFromUtf8Literal(
- args.GetIsolate(), "mkdirp() second argument must be numeric"));
+ args.GetIsolate()->ThrowError("mkdirp() second argument must be numeric");
return;
}
} else if (args.Length() != 1) {
- args.GetIsolate()->ThrowException(String::NewFromUtf8Literal(
- args.GetIsolate(), "mkdirp() takes one or two arguments"));
+ args.GetIsolate()->ThrowError("mkdirp() takes one or two arguments");
return;
}
String::Utf8Value directory(args.GetIsolate(), args[0]);
if (*directory == nullptr) {
- args.GetIsolate()->ThrowException(String::NewFromUtf8Literal(
- args.GetIsolate(),
- "os.mkdirp(): String conversion of argument failed."));
+ args.GetIsolate()->ThrowError(
+ "os.mkdirp(): String conversion of argument failed.");
return;
}
mkdirp(args.GetIsolate(), *directory, mask);
@@ -605,15 +582,13 @@ void Shell::MakeDirectory(const v8::FunctionCallbackInfo<v8::Value>& args) {
void Shell::RemoveDirectory(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 1) {
- args.GetIsolate()->ThrowException(String::NewFromUtf8Literal(
- args.GetIsolate(), "rmdir() takes one or two arguments"));
+ args.GetIsolate()->ThrowError("rmdir() takes one or two arguments");
return;
}
String::Utf8Value directory(args.GetIsolate(), args[0]);
if (*directory == nullptr) {
- args.GetIsolate()->ThrowException(String::NewFromUtf8Literal(
- args.GetIsolate(),
- "os.rmdir(): String conversion of argument failed."));
+ args.GetIsolate()->ThrowError(
+ "os.rmdir(): String conversion of argument failed.");
return;
}
rmdir(*directory);
@@ -621,22 +596,19 @@ void Shell::RemoveDirectory(const v8::FunctionCallbackInfo<v8::Value>& args) {
void Shell::SetEnvironment(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 2) {
- args.GetIsolate()->ThrowException(String::NewFromUtf8Literal(
- args.GetIsolate(), "setenv() takes two arguments"));
+ args.GetIsolate()->ThrowError("setenv() takes two arguments");
return;
}
String::Utf8Value var(args.GetIsolate(), args[0]);
String::Utf8Value value(args.GetIsolate(), args[1]);
if (*var == nullptr) {
- args.GetIsolate()->ThrowException(String::NewFromUtf8Literal(
- args.GetIsolate(),
- "os.setenv(): String conversion of variable name failed."));
+ args.GetIsolate()->ThrowError(
+ "os.setenv(): String conversion of variable name failed.");
return;
}
if (*value == nullptr) {
- args.GetIsolate()->ThrowException(String::NewFromUtf8Literal(
- args.GetIsolate(),
- "os.setenv(): String conversion of variable contents failed."));
+ args.GetIsolate()->ThrowError(
+ "os.setenv(): String conversion of variable contents failed.");
return;
}
setenv(*var, *value, 1);
@@ -644,15 +616,13 @@ void Shell::SetEnvironment(const v8::FunctionCallbackInfo<v8::Value>& args) {
void Shell::UnsetEnvironment(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 1) {
- args.GetIsolate()->ThrowException(String::NewFromUtf8Literal(
- args.GetIsolate(), "unsetenv() takes one argument"));
+ args.GetIsolate()->ThrowError("unsetenv() takes one argument");
return;
}
String::Utf8Value var(args.GetIsolate(), args[0]);
if (*var == nullptr) {
- args.GetIsolate()->ThrowException(String::NewFromUtf8Literal(
- args.GetIsolate(),
- "os.setenv(): String conversion of variable name failed."));
+ args.GetIsolate()->ThrowError(
+ "os.setenv(): String conversion of variable name failed.");
return;
}
unsetenv(*var);
diff --git a/chromium/v8/src/d8/d8-test.cc b/chromium/v8/src/d8/d8-test.cc
index e5eb5e419bf..741b838b760 100644
--- a/chromium/v8/src/d8/d8-test.cc
+++ b/chromium/v8/src/d8/d8-test.cc
@@ -21,16 +21,29 @@
namespace v8 {
namespace {
+
+#define CHECK_SELF_OR_FALLBACK(return_value) \
+ if (!self) { \
+ options.fallback = 1; \
+ return return_value; \
+ }
+
+#define CHECK_SELF_OR_THROW() \
+ if (!self) { \
+ args.GetIsolate()->ThrowError( \
+ "This method is not defined on objects inheriting from FastCAPI."); \
+ return; \
+ }
+
class FastCApiObject {
public:
- static double AddAllFastCallback(ApiObject receiver, bool should_fallback,
+ static double AddAllFastCallback(Local<Object> receiver, bool should_fallback,
int32_t arg_i32, uint32_t arg_u32,
int64_t arg_i64, uint64_t arg_u64,
float arg_f32, double arg_f64,
FastApiCallbackOptions& options) {
- Value* receiver_value = reinterpret_cast<Value*>(&receiver);
- CHECK(receiver_value->IsObject());
- FastCApiObject* self = UnwrapObject(Object::Cast(receiver_value));
+ FastCApiObject* self = UnwrapObject(receiver);
+ CHECK_SELF_OR_FALLBACK(0);
self->fast_call_count_++;
if (should_fallback) {
@@ -42,33 +55,42 @@ class FastCApiObject {
static_cast<double>(arg_i64) + static_cast<double>(arg_u64) +
static_cast<double>(arg_f32) + arg_f64;
}
+ static double AddAllFastCallback_5Args(Local<Object> receiver,
+ bool should_fallback, int32_t arg_i32,
+ uint32_t arg_u32, int64_t arg_i64,
+ uint64_t arg_u64, float arg_f32,
+ FastApiCallbackOptions& options) {
+ return AddAllFastCallback(receiver, should_fallback, arg_i32, arg_u32,
+ arg_i64, arg_u64, arg_f32, 0, options);
+ }
static void AddAllSlowCallback(const FunctionCallbackInfo<Value>& args) {
Isolate* isolate = args.GetIsolate();
- FastCApiObject* self = UnwrapObject(*args.This());
+ FastCApiObject* self = UnwrapObject(args.This());
+ CHECK_SELF_OR_THROW();
self->slow_call_count_++;
HandleScope handle_scope(isolate);
double sum = 0;
- if (args.Length() > 1) {
+ if (args.Length() > 1 && args[1]->IsNumber()) {
sum += args[1]->Int32Value(isolate->GetCurrentContext()).FromJust();
}
- if (args.Length() > 2) {
+ if (args.Length() > 2 && args[2]->IsNumber()) {
sum += args[2]->Uint32Value(isolate->GetCurrentContext()).FromJust();
}
- if (args.Length() > 3) {
+ if (args.Length() > 3 && args[3]->IsNumber()) {
sum += args[3]->IntegerValue(isolate->GetCurrentContext()).FromJust();
}
- if (args.Length() > 4) {
+ if (args.Length() > 4 && args[4]->IsNumber()) {
sum += args[4]->IntegerValue(isolate->GetCurrentContext()).FromJust();
}
- if (args.Length() > 5) {
+ if (args.Length() > 5 && args[5]->IsNumber()) {
sum += args[5]->NumberValue(isolate->GetCurrentContext()).FromJust();
} else {
sum += std::numeric_limits<double>::quiet_NaN();
}
- if (args.Length() > 6) {
+ if (args.Length() > 6 && args[6]->IsNumber()) {
sum += args[6]->NumberValue(isolate->GetCurrentContext()).FromJust();
} else {
sum += std::numeric_limits<double>::quiet_NaN();
@@ -77,12 +99,12 @@ class FastCApiObject {
args.GetReturnValue().Set(Number::New(isolate, sum));
}
- static int Add32BitIntFastCallback(ApiObject receiver, bool should_fallback,
- int32_t arg_i32, uint32_t arg_u32,
+ static int Add32BitIntFastCallback(v8::Local<v8::Object> receiver,
+ bool should_fallback, int32_t arg_i32,
+ uint32_t arg_u32,
FastApiCallbackOptions& options) {
- Value* receiver_value = reinterpret_cast<Value*>(&receiver);
- CHECK(receiver_value->IsObject());
- FastCApiObject* self = UnwrapObject(Object::Cast(receiver_value));
+ FastCApiObject* self = UnwrapObject(receiver);
+ CHECK_SELF_OR_FALLBACK(0);
self->fast_call_count_++;
if (should_fallback) {
@@ -95,40 +117,104 @@ class FastCApiObject {
static void Add32BitIntSlowCallback(const FunctionCallbackInfo<Value>& args) {
Isolate* isolate = args.GetIsolate();
- FastCApiObject* self = UnwrapObject(*args.This());
+ FastCApiObject* self = UnwrapObject(args.This());
+ CHECK_SELF_OR_THROW();
self->slow_call_count_++;
HandleScope handle_scope(isolate);
double sum = 0;
- if (args.Length() > 1) {
+ if (args.Length() > 1 && args[1]->IsNumber()) {
sum += args[1]->Int32Value(isolate->GetCurrentContext()).FromJust();
}
- if (args.Length() > 2) {
+ if (args.Length() > 2 && args[2]->IsNumber()) {
sum += args[2]->Uint32Value(isolate->GetCurrentContext()).FromJust();
}
args.GetReturnValue().Set(Number::New(isolate, sum));
}
+ static bool IsFastCApiObjectFastCallback(v8::Local<v8::Object> receiver,
+ bool should_fallback,
+ v8::Local<v8::Value> arg,
+ FastApiCallbackOptions& options) {
+ FastCApiObject* self = UnwrapObject(receiver);
+ CHECK_SELF_OR_FALLBACK(false);
+ self->fast_call_count_++;
+
+ if (should_fallback) {
+ options.fallback = 1;
+ return false;
+ }
+
+ if (!arg->IsObject()) {
+ return false;
+ }
+ Local<Object> object = arg.As<Object>();
+ if (!IsValidApiObject(object)) return false;
+
+ internal::Isolate* i_isolate =
+ internal::IsolateFromNeverReadOnlySpaceObject(
+ *reinterpret_cast<internal::Address*>(*object));
+ CHECK_NOT_NULL(i_isolate);
+ Isolate* isolate = reinterpret_cast<Isolate*>(i_isolate);
+ HandleScope handle_scope(isolate);
+ return PerIsolateData::Get(isolate)
+ ->GetTestApiObjectCtor()
+ ->IsLeafTemplateForApiObject(object);
+ }
+ static void IsFastCApiObjectSlowCallback(
+ const FunctionCallbackInfo<Value>& args) {
+ Isolate* isolate = args.GetIsolate();
+
+ FastCApiObject* self = UnwrapObject(args.This());
+ CHECK_SELF_OR_THROW();
+ self->slow_call_count_++;
+
+ HandleScope handle_scope(isolate);
+
+ bool result = false;
+ if (args.Length() < 2) {
+ args.GetIsolate()->ThrowError(
+ "is_valid_api_object should be called with 2 arguments");
+ return;
+ }
+ if (args[1]->IsObject()) {
+ Local<Object> object = args[1].As<Object>();
+ if (!IsValidApiObject(object)) {
+ result = false;
+ } else {
+ result = PerIsolateData::Get(args.GetIsolate())
+ ->GetTestApiObjectCtor()
+ ->IsLeafTemplateForApiObject(object);
+ }
+ }
+
+ args.GetReturnValue().Set(Boolean::New(isolate, result));
+ }
+
static void FastCallCount(const FunctionCallbackInfo<Value>& args) {
- FastCApiObject* self = UnwrapObject(*args.This());
+ FastCApiObject* self = UnwrapObject(args.This());
+ CHECK_SELF_OR_THROW();
args.GetReturnValue().Set(
Number::New(args.GetIsolate(), self->fast_call_count()));
}
static void SlowCallCount(const FunctionCallbackInfo<Value>& args) {
- FastCApiObject* self = UnwrapObject(*args.This());
+ FastCApiObject* self = UnwrapObject(args.This());
+ CHECK_SELF_OR_THROW();
args.GetReturnValue().Set(
Number::New(args.GetIsolate(), self->slow_call_count()));
}
static void ResetCounts(const FunctionCallbackInfo<Value>& args) {
- FastCApiObject* self = UnwrapObject(*args.This());
+ FastCApiObject* self = UnwrapObject(args.This());
+ CHECK_SELF_OR_THROW();
self->reset_counts();
args.GetReturnValue().Set(Undefined(args.GetIsolate()));
}
- static void SupportsFPParams(const FunctionCallbackInfo<Value>& info) {
- FastCApiObject* self = UnwrapObject(*info.This());
- info.GetReturnValue().Set(self->supports_fp_params_);
+ static void SupportsFPParams(const FunctionCallbackInfo<Value>& args) {
+ FastCApiObject* self = UnwrapObject(args.This());
+ CHECK_SELF_OR_THROW();
+ args.GetReturnValue().Set(self->supports_fp_params_);
}
int fast_call_count() const { return fast_call_count_; }
@@ -141,12 +227,14 @@ class FastCApiObject {
static const int kV8WrapperObjectIndex = 1;
private:
- static FastCApiObject* UnwrapObject(Object* object) {
- i::Address addr = *reinterpret_cast<i::Address*>(object);
+ static bool IsValidApiObject(Local<Object> object) {
+ i::Address addr = *reinterpret_cast<i::Address*>(*object);
auto instance_type = i::Internals::GetInstanceType(addr);
- if (instance_type != i::Internals::kJSObjectType &&
- instance_type != i::Internals::kJSApiObjectType &&
- instance_type != i::Internals::kJSSpecialApiObjectType) {
+ return (instance_type == i::Internals::kJSApiObjectType ||
+ instance_type == i::Internals::kJSSpecialApiObjectType);
+ }
+ static FastCApiObject* UnwrapObject(Local<Object> object) {
+ if (!IsValidApiObject(object)) {
return nullptr;
}
FastCApiObject* wrapped = reinterpret_cast<FastCApiObject*>(
@@ -162,18 +250,18 @@ class FastCApiObject {
#endif // V8_ENABLE_FP_PARAMS_IN_C_LINKAGE
};
+#undef CHECK_SELF_OR_THROW
+#undef CHECK_SELF_OR_FALLBACK
+
// The object is statically initialized for simplicity, typically the embedder
// will take care of managing their C++ objects lifetime.
thread_local FastCApiObject kFastCApiObject;
} // namespace
-// TODO(mslekova): Rename the fast_c_api helper to FastCAPI.
-void CreateObject(const FunctionCallbackInfo<Value>& info) {
+void CreateFastCAPIObject(const FunctionCallbackInfo<Value>& info) {
if (!info.IsConstructCall()) {
- info.GetIsolate()->ThrowException(
- v8::Exception::Error(String::NewFromUtf8Literal(
- info.GetIsolate(),
- "FastCAPI helper must be constructed with new.")));
+ info.GetIsolate()->ThrowError(
+ "FastCAPI helper must be constructed with new.");
return;
}
Local<Object> api_object = info.Holder();
@@ -189,7 +277,8 @@ void CreateObject(const FunctionCallbackInfo<Value>& info) {
Local<FunctionTemplate> Shell::CreateTestFastCApiTemplate(Isolate* isolate) {
Local<FunctionTemplate> api_obj_ctor =
- FunctionTemplate::New(isolate, CreateObject);
+ FunctionTemplate::New(isolate, CreateFastCAPIObject);
+ PerIsolateData::Get(isolate)->SetTestApiObjectCtor(api_obj_ctor);
Local<Signature> signature = Signature::New(isolate, api_obj_ctor);
{
CFunction add_all_c_func =
@@ -200,6 +289,19 @@ Local<FunctionTemplate> Shell::CreateTestFastCApiTemplate(Isolate* isolate) {
Local<Value>(), signature, 1,
ConstructorBehavior::kThrow,
SideEffectType::kHasSideEffect, &add_all_c_func));
+
+ // To test function overloads.
+ CFunction add_all_5args_c_func =
+ CFunction::Make(FastCApiObject::AddAllFastCallback_5Args);
+ const CFunction c_function_overloads[] = {add_all_c_func,
+ add_all_5args_c_func};
+ api_obj_ctor->PrototypeTemplate()->Set(
+ isolate, "overloaded_add_all",
+ FunctionTemplate::NewWithCFunctionOverloads(
+ isolate, FastCApiObject::AddAllSlowCallback, Local<Value>(),
+ signature, 1, ConstructorBehavior::kThrow,
+ SideEffectType::kHasSideEffect, {c_function_overloads, 2}));
+
CFunction add_32bit_int_c_func =
CFunction::Make(FastCApiObject::Add32BitIntFastCallback);
api_obj_ctor->PrototypeTemplate()->Set(
@@ -208,6 +310,14 @@ Local<FunctionTemplate> Shell::CreateTestFastCApiTemplate(Isolate* isolate) {
isolate, FastCApiObject::Add32BitIntSlowCallback, Local<Value>(),
signature, 1, ConstructorBehavior::kThrow,
SideEffectType::kHasSideEffect, &add_32bit_int_c_func));
+ CFunction is_valid_api_object_c_func =
+ CFunction::Make(FastCApiObject::IsFastCApiObjectFastCallback);
+ api_obj_ctor->PrototypeTemplate()->Set(
+ isolate, "is_fast_c_api_object",
+ FunctionTemplate::New(
+ isolate, FastCApiObject::IsFastCApiObjectSlowCallback,
+ Local<Value>(), signature, 1, ConstructorBehavior::kThrow,
+ SideEffectType::kHasSideEffect, &is_valid_api_object_c_func));
api_obj_ctor->PrototypeTemplate()->Set(
isolate, "fast_call_count",
FunctionTemplate::New(isolate, FastCApiObject::FastCallCount,
@@ -227,4 +337,20 @@ Local<FunctionTemplate> Shell::CreateTestFastCApiTemplate(Isolate* isolate) {
return api_obj_ctor;
}
+void CreateLeafInterfaceObject(const FunctionCallbackInfo<Value>& info) {
+ if (!info.IsConstructCall()) {
+ info.GetIsolate()->ThrowError(
+ "LeafInterfaceType helper must be constructed with new.");
+ }
+}
+
+Local<FunctionTemplate> Shell::CreateLeafInterfaceTypeTemplate(
+ Isolate* isolate) {
+ Local<FunctionTemplate> leaf_object_ctor =
+ FunctionTemplate::New(isolate, CreateLeafInterfaceObject);
+ leaf_object_ctor->SetClassName(
+ String::NewFromUtf8Literal(isolate, "LeafInterfaceType"));
+ return leaf_object_ctor;
+}
+
} // namespace v8
diff --git a/chromium/v8/src/d8/d8.cc b/chromium/v8/src/d8/d8.cc
index d13e424bb15..8c9a4c3ae60 100644
--- a/chromium/v8/src/d8/d8.cc
+++ b/chromium/v8/src/d8/d8.cc
@@ -32,6 +32,7 @@
#include "src/base/platform/platform.h"
#include "src/base/platform/time.h"
#include "src/base/platform/wrappers.h"
+#include "src/base/sanitizer/msan.h"
#include "src/base/sys-info.h"
#include "src/d8/d8-console.h"
#include "src/d8/d8-platforms.h"
@@ -53,7 +54,6 @@
#include "src/parsing/parsing.h"
#include "src/parsing/scanner-character-streams.h"
#include "src/profiler/profile-generator.h"
-#include "src/sanitizer/msan.h"
#include "src/snapshot/snapshot.h"
#include "src/tasks/cancelable-task.h"
#include "src/trap-handler/trap-handler.h"
@@ -333,11 +333,6 @@ class MultiMappedAllocator : public ArrayBufferAllocatorBase {
v8::Platform* g_default_platform;
std::unique_ptr<v8::Platform> g_platform;
-static Local<Value> Throw(Isolate* isolate, const char* message) {
- return isolate->ThrowException(v8::Exception::Error(
- String::NewFromUtf8(isolate, message).ToLocalChecked()));
-}
-
static MaybeLocal<Value> TryGetValue(v8::Isolate* isolate,
Local<Context> context,
Local<v8::Object> object,
@@ -355,13 +350,13 @@ static Local<Value> GetValue(v8::Isolate* isolate, Local<Context> context,
std::shared_ptr<Worker> GetWorkerFromInternalField(Isolate* isolate,
Local<Object> object) {
if (object->InternalFieldCount() != 1) {
- Throw(isolate, "this is not a Worker");
+ isolate->ThrowError("this is not a Worker");
return nullptr;
}
i::Handle<i::Object> handle = Utils::OpenHandle(*object->GetInternalField(0));
if (handle->IsSmi()) {
- Throw(isolate, "Worker is defunct because main thread is terminating");
+ isolate->ThrowError("Worker is defunct because main thread is terminating");
return nullptr;
}
auto managed = i::Handle<i::Managed<Worker>>::cast(handle);
@@ -696,6 +691,12 @@ bool Shell::ExecuteString(Isolate* isolate, Local<String> source,
Local<Context> context(isolate->GetCurrentContext());
ScriptOrigin origin(isolate, name);
+ for (int i = 1; i < options.repeat_compile; ++i) {
+ HandleScope handle_scope(isolate);
+ if (CompileString<Script>(isolate, context, source, origin).IsEmpty()) {
+ return false;
+ }
+ }
Local<Script> script;
if (!CompileString<Script>(isolate, context, source, origin)
.ToLocal(&script)) {
@@ -710,6 +711,9 @@ bool Shell::ExecuteString(Isolate* isolate, Local<String> source,
StoreInCodeCache(isolate, source, cached_data);
delete cached_data;
}
+ if (options.compile_only) {
+ return true;
+ }
maybe_result = script->Run(realm);
if (options.code_cache_options ==
ShellOptions::CodeCacheOptions::kProduceCacheAfterExecute) {
@@ -728,7 +732,7 @@ bool Shell::ExecuteString(Isolate* isolate, Local<String> source,
if (options.web_snapshot_config) {
std::vector<std::string> exports;
if (!ReadLines(options.web_snapshot_config, exports)) {
- Throw(isolate, "Web snapshots: unable to read config");
+ isolate->ThrowError("Web snapshots: unable to read config");
CHECK(try_catch.HasCaught());
ReportException(isolate, &try_catch);
return false;
@@ -965,7 +969,8 @@ MaybeLocal<Module> Shell::FetchModuleTree(Local<Module> referrer,
CHECK(specifier_it != d->module_to_specifier_map.end());
msg += "\n imported by " + specifier_it->second;
}
- Throw(isolate, msg.c_str());
+ isolate->ThrowError(
+ v8::String::NewFromUtf8(isolate, msg.c_str()).ToLocalChecked());
return MaybeLocal<Module>();
}
ScriptOrigin origin(
@@ -1024,7 +1029,7 @@ MaybeLocal<Module> Shell::FetchModuleTree(Local<Module> referrer,
context, import_assertions, true);
if (request_module_type == ModuleType::kInvalid) {
- Throw(isolate, "Invalid module type was asserted");
+ isolate->ThrowError("Invalid module type was asserted");
return MaybeLocal<Module>();
}
@@ -1211,7 +1216,7 @@ void Shell::DoHostImportModuleDynamically(void* import_data) {
try_catch.SetVerbose(true);
if (module_type == ModuleType::kInvalid) {
- Throw(isolate, "Invalid module type was asserted");
+ isolate->ThrowError("Invalid module type was asserted");
CHECK(try_catch.HasCaught());
resolver->Reject(realm, try_catch.Exception()).ToChecked();
return;
@@ -1358,7 +1363,7 @@ bool Shell::ExecuteWebSnapshot(Isolate* isolate, const char* file_name) {
std::unique_ptr<uint8_t[]> snapshot_data(
reinterpret_cast<uint8_t*>(ReadChars(absolute_path.c_str(), &length)));
if (length == 0) {
- Throw(isolate, "Error reading the web snapshot");
+ isolate->ThrowError("Error reading the web snapshot");
DCHECK(try_catch.HasCaught());
ReportException(isolate, &try_catch);
return false;
@@ -1468,6 +1473,14 @@ void PerIsolateData::DeleteDynamicImportData(DynamicImportData* data) {
delete data;
}
+Local<FunctionTemplate> PerIsolateData::GetTestApiObjectCtor() const {
+ return test_api_object_ctor_.Get(isolate_);
+}
+
+void PerIsolateData::SetTestApiObjectCtor(Local<FunctionTemplate> ctor) {
+ test_api_object_ctor_.Reset(isolate_, ctor);
+}
+
PerIsolateData::RealmScope::RealmScope(PerIsolateData* data) : data_(data) {
data_->realm_count_ = 1;
data_->realm_current_ = 0;
@@ -1500,14 +1513,14 @@ int PerIsolateData::RealmFind(Local<Context> context) {
int PerIsolateData::RealmIndexOrThrow(
const v8::FunctionCallbackInfo<v8::Value>& args, int arg_offset) {
if (args.Length() < arg_offset || !args[arg_offset]->IsNumber()) {
- Throw(args.GetIsolate(), "Invalid argument");
+ args.GetIsolate()->ThrowError("Invalid argument");
return -1;
}
int index = args[arg_offset]
->Int32Value(args.GetIsolate()->GetCurrentContext())
.FromMaybe(-1);
if (index < 0 || index >= realm_count_ || realms_[index].IsEmpty()) {
- Throw(args.GetIsolate(), "Invalid realm index");
+ args.GetIsolate()->ThrowError("Invalid realm index");
return -1;
}
return index;
@@ -1565,7 +1578,7 @@ void Shell::RealmOwner(const v8::FunctionCallbackInfo<v8::Value>& args) {
Isolate* isolate = args.GetIsolate();
PerIsolateData* data = PerIsolateData::Get(isolate);
if (args.Length() < 1 || !args[0]->IsObject()) {
- Throw(args.GetIsolate(), "Invalid argument");
+ args.GetIsolate()->ThrowError("Invalid argument");
return;
}
Local<Object> object =
@@ -1577,7 +1590,7 @@ void Shell::RealmOwner(const v8::FunctionCallbackInfo<v8::Value>& args) {
}
Local<Context> creation_context;
if (!object->GetCreationContext().ToLocal(&creation_context)) {
- Throw(args.GetIsolate(), "object doesn't have creation context");
+ args.GetIsolate()->ThrowError("object doesn't have creation context");
return;
}
int index = data->RealmFind(creation_context);
@@ -1661,7 +1674,7 @@ void Shell::RealmNavigate(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (index == -1) return;
if (index == 0 || index == data->realm_current_ ||
index == data->realm_switch_) {
- Throw(args.GetIsolate(), "Invalid realm index");
+ args.GetIsolate()->ThrowError("Invalid realm index");
return;
}
@@ -1690,7 +1703,7 @@ void Shell::RealmDetachGlobal(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (index == -1) return;
if (index == 0 || index == data->realm_current_ ||
index == data->realm_switch_) {
- Throw(args.GetIsolate(), "Invalid realm index");
+ args.GetIsolate()->ThrowError("Invalid realm index");
return;
}
@@ -1707,7 +1720,7 @@ void Shell::RealmDispose(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (index == -1) return;
if (index == 0 || index == data->realm_current_ ||
index == data->realm_switch_) {
- Throw(args.GetIsolate(), "Invalid realm index");
+ args.GetIsolate()->ThrowError("Invalid realm index");
return;
}
DisposeRealm(args, index);
@@ -1729,7 +1742,7 @@ void Shell::RealmEval(const v8::FunctionCallbackInfo<v8::Value>& args) {
int index = data->RealmIndexOrThrow(args, 0);
if (index == -1) return;
if (args.Length() < 2 || !args[1]->IsString()) {
- Throw(args.GetIsolate(), "Invalid argument");
+ args.GetIsolate()->ThrowError("Invalid argument");
return;
}
ScriptOrigin origin(isolate,
@@ -1780,18 +1793,18 @@ void Shell::LogGetAndStop(const v8::FunctionCallbackInfo<v8::Value>& args) {
std::string file_name = i_isolate->logger()->file_name();
if (!i::Log::IsLoggingToTemporaryFile(file_name)) {
- Throw(isolate, "Only capturing from temporary files is supported.");
+ isolate->ThrowError("Only capturing from temporary files is supported.");
return;
}
if (!i_isolate->logger()->is_logging()) {
- Throw(isolate, "Logging not enabled.");
+ isolate->ThrowError("Logging not enabled.");
return;
}
std::string raw_log;
FILE* log_file = i_isolate->logger()->TearDownAndGetLogFile();
if (!log_file) {
- Throw(isolate, "Log file does not exist.");
+ isolate->ThrowError("Log file does not exist.");
return;
}
@@ -1800,7 +1813,7 @@ void Shell::LogGetAndStop(const v8::FunctionCallbackInfo<v8::Value>& args) {
base::Fclose(log_file);
if (!exists) {
- Throw(isolate, "Unable to read log file.");
+ isolate->ThrowError("Unable to read log file.");
return;
}
Local<String> result =
@@ -1816,13 +1829,13 @@ void Shell::TestVerifySourcePositions(
Isolate* isolate = args.GetIsolate();
// Check if the argument is a valid function.
if (args.Length() != 1) {
- Throw(isolate, "Expected function as single argument.");
+ isolate->ThrowError("Expected function as single argument.");
return;
}
auto arg_handle = Utils::OpenHandle(*args[0]);
if (!arg_handle->IsHeapObject() || !i::Handle<i::HeapObject>::cast(arg_handle)
->IsJSFunctionOrBoundFunction()) {
- Throw(isolate, "Expected function as single argument.");
+ isolate->ThrowError("Expected function as single argument.");
return;
}
@@ -1839,7 +1852,7 @@ void Shell::TestVerifySourcePositions(
i::Handle<i::JSFunction> function = i::Handle<i::JSFunction>::cast(callable);
if (!function->shared().HasBytecodeArray()) {
- Throw(isolate, "Function has no BytecodeArray attached.");
+ isolate->ThrowError("Function has no BytecodeArray attached.");
return;
}
i::Handle<i::BytecodeArray> bytecodes =
@@ -1865,7 +1878,7 @@ void Shell::TestVerifySourcePositions(
if (has_baseline) {
if (offset_iterator->current_bytecode_offset() !=
bytecode_iterator.current_offset()) {
- Throw(isolate, "Baseline bytecode offset mismatch.");
+ isolate->ThrowError("Baseline bytecode offset mismatch.");
return;
}
// Check that we map every address to this bytecode correctly.
@@ -1877,7 +1890,8 @@ void Shell::TestVerifySourcePositions(
pc_lookup.AdvanceToPCOffset(pc);
if (pc_lookup.current_bytecode_offset() !=
bytecode_iterator.current_offset()) {
- Throw(isolate, "Baseline bytecode offset mismatch for PC lookup.");
+ isolate->ThrowError(
+ "Baseline bytecode offset mismatch for PC lookup.");
return;
}
}
@@ -1885,14 +1899,14 @@ void Shell::TestVerifySourcePositions(
bytecode_iterator.Advance();
if (has_baseline && !bytecode_iterator.done()) {
if (offset_iterator->done()) {
- Throw(isolate, "Missing bytecode(s) in baseline offset mapping.");
+ isolate->ThrowError("Missing bytecode(s) in baseline offset mapping.");
return;
}
offset_iterator->Advance();
}
}
if (has_baseline && !offset_iterator->done()) {
- Throw(isolate, "Excess offsets in baseline offset mapping.");
+ isolate->ThrowError("Excess offsets in baseline offset mapping.");
return;
}
}
@@ -1926,6 +1940,27 @@ void Shell::AsyncHooksTriggerAsyncId(
PerIsolateData::Get(isolate)->GetAsyncHooks()->GetTriggerAsyncId()));
}
+void Shell::SetPromiseHooks(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ Isolate* isolate = args.GetIsolate();
+ if (i::FLAG_correctness_fuzzer_suppressions) {
+ // Make sure we have no pending promises if correctness fuzzing is active.
+ // Due to fast-paths we might have not created all intermediate promises
+ // that aren't spec visible. However, the promise hook might expose them
+ // and cause different output.
+ isolate->PerformMicrotaskCheckpoint();
+ }
+ Local<Context> context = isolate->GetCurrentContext();
+ HandleScope handle_scope(isolate);
+
+ context->SetPromiseHooks(
+ args[0]->IsFunction() ? args[0].As<Function>() : Local<Function>(),
+ args[1]->IsFunction() ? args[1].As<Function>() : Local<Function>(),
+ args[2]->IsFunction() ? args[2].As<Function>() : Local<Function>(),
+ args[3]->IsFunction() ? args[3].As<Function>() : Local<Function>());
+
+ args.GetReturnValue().Set(v8::Undefined(isolate));
+}
+
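The new Shell::SetPromiseHooks above forwards up to four optional callbacks to Context::SetPromiseHooks, passing an empty handle for any argument that is not a function. A minimal embedder-side sketch of the same call, where the four handles are placeholders the embedder is assumed to hold already:

// Sketch only: init/before/after/resolve are assumed to be valid (or empty)
// Local<Function> handles created inside a HandleScope for this context.
void InstallPromiseHooks(v8::Local<v8::Context> context,
                         v8::Local<v8::Function> init,
                         v8::Local<v8::Function> before,
                         v8::Local<v8::Function> after,
                         v8::Local<v8::Function> resolve) {
  // An empty Local<Function>() leaves the corresponding hook unset, mirroring
  // the args[i]->IsFunction() checks in Shell::SetPromiseHooks.
  context->SetPromiseHooks(init, before, after, resolve);
}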
void WriteToFile(FILE* file, const v8::FunctionCallbackInfo<v8::Value>& args) {
for (int i = 0; i < args.Length(); i++) {
HandleScope handle_scope(args.GetIsolate());
@@ -1939,7 +1974,7 @@ void WriteToFile(FILE* file, const v8::FunctionCallbackInfo<v8::Value>& args) {
Local<String> str_obj;
if (arg->IsSymbol()) {
- arg = arg.As<Symbol>()->Description();
+ arg = arg.As<Symbol>()->Description(args.GetIsolate());
}
if (!arg->ToString(args.GetIsolate()->GetCurrentContext())
.ToLocal(&str_obj)) {
@@ -1978,7 +2013,7 @@ void Shell::Write(const v8::FunctionCallbackInfo<v8::Value>& args) {
void Shell::Read(const v8::FunctionCallbackInfo<v8::Value>& args) {
String::Utf8Value file(args.GetIsolate(), args[0]);
if (*file == nullptr) {
- Throw(args.GetIsolate(), "Error loading file");
+ args.GetIsolate()->ThrowError("Error loading file");
return;
}
if (args.Length() == 2) {
@@ -1990,7 +2025,7 @@ void Shell::Read(const v8::FunctionCallbackInfo<v8::Value>& args) {
}
Local<String> source = ReadFile(args.GetIsolate(), *file);
if (source.IsEmpty()) {
- Throw(args.GetIsolate(), "Error loading file");
+ args.GetIsolate()->ThrowError("Error loading file");
return;
}
args.GetReturnValue().Set(source);
@@ -2038,12 +2073,12 @@ void Shell::Load(const v8::FunctionCallbackInfo<v8::Value>& args) {
HandleScope handle_scope(args.GetIsolate());
String::Utf8Value file(args.GetIsolate(), args[i]);
if (*file == nullptr) {
- Throw(args.GetIsolate(), "Error loading file");
+ args.GetIsolate()->ThrowError("Error loading file");
return;
}
Local<String> source = ReadFile(args.GetIsolate(), *file);
if (source.IsEmpty()) {
- Throw(args.GetIsolate(), "Error loading file");
+ args.GetIsolate()->ThrowError("Error loading file");
return;
}
if (!ExecuteString(
@@ -2052,7 +2087,7 @@ void Shell::Load(const v8::FunctionCallbackInfo<v8::Value>& args) {
kNoPrintResult,
options.quiet_load ? kNoReportExceptions : kReportExceptions,
kNoProcessMessageQueue)) {
- Throw(args.GetIsolate(), "Error executing file");
+ args.GetIsolate()->ThrowError("Error executing file");
return;
}
}
@@ -2115,7 +2150,7 @@ bool FunctionAndArgumentsToString(Local<Function> function,
function->FunctionProtoToString(context);
Local<String> function_string;
if (!maybe_function_string.ToLocal(&function_string)) {
- Throw(isolate, "Failed to convert function to string");
+ isolate->ThrowError("Failed to convert function to string");
return false;
}
*source = String::NewFromUtf8Literal(isolate, "(");
@@ -2124,7 +2159,7 @@ bool FunctionAndArgumentsToString(Local<Function> function,
*source = String::Concat(isolate, *source, middle);
if (!arguments.IsEmpty() && !arguments->IsUndefined()) {
if (!arguments->IsArray()) {
- Throw(isolate, "'arguments' must be an array");
+ isolate->ThrowError("'arguments' must be an array");
return false;
}
Local<String> comma = String::NewFromUtf8Literal(isolate, ",");
@@ -2136,12 +2171,12 @@ bool FunctionAndArgumentsToString(Local<Function> function,
MaybeLocal<Value> maybe_argument = array->Get(context, i);
Local<Value> argument;
if (!maybe_argument.ToLocal(&argument)) {
- Throw(isolate, "Failed to get argument");
+ isolate->ThrowError("Failed to get argument");
return false;
}
Local<String> argument_string;
if (!JSON::Stringify(context, argument).ToLocal(&argument_string)) {
- Throw(isolate, "Failed to convert argument to string");
+ isolate->ThrowError("Failed to convert argument to string");
return false;
}
*source = String::Concat(isolate, *source, argument_string);
@@ -2156,7 +2191,7 @@ void Shell::WorkerNew(const v8::FunctionCallbackInfo<v8::Value>& args) {
Isolate* isolate = args.GetIsolate();
HandleScope handle_scope(isolate);
if (args.Length() < 1 || (!args[0]->IsString() && !args[0]->IsFunction())) {
- Throw(isolate, "1st argument must be a string or a function");
+ isolate->ThrowError("1st argument must be a string or a function");
return;
}
@@ -2170,7 +2205,7 @@ void Shell::WorkerNew(const v8::FunctionCallbackInfo<v8::Value>& args) {
Local<Value> arguments;
ReadWorkerTypeAndArguments(args, &worker_type, &arguments);
if (worker_type != WorkerType::kFunction) {
- Throw(isolate, "Invalid or missing worker type");
+ isolate->ThrowError("Invalid or missing worker type");
return;
}
@@ -2189,7 +2224,7 @@ void Shell::WorkerNew(const v8::FunctionCallbackInfo<v8::Value>& args) {
load_from_file = false;
} else if (worker_type != WorkerType::kNone &&
worker_type != WorkerType::kClassic) {
- Throw(isolate, "Invalid worker type");
+ isolate->ThrowError("Invalid worker type");
return;
}
@@ -2197,7 +2232,7 @@ void Shell::WorkerNew(const v8::FunctionCallbackInfo<v8::Value>& args) {
String::Utf8Value filename(isolate, args[0]);
source = ReadFile(isolate, *filename);
if (source.IsEmpty()) {
- Throw(args.GetIsolate(), "Error loading worker script");
+ args.GetIsolate()->ThrowError("Error loading worker script");
return;
}
} else {
@@ -2206,7 +2241,7 @@ void Shell::WorkerNew(const v8::FunctionCallbackInfo<v8::Value>& args) {
}
if (!args.IsConstructCall()) {
- Throw(isolate, "Worker must be constructed with new");
+ isolate->ThrowError("Worker must be constructed with new");
return;
}
@@ -2223,7 +2258,7 @@ void Shell::WorkerNew(const v8::FunctionCallbackInfo<v8::Value>& args) {
String::Utf8Value script(isolate, source);
if (!*script) {
- Throw(isolate, "Can't get worker script");
+ isolate->ThrowError("Can't get worker script");
return;
}
@@ -2237,7 +2272,7 @@ void Shell::WorkerNew(const v8::FunctionCallbackInfo<v8::Value>& args) {
i_isolate, kWorkerSizeEstimate, worker);
args.Holder()->SetInternalField(0, Utils::ToLocal(managed));
if (!Worker::StartWorkerThread(std::move(worker))) {
- Throw(isolate, "Can't start thread");
+ isolate->ThrowError("Can't start thread");
return;
}
}
@@ -2248,7 +2283,7 @@ void Shell::WorkerPostMessage(const v8::FunctionCallbackInfo<v8::Value>& args) {
HandleScope handle_scope(isolate);
if (args.Length() < 1) {
- Throw(isolate, "Invalid argument");
+ isolate->ThrowError("Invalid argument");
return;
}
@@ -2557,6 +2592,33 @@ Local<String> Shell::Stringify(Isolate* isolate, Local<Value> value) {
return result.ToLocalChecked().As<String>();
}
+void Shell::NodeTypeCallback(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ v8::Isolate* isolate = args.GetIsolate();
+ args.GetReturnValue().Set(v8::Number::New(isolate, 1));
+}
+
+Local<FunctionTemplate> Shell::CreateNodeTemplates(Isolate* isolate) {
+ Local<FunctionTemplate> node = FunctionTemplate::New(isolate);
+ Local<ObjectTemplate> proto_template = node->PrototypeTemplate();
+ Local<Signature> signature = v8::Signature::New(isolate, node);
+ Local<FunctionTemplate> nodeType = FunctionTemplate::New(
+ isolate, NodeTypeCallback, Local<Value>(), signature);
+ nodeType->SetAcceptAnyReceiver(false);
+ proto_template->SetAccessorProperty(
+ String::NewFromUtf8Literal(isolate, "nodeType"), nodeType);
+
+ Local<FunctionTemplate> element = FunctionTemplate::New(isolate);
+ element->Inherit(node);
+
+ Local<FunctionTemplate> html_element = FunctionTemplate::New(isolate);
+ html_element->Inherit(element);
+
+ Local<FunctionTemplate> div_element = FunctionTemplate::New(isolate);
+ div_element->Inherit(html_element);
+
+ return div_element;
+}
+
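CreateNodeTemplates builds a Node <- Element <- HTMLElement <- Div prototype chain with FunctionTemplate::Inherit and exposes a nodeType accessor on the Node prototype. A small sketch, assuming a live context, of how the returned Div template could be turned into an object (the helper name is illustrative, not part of d8):

// Sketch: materialize a constructor from the template and create one instance.
v8::MaybeLocal<v8::Object> NewDivInstance(
    v8::Local<v8::Context> context,
    v8::Local<v8::FunctionTemplate> div_template) {
  v8::Local<v8::Function> ctor;
  if (!div_template->GetFunction(context).ToLocal(&ctor)) {
    return v8::MaybeLocal<v8::Object>();
  }
  // The instance picks up the "nodeType" accessor through the prototype chain
  // established by the Inherit() calls above.
  return ctor->NewInstance(context);
}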
Local<ObjectTemplate> Shell::CreateGlobalTemplate(Isolate* isolate) {
Local<ObjectTemplate> global_template = ObjectTemplate::New(isolate);
global_template->Set(Symbol::GetToStringTag(isolate),
@@ -2588,6 +2650,7 @@ Local<ObjectTemplate> Shell::CreateGlobalTemplate(Isolate* isolate) {
global_template->Set(isolate, "performance",
Shell::CreatePerformanceTemplate(isolate));
global_template->Set(isolate, "Worker", Shell::CreateWorkerTemplate(isolate));
+
// Prevent fuzzers from creating side effects.
if (!i::FLAG_fuzzing) {
global_template->Set(isolate, "os", Shell::CreateOSTemplate(isolate));
@@ -2716,23 +2779,41 @@ Local<ObjectTemplate> Shell::CreateD8Template(Isolate* isolate) {
FunctionTemplate::New(isolate, LogGetAndStop));
d8_template->Set(isolate, "log", log_template);
+
+ Local<ObjectTemplate> dom_template = ObjectTemplate::New(isolate);
+ dom_template->Set(isolate, "Div", Shell::CreateNodeTemplates(isolate));
+ d8_template->Set(isolate, "dom", dom_template);
}
{
Local<ObjectTemplate> test_template = ObjectTemplate::New(isolate);
- test_template->Set(
- isolate, "verifySourcePositions",
- FunctionTemplate::New(isolate, TestVerifySourcePositions));
+ // For different runs of correctness fuzzing the bytecode of a function
+ // might get flushed, resulting in spurious errors.
+ if (!i::FLAG_correctness_fuzzer_suppressions) {
+ test_template->Set(
+ isolate, "verifySourcePositions",
+ FunctionTemplate::New(isolate, TestVerifySourcePositions));
+ }
// Correctness fuzzing will attempt to compare results of tests with and
// without turbo_fast_api_calls, so we don't expose the fast_c_api
// constructor when --correctness_fuzzer_suppressions is on.
if (i::FLAG_turbo_fast_api_calls &&
!i::FLAG_correctness_fuzzer_suppressions) {
- test_template->Set(isolate, "fast_c_api",
+ test_template->Set(isolate, "FastCAPI",
Shell::CreateTestFastCApiTemplate(isolate));
+ test_template->Set(isolate, "LeafInterfaceType",
+ Shell::CreateLeafInterfaceTypeTemplate(isolate));
}
d8_template->Set(isolate, "test", test_template);
}
+ {
+ Local<ObjectTemplate> promise_template = ObjectTemplate::New(isolate);
+ promise_template->Set(
+ isolate, "setHooks",
+ FunctionTemplate::New(isolate, SetPromiseHooks, Local<Value>(),
+ Local<Signature>(), 4));
+ d8_template->Set(isolate, "promise", promise_template);
+ }
return d8_template;
}
@@ -3187,13 +3268,13 @@ void Shell::ReadBuffer(const v8::FunctionCallbackInfo<v8::Value>& args) {
String::Utf8Value filename(isolate, args[0]);
int length;
if (*filename == nullptr) {
- Throw(isolate, "Error loading file");
+ isolate->ThrowError("Error loading file");
return;
}
uint8_t* data = reinterpret_cast<uint8_t*>(ReadChars(*filename, &length));
if (data == nullptr) {
- Throw(isolate, "Error reading file");
+ isolate->ThrowError("Error reading file");
return;
}
std::unique_ptr<v8::BackingStore> backing_store =
@@ -3860,7 +3941,7 @@ void Worker::PostMessageOut(const v8::FunctionCallbackInfo<v8::Value>& args) {
HandleScope handle_scope(isolate);
if (args.Length() < 1) {
- Throw(isolate, "Invalid argument");
+ isolate->ThrowError("Invalid argument");
return;
}
@@ -3901,25 +3982,6 @@ bool Shell::SetOptions(int argc, char* argv[]) {
strcmp(argv[i], "--no-stress-opt") == 0) {
options.stress_opt = false;
argv[i] = nullptr;
- } else if (strcmp(argv[i], "--stress-snapshot") == 0) {
- options.stress_snapshot = true;
- // Incremental marking is incompatible with the stress_snapshot mode;
- // specifically, serialization may clear bytecode arrays from shared
- // function infos which the MarkCompactCollector (running concurrently)
- // may still need. See also https://crbug.com/v8/10882.
- //
- // We thus force the implication
- //
- // --stress-snapshot ~~> --no-incremental-marking
- //
- // Note: This is not an issue in production because we don't clear SFI's
- // there (that only happens in mksnapshot and in --stress-snapshot mode).
- i::FLAG_incremental_marking = false;
- argv[i] = nullptr;
- } else if (strcmp(argv[i], "--nostress-snapshot") == 0 ||
- strcmp(argv[i], "--no-stress-snapshot") == 0) {
- options.stress_snapshot = false;
- argv[i] = nullptr;
} else if (strcmp(argv[i], "--noalways-opt") == 0 ||
strcmp(argv[i], "--no-always-opt") == 0) {
no_always_opt = true;
@@ -4058,6 +4120,12 @@ bool Shell::SetOptions(int argc, char* argv[]) {
} else if (strncmp(argv[i], "--web-snapshot-config=", 22) == 0) {
options.web_snapshot_config = argv[i] + 22;
argv[i] = nullptr;
+ } else if (strcmp(argv[i], "--compile-only") == 0) {
+ options.compile_only = true;
+ argv[i] = nullptr;
+ } else if (strncmp(argv[i], "--repeat-compile=", 17) == 0) {
+ options.repeat_compile = atoi(argv[i] + 17);
+ argv[i] = nullptr;
#ifdef V8_FUZZILLI
} else if (strcmp(argv[i], "--no-fuzzilli-enable-builtins-coverage") == 0) {
options.fuzzilli_enable_builtins_coverage = false;
@@ -4069,11 +4137,15 @@ bool Shell::SetOptions(int argc, char* argv[]) {
} else if (strcmp(argv[i], "--fuzzy-module-file-extensions") == 0) {
options.fuzzy_module_file_extensions = true;
argv[i] = nullptr;
-#if defined(V8_OS_WIN) && defined(V8_ENABLE_SYSTEM_INSTRUMENTATION)
+#if defined(V8_ENABLE_SYSTEM_INSTRUMENTATION)
} else if (strcmp(argv[i], "--enable-system-instrumentation") == 0) {
options.enable_system_instrumentation = true;
options.trace_enabled = true;
+#if defined(V8_OS_WIN)
+ // Guard this because the flag has a lot of overhead and is not currently
+ // used on macOS.
i::FLAG_interpreted_frames_native_stack = true;
+#endif
argv[i] = nullptr;
#endif
}
@@ -4164,7 +4236,7 @@ int Shell::RunMain(Isolate* isolate, bool last_run) {
DisposeModuleEmbedderData(context);
}
WriteLcovData(isolate, options.lcov_file);
- if (last_run && options.stress_snapshot) {
+ if (last_run && i::FLAG_stress_snapshot) {
static constexpr bool kClearRecompilableData = true;
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
i::Handle<i::Context> i_context = Utils::OpenHandle(*context);
@@ -4426,7 +4498,8 @@ class Serializer : public ValueSerializer::Delegate {
Local<Value> element;
if (transfer_array->Get(context, i).ToLocal(&element)) {
if (!element->IsArrayBuffer()) {
- Throw(isolate_, "Transfer array elements must be an ArrayBuffer");
+ isolate_->ThrowError(
+ "Transfer array elements must be an ArrayBuffer");
return Nothing<bool>();
}
@@ -4434,8 +4507,8 @@ class Serializer : public ValueSerializer::Delegate {
if (std::find(array_buffers_.begin(), array_buffers_.end(),
array_buffer) != array_buffers_.end()) {
- Throw(isolate_,
- "ArrayBuffer occurs in the transfer array more than once");
+ isolate_->ThrowError(
+ "ArrayBuffer occurs in the transfer array more than once");
return Nothing<bool>();
}
@@ -4450,7 +4523,7 @@ class Serializer : public ValueSerializer::Delegate {
} else if (transfer->IsUndefined()) {
return Just(true);
} else {
- Throw(isolate_, "Transfer list must be an Array or undefined");
+ isolate_->ThrowError("Transfer list must be an Array or undefined");
return Nothing<bool>();
}
}
@@ -4460,7 +4533,7 @@ class Serializer : public ValueSerializer::Delegate {
Local<ArrayBuffer> array_buffer =
Local<ArrayBuffer>::New(isolate_, global_array_buffer);
if (!array_buffer->IsDetachable()) {
- Throw(isolate_, "ArrayBuffer could not be transferred");
+ isolate_->ThrowError("ArrayBuffer could not be transferred");
return Nothing<bool>();
}
diff --git a/chromium/v8/src/d8/d8.h b/chromium/v8/src/d8/d8.h
index d5f13817d35..75c80461156 100644
--- a/chromium/v8/src/d8/d8.h
+++ b/chromium/v8/src/d8/d8.h
@@ -271,6 +271,9 @@ class PerIsolateData {
void AddDynamicImportData(DynamicImportData*);
void DeleteDynamicImportData(DynamicImportData*);
+ Local<FunctionTemplate> GetTestApiObjectCtor() const;
+ void SetTestApiObjectCtor(Local<FunctionTemplate> ctor);
+
private:
friend class Shell;
friend class RealmScope;
@@ -289,6 +292,7 @@ class PerIsolateData {
#if defined(LEAK_SANITIZER)
std::unordered_set<DynamicImportData*> import_data_;
#endif
+ Global<FunctionTemplate> test_api_object_ctor_;
int RealmIndexOrThrow(const v8::FunctionCallbackInfo<v8::Value>& args,
int arg_offset);
@@ -358,7 +362,6 @@ class ShellOptions {
DisallowReassignment<bool> simulate_errors = {"simulate-errors", false};
DisallowReassignment<bool> stress_opt = {"stress-opt", false};
DisallowReassignment<int> stress_runs = {"stress-runs", 1};
- DisallowReassignment<bool> stress_snapshot = {"stress-snapshot", false};
DisallowReassignment<bool> interactive_shell = {"shell", false};
bool test_shell = false;
DisallowReassignment<bool> expected_to_throw = {"throws", false};
@@ -403,6 +406,8 @@ class ShellOptions {
"enable-system-instrumentation", false};
DisallowReassignment<const char*> web_snapshot_config = {
"web-snapshot-config", nullptr};
+ DisallowReassignment<bool> compile_only = {"compile-only", false};
+ DisallowReassignment<int> repeat_compile = {"repeat-compile", 1};
};
class Shell : public i::AllStatic {
@@ -483,6 +488,8 @@ class Shell : public i::AllStatic {
static void AsyncHooksTriggerAsyncId(
const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void SetPromiseHooks(const v8::FunctionCallbackInfo<v8::Value>& args);
+
static void Print(const v8::FunctionCallbackInfo<v8::Value>& args);
static void PrintErr(const v8::FunctionCallbackInfo<v8::Value>& args);
static void Write(const v8::FunctionCallbackInfo<v8::Value>& args);
@@ -624,6 +631,9 @@ class Shell : public i::AllStatic {
static void RunShell(Isolate* isolate);
static bool SetOptions(int argc, char* argv[]);
+ static void NodeTypeCallback(const v8::FunctionCallbackInfo<v8::Value>& args);
+
+ static Local<FunctionTemplate> CreateNodeTemplates(Isolate* isolate);
static Local<ObjectTemplate> CreateGlobalTemplate(Isolate* isolate);
static Local<ObjectTemplate> CreateOSTemplate(Isolate* isolate);
static Local<FunctionTemplate> CreateWorkerTemplate(Isolate* isolate);
@@ -633,6 +643,8 @@ class Shell : public i::AllStatic {
static Local<ObjectTemplate> CreateRealmTemplate(Isolate* isolate);
static Local<ObjectTemplate> CreateD8Template(Isolate* isolate);
static Local<FunctionTemplate> CreateTestFastCApiTemplate(Isolate* isolate);
+ static Local<FunctionTemplate> CreateLeafInterfaceTypeTemplate(
+ Isolate* isolate);
static MaybeLocal<Context> CreateRealm(
const v8::FunctionCallbackInfo<v8::Value>& args, int index,
diff --git a/chromium/v8/src/debug/arm/debug-arm.cc b/chromium/v8/src/debug/arm/debug-arm.cc
deleted file mode 100644
index 238bc5b85d4..00000000000
--- a/chromium/v8/src/debug/arm/debug-arm.cc
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_ARM
-
-#include "src/debug/debug.h"
-
-#include "src/codegen/assembler-inl.h"
-#include "src/codegen/macro-assembler.h"
-#include "src/debug/liveedit.h"
-#include "src/execution/frames-inl.h"
-#include "src/objects/objects-inl.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-
-void DebugCodegen::GenerateHandleDebuggerStatement(MacroAssembler* masm) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kHandleDebuggerStatement, 0);
- }
- __ MaybeDropFrames();
-
- // Return to caller.
- __ Ret();
-}
-
-void DebugCodegen::GenerateFrameDropperTrampoline(MacroAssembler* masm) {
- // Frame is being dropped:
- // - Drop to the target frame specified by r1.
- // - Look up current function on the frame.
- // - Leave the frame.
- // - Restart the frame by calling the function.
- __ mov(fp, r1);
- __ ldr(r1, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
- __ ldr(r0, MemOperand(fp, StandardFrameConstants::kArgCOffset));
- __ LeaveFrame(StackFrame::INTERNAL);
-
- // The arguments are already in the stack (including any necessary padding),
- // we should not try to massage the arguments again.
- __ mov(r2, Operand(kDontAdaptArgumentsSentinel));
- __ InvokeFunction(r1, r2, r0, JUMP_FUNCTION);
-}
-
-
-const bool LiveEdit::kFrameDropperSupported = true;
-
-#undef __
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_ARM
diff --git a/chromium/v8/src/debug/arm64/debug-arm64.cc b/chromium/v8/src/debug/arm64/debug-arm64.cc
deleted file mode 100644
index b12d235983c..00000000000
--- a/chromium/v8/src/debug/arm64/debug-arm64.cc
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_ARM64
-
-#include "src/debug/debug.h"
-
-#include "src/codegen/arm64/macro-assembler-arm64-inl.h"
-#include "src/debug/liveedit.h"
-#include "src/execution/frame-constants.h"
-#include "src/execution/frames-inl.h"
-#include "src/objects/objects-inl.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-void DebugCodegen::GenerateHandleDebuggerStatement(MacroAssembler* masm) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kHandleDebuggerStatement, 0);
- }
- __ MaybeDropFrames();
-
- // Return to caller.
- __ Ret();
-}
-
-void DebugCodegen::GenerateFrameDropperTrampoline(MacroAssembler* masm) {
- // Frame is being dropped:
- // - Drop to the target frame specified by x1.
- // - Look up current function on the frame.
- // - Leave the frame.
- // - Restart the frame by calling the function.
- __ Mov(fp, x1);
- __ Ldr(x1, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
- __ ldr(x0, MemOperand(fp, StandardFrameConstants::kArgCOffset));
-
- __ Mov(sp, fp);
- __ Pop<TurboAssembler::kAuthLR>(fp, lr);
-
- // The arguments are already in the stack (including any necessary padding),
- // we should not try to massage the arguments again.
- __ Mov(x3, kDontAdaptArgumentsSentinel);
- __ InvokeFunctionWithNewTarget(x1, x3, x0, JUMP_FUNCTION);
-}
-
-
-const bool LiveEdit::kFrameDropperSupported = true;
-
-} // namespace internal
-} // namespace v8
-
-#undef __
-
-#endif // V8_TARGET_ARCH_ARM64
diff --git a/chromium/v8/src/debug/debug-coverage.cc b/chromium/v8/src/debug/debug-coverage.cc
index 56933602a65..7ff5809a18a 100644
--- a/chromium/v8/src/debug/debug-coverage.cc
+++ b/chromium/v8/src/debug/debug-coverage.cc
@@ -61,8 +61,7 @@ bool CompareCoverageBlock(const CoverageBlock& a, const CoverageBlock& b) {
return a.start < b.start;
}
-void SortBlockData(
- std::vector<CoverageBlock>& v) { // NOLINT(runtime/references)
+void SortBlockData(std::vector<CoverageBlock>& v) {
// Sort according to the block nesting structure.
std::sort(v.begin(), v.end(), CompareCoverageBlock);
}
diff --git a/chromium/v8/src/debug/debug-evaluate.cc b/chromium/v8/src/debug/debug-evaluate.cc
index 4f317fcc89b..54fac88f59b 100644
--- a/chromium/v8/src/debug/debug-evaluate.cc
+++ b/chromium/v8/src/debug/debug-evaluate.cc
@@ -43,6 +43,22 @@ MaybeHandle<Object> DebugEvaluate::Global(Isolate* isolate,
Handle<String> source,
debug::EvaluateGlobalMode mode,
REPLMode repl_mode) {
+ Handle<SharedFunctionInfo> shared_info;
+ if (!GetFunctionInfo(isolate, source, repl_mode).ToHandle(&shared_info)) {
+ return MaybeHandle<Object>();
+ }
+
+ Handle<NativeContext> context = isolate->native_context();
+ Handle<JSFunction> fun =
+ Factory::JSFunctionBuilder{isolate, shared_info, context}.Build();
+
+ return Global(isolate, fun, mode, repl_mode);
+}
+
+MaybeHandle<Object> DebugEvaluate::Global(Isolate* isolate,
+ Handle<JSFunction> function,
+ debug::EvaluateGlobalMode mode,
+ REPLMode repl_mode) {
// Disable breaks in side-effect free mode.
DisableBreak disable_break_scope(
isolate->debug(),
@@ -50,19 +66,14 @@ MaybeHandle<Object> DebugEvaluate::Global(Isolate* isolate,
mode ==
debug::EvaluateGlobalMode::kDisableBreaksAndThrowOnSideEffect);
- Handle<SharedFunctionInfo> shared_info;
- if (!GetFunctionInfo(isolate, source, repl_mode).ToHandle(&shared_info)) {
- return MaybeHandle<Object>();
- }
+ Handle<NativeContext> context = isolate->native_context();
+ CHECK_EQ(function->native_context(), *context);
- Handle<Context> context = isolate->native_context();
- Handle<JSFunction> fun =
- Factory::JSFunctionBuilder{isolate, shared_info, context}.Build();
if (mode == debug::EvaluateGlobalMode::kDisableBreaksAndThrowOnSideEffect) {
isolate->debug()->StartSideEffectCheckMode();
}
MaybeHandle<Object> result = Execution::Call(
- isolate, fun, Handle<JSObject>(context->global_proxy(), isolate), 0,
+ isolate, function, Handle<JSObject>(context->global_proxy(), isolate), 0,
nullptr);
if (mode == debug::EvaluateGlobalMode::kDisableBreaksAndThrowOnSideEffect) {
isolate->debug()->StopSideEffectCheckMode();
@@ -1108,7 +1119,7 @@ void DebugEvaluate::VerifyTransitiveBuiltins(Isolate* isolate) {
}
CHECK(!failed);
#if defined(V8_TARGET_ARCH_PPC) || defined(V8_TARGET_ARCH_PPC64) || \
- defined(V8_TARGET_ARCH_MIPS64) || defined(V8_TARGET_ARCH_RISCV64)
+ defined(V8_TARGET_ARCH_MIPS64)
// Isolate-independent builtin calls and jumps do not emit reloc infos
// on PPC. We try to avoid using PC-relative code due to performance
// issues, especially on older hardware.
diff --git a/chromium/v8/src/debug/debug-evaluate.h b/chromium/v8/src/debug/debug-evaluate.h
index 03836dd6dad..34a6c8d4c75 100644
--- a/chromium/v8/src/debug/debug-evaluate.h
+++ b/chromium/v8/src/debug/debug-evaluate.h
@@ -27,6 +27,10 @@ class DebugEvaluate : public AllStatic {
debug::EvaluateGlobalMode mode,
REPLMode repl_mode = REPLMode::kNo);
+ static V8_EXPORT_PRIVATE MaybeHandle<Object> Global(
+ Isolate* isolate, Handle<JSFunction> function,
+ debug::EvaluateGlobalMode mode, REPLMode repl_mode = REPLMode::kNo);
+
// Evaluate a piece of JavaScript in the context of a stack frame for
// debugging. Things that need special attention are:
// - Parameters and stack-allocated locals need to be materialized. Altered
diff --git a/chromium/v8/src/debug/debug-frames.cc b/chromium/v8/src/debug/debug-frames.cc
index 2e5c9443cab..95b68014816 100644
--- a/chromium/v8/src/debug/debug-frames.cc
+++ b/chromium/v8/src/debug/debug-frames.cc
@@ -7,6 +7,10 @@
#include "src/builtins/accessors.h"
#include "src/execution/frames-inl.h"
+#if V8_ENABLE_WEBASSEMBLY
+#include "src/debug/debug-wasm-objects.h"
+#endif // V8_ENABLE_WEBASSEMBLY
+
namespace v8 {
namespace internal {
@@ -21,7 +25,6 @@ FrameInspector::FrameInspector(CommonFrame* frame, int inlined_frame_index,
is_constructor_ = summary.is_constructor();
source_position_ = summary.SourcePosition();
- function_name_ = summary.FunctionName();
script_ = Handle<Script>::cast(summary.script());
receiver_ = summary.receiver();
@@ -70,6 +73,18 @@ Handle<Object> FrameInspector::GetContext() {
: handle(frame_->context(), isolate_);
}
+Handle<String> FrameInspector::GetFunctionName() {
+#if V8_ENABLE_WEBASSEMBLY
+ if (IsWasm()) {
+ auto wasm_frame = WasmFrame::cast(frame_);
+ auto wasm_instance = handle(wasm_frame->wasm_instance(), isolate_);
+ return GetWasmFunctionDebugName(isolate_, wasm_instance,
+ wasm_frame->function_index());
+ }
+#endif // V8_ENABLE_WEBASSEMBLY
+ return JSFunction::GetDebugName(function_);
+}
+
#if V8_ENABLE_WEBASSEMBLY
bool FrameInspector::IsWasm() { return frame_->is_wasm(); }
#endif // V8_ENABLE_WEBASSEMBLY
diff --git a/chromium/v8/src/debug/debug-frames.h b/chromium/v8/src/debug/debug-frames.h
index 03f670e4995..5197f862988 100644
--- a/chromium/v8/src/debug/debug-frames.h
+++ b/chromium/v8/src/debug/debug-frames.h
@@ -36,7 +36,7 @@ class FrameInspector {
Handle<Object> GetContext();
Handle<Object> GetReceiver() { return receiver_; }
- Handle<String> GetFunctionName() { return function_name_; }
+ Handle<String> GetFunctionName();
#if V8_ENABLE_WEBASSEMBLY
bool IsWasm();
@@ -58,7 +58,6 @@ class FrameInspector {
Handle<Script> script_;
Handle<Object> receiver_;
Handle<JSFunction> function_;
- Handle<String> function_name_;
int source_position_ = -1;
bool is_optimized_ = false;
bool is_constructor_ = false;
diff --git a/chromium/v8/src/debug/debug-interface.cc b/chromium/v8/src/debug/debug-interface.cc
index bf74d379b6a..bc545a95d4c 100644
--- a/chromium/v8/src/debug/debug-interface.cc
+++ b/chromium/v8/src/debug/debug-interface.cc
@@ -12,7 +12,9 @@
#include "src/debug/debug.h"
#include "src/execution/vm-state-inl.h"
#include "src/objects/js-generator-inl.h"
+#include "src/objects/stack-frame-info-inl.h"
#include "src/regexp/regexp-stack.h"
+#include "src/strings/string-builder-inl.h"
#if V8_ENABLE_WEBASSEMBLY
#include "src/debug/debug-wasm-objects-inl.h"
@@ -43,6 +45,54 @@ v8_inspector::V8Inspector* GetInspector(Isolate* isolate) {
return reinterpret_cast<i::Isolate*>(isolate)->inspector();
}
+Local<String> GetFunctionDebugName(Local<StackFrame> frame) {
+#if V8_ENABLE_WEBASSEMBLY
+ auto info = Utils::OpenHandle(*frame);
+ if (info->IsWasm()) {
+ auto isolate = info->GetIsolate();
+ auto instance = handle(info->GetWasmInstance(), isolate);
+ auto func_index = info->GetWasmFunctionIndex();
+ return Utils::ToLocal(
+ i::GetWasmFunctionDebugName(isolate, instance, func_index));
+ }
+#endif // V8_ENABLE_WEBASSEMBLY
+ return frame->GetFunctionName();
+}
+
+Local<String> GetFunctionDescription(Local<Function> function) {
+ auto receiver = Utils::OpenHandle(*function);
+ if (receiver->IsJSBoundFunction()) {
+ return Utils::ToLocal(i::JSBoundFunction::ToString(
+ i::Handle<i::JSBoundFunction>::cast(receiver)));
+ }
+ if (receiver->IsJSFunction()) {
+ auto function = i::Handle<i::JSFunction>::cast(receiver);
+#if V8_ENABLE_WEBASSEMBLY
+ if (function->shared().HasWasmExportedFunctionData()) {
+ auto isolate = function->GetIsolate();
+ auto func_index =
+ function->shared().wasm_exported_function_data().function_index();
+ auto instance = i::handle(
+ function->shared().wasm_exported_function_data().instance(), isolate);
+ if (instance->module()->origin == i::wasm::kWasmOrigin) {
+ // For asm.js functions, we can still print the source
+ // code (hopefully), so don't bother with them here.
+ auto debug_name =
+ i::GetWasmFunctionDebugName(isolate, instance, func_index);
+ i::IncrementalStringBuilder builder(isolate);
+ builder.AppendCString("function ");
+ builder.AppendString(debug_name);
+ builder.AppendCString("() { [native code] }");
+ return Utils::ToLocal(builder.Finish().ToHandleChecked());
+ }
+ }
+#endif // V8_ENABLE_WEBASSEMBLY
+ return Utils::ToLocal(i::JSFunction::ToString(function));
+ }
+ return Utils::ToLocal(
+ receiver->GetIsolate()->factory()->function_native_code_string());
+}
+
void SetBreakOnNextFunctionCall(Isolate* isolate) {
reinterpret_cast<i::Isolate*>(isolate)->debug()->SetBreakOnNextFunctionCall();
}
@@ -901,6 +951,21 @@ MaybeLocal<v8::Value> EvaluateGlobal(v8::Isolate* isolate,
RETURN_ESCAPED(result);
}
+v8::MaybeLocal<v8::Value> EvaluateGlobalForTesting(
+ v8::Isolate* isolate, v8::Local<v8::Script> function,
+ v8::debug::EvaluateGlobalMode mode, bool repl) {
+ i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ PREPARE_FOR_DEBUG_INTERFACE_EXECUTION_WITH_ISOLATE(internal_isolate, Value);
+ i::REPLMode repl_mode = repl ? i::REPLMode::kYes : i::REPLMode::kNo;
+ Local<Value> result;
+ has_pending_exception = !ToLocal<Value>(
+ i::DebugEvaluate::Global(internal_isolate, Utils::OpenHandle(*function),
+ mode, repl_mode),
+ &result);
+ RETURN_ON_FAILED_EXECUTION(Value);
+ RETURN_ESCAPED(result);
+}
+
void QueryObjects(v8::Local<v8::Context> v8_context,
QueryObjectPredicate* predicate,
PersistentValueVector<v8::Object>* objects) {
@@ -944,10 +1009,12 @@ int64_t GetNextRandomInt64(v8::Isolate* v8_isolate) {
void EnumerateRuntimeCallCounters(v8::Isolate* v8_isolate,
RuntimeCallCounterCallback callback) {
+#ifdef V8_RUNTIME_CALL_STATS
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
if (isolate->counters()) {
isolate->counters()->runtime_call_stats()->EnumerateCounters(callback);
}
+#endif // V8_RUNTIME_CALL_STATS
}
int GetDebuggingId(v8::Local<v8::Function> function) {
diff --git a/chromium/v8/src/debug/debug-interface.h b/chromium/v8/src/debug/debug-interface.h
index 66c7f3997e8..6c7ff9cbdde 100644
--- a/chromium/v8/src/debug/debug-interface.h
+++ b/chromium/v8/src/debug/debug-interface.h
@@ -7,7 +7,6 @@
#include <memory>
-#include "include/v8-inspector.h"
#include "include/v8-util.h"
#include "include/v8.h"
#include "src/base/platform/time.h"
@@ -15,6 +14,10 @@
#include "src/debug/interface-types.h"
#include "src/utils/vector.h"
+namespace v8_inspector {
+class V8Inspector;
+} // namespace v8_inspector
+
namespace v8 {
namespace internal {
@@ -38,6 +41,16 @@ int GetContextId(Local<Context> context);
void SetInspector(Isolate* isolate, v8_inspector::V8Inspector*);
v8_inspector::V8Inspector* GetInspector(Isolate* isolate);
+// Returns the debug name for the function, which is supposed to be used
+// by the debugger and the developer tools. This can thus be different from
+// the name returned by the StackFrame::GetFunctionName() method. For example,
+// in case of WebAssembly, the debug name is WAT-compatible and thus always
+// preceded by a dollar ('$').
+Local<String> GetFunctionDebugName(Local<StackFrame> frame);
+
+// Returns a debug string representation of the function.
+Local<String> GetFunctionDescription(Local<Function> function);
+
// Schedule a debugger break to happen when function is called inside given
// isolate.
V8_EXPORT_PRIVATE void SetBreakOnNextFunctionCall(Isolate* isolate);
@@ -119,11 +132,7 @@ struct LiveEditResult {
OK,
COMPILE_ERROR,
BLOCKED_BY_RUNNING_GENERATOR,
- BLOCKED_BY_FUNCTION_ABOVE_BREAK_FRAME,
- BLOCKED_BY_FUNCTION_BELOW_NON_DROPPABLE_FRAME,
- BLOCKED_BY_ACTIVE_FUNCTION,
- BLOCKED_BY_NEW_TARGET_IN_RESTART_FRAME,
- FRAME_RESTART_IS_NOT_SUPPORTED
+ BLOCKED_BY_ACTIVE_FUNCTION
};
Status status = OK;
bool stack_changed = false;
@@ -195,9 +204,8 @@ class WasmScript : public Script {
};
#endif // V8_ENABLE_WEBASSEMBLY
-V8_EXPORT_PRIVATE void GetLoadedScripts(
- Isolate* isolate,
- PersistentValueVector<Script>& scripts); // NOLINT(runtime/references)
+V8_EXPORT_PRIVATE void GetLoadedScripts(Isolate* isolate,
+ PersistentValueVector<Script>& scripts);
MaybeLocal<UnboundScript> CompileInspectorScript(Isolate* isolate,
Local<String> source);
@@ -480,7 +488,6 @@ class V8_EXPORT_PRIVATE StackTraceIterator {
virtual v8::Local<v8::Function> GetFunction() const = 0;
virtual std::unique_ptr<ScopeIterator> GetScopeIterator() const = 0;
- virtual bool Restart() = 0;
virtual v8::MaybeLocal<v8::Value> Evaluate(v8::Local<v8::String> source,
bool throw_on_side_effect) = 0;
};
@@ -523,6 +530,10 @@ V8_EXPORT_PRIVATE v8::MaybeLocal<v8::Value> EvaluateGlobal(
v8::Isolate* isolate, v8::Local<v8::String> source, EvaluateGlobalMode mode,
bool repl_mode = false);
+V8_EXPORT_PRIVATE v8::MaybeLocal<v8::Value> EvaluateGlobalForTesting(
+ v8::Isolate* isolate, v8::Local<v8::Script> function,
+ v8::debug::EvaluateGlobalMode mode, bool repl);
+
int GetDebuggingId(v8::Local<v8::Function> function);
bool SetFunctionBreakpoint(v8::Local<v8::Function> function,
diff --git a/chromium/v8/src/debug/debug-scopes.cc b/chromium/v8/src/debug/debug-scopes.cc
index 03d49fac5ca..5f136d91d3d 100644
--- a/chromium/v8/src/debug/debug-scopes.cc
+++ b/chromium/v8/src/debug/debug-scopes.cc
@@ -192,7 +192,13 @@ class ScopeChainRetriever {
// functions that have the same end position.
const bool position_fits_end =
closure_scope_ ? position_ < end : position_ <= end;
- return start < position_ && position_fits_end;
+ // While we're evaluating a class, the calling function will have a class
+ // context on the stack with a range that starts at Token::CLASS, and the
+ // source position will also point to Token::CLASS. To identify the
+ // matching scope we include start in the accepted range for class scopes.
+ const bool position_fits_start =
+ scope->is_class_scope() ? start <= position_ : start < position_;
+ return position_fits_start && position_fits_end;
}
};
diff --git a/chromium/v8/src/debug/debug-stack-trace-iterator.cc b/chromium/v8/src/debug/debug-stack-trace-iterator.cc
index 9904f781f92..93722d0f16c 100644
--- a/chromium/v8/src/debug/debug-stack-trace-iterator.cc
+++ b/chromium/v8/src/debug/debug-stack-trace-iterator.cc
@@ -172,14 +172,6 @@ DebugStackTraceIterator::GetScopeIterator() const {
return std::make_unique<DebugScopeIterator>(isolate_, frame_inspector_.get());
}
-bool DebugStackTraceIterator::Restart() {
- DCHECK(!Done());
-#if V8_ENABLE_WEBASSEMBLY
- if (iterator_.is_wasm()) return false;
-#endif // V8_ENABLE_WEBASSEMBLY
- return LiveEdit::RestartFrame(iterator_.javascript_frame());
-}
-
v8::MaybeLocal<v8::Value> DebugStackTraceIterator::Evaluate(
v8::Local<v8::String> source, bool throw_on_side_effect) {
DCHECK(!Done());
diff --git a/chromium/v8/src/debug/debug-stack-trace-iterator.h b/chromium/v8/src/debug/debug-stack-trace-iterator.h
index 3319bc15f50..2d059e0ec5c 100644
--- a/chromium/v8/src/debug/debug-stack-trace-iterator.h
+++ b/chromium/v8/src/debug/debug-stack-trace-iterator.h
@@ -31,7 +31,6 @@ class DebugStackTraceIterator final : public debug::StackTraceIterator {
v8::Local<v8::Function> GetFunction() const override;
std::unique_ptr<v8::debug::ScopeIterator> GetScopeIterator() const override;
- bool Restart() override;
v8::MaybeLocal<v8::Value> Evaluate(v8::Local<v8::String> source,
bool throw_on_side_effect) override;
diff --git a/chromium/v8/src/debug/debug-wasm-objects.cc b/chromium/v8/src/debug/debug-wasm-objects.cc
index 070221f433d..39286ed027e 100644
--- a/chromium/v8/src/debug/debug-wasm-objects.cc
+++ b/chromium/v8/src/debug/debug-wasm-objects.cc
@@ -323,15 +323,7 @@ struct FunctionsProxy : NamedDebugProxy<FunctionsProxy, kFunctionsProxy> {
static Handle<String> GetName(Isolate* isolate,
Handle<WasmInstanceObject> instance,
uint32_t index) {
- Handle<WasmModuleObject> module_object(instance->module_object(), isolate);
- MaybeHandle<String> name =
- WasmModuleObject::GetFunctionNameOrNull(isolate, module_object, index);
- if (name.is_null()) {
- name = GetNameFromImportsAndExportsOrNull(
- isolate, instance, wasm::ImportExportKindCode::kExternalFunction,
- index);
- }
- return GetNameOrDefault(isolate, name, "$func", index);
+ return GetWasmFunctionDebugName(isolate, instance, index);
}
};
@@ -1050,78 +1042,75 @@ std::unique_ptr<debug::ScopeIterator> GetWasmScopeIterator(WasmFrame* frame) {
return std::make_unique<DebugWasmScopeIterator>(frame);
}
-Handle<JSArray> GetWasmInstanceObjectInternalProperties(
- Handle<WasmInstanceObject> instance) {
- Isolate* isolate = instance->GetIsolate();
- Handle<FixedArray> result = isolate->factory()->NewFixedArray(2 * 5);
- int length = 0;
+Handle<String> GetWasmFunctionDebugName(Isolate* isolate,
+ Handle<WasmInstanceObject> instance,
+ uint32_t func_index) {
+ Handle<WasmModuleObject> module_object(instance->module_object(), isolate);
+ MaybeHandle<String> maybe_name = WasmModuleObject::GetFunctionNameOrNull(
+ isolate, module_object, func_index);
+ if (module_object->is_asm_js()) {
+ // In case of asm.js, we use the names from the function declarations.
+ return maybe_name.ToHandleChecked();
+ }
+ if (maybe_name.is_null()) {
+ maybe_name = GetNameFromImportsAndExportsOrNull(
+ isolate, instance, wasm::ImportExportKindCode::kExternalFunction,
+ func_index);
+ }
+ return GetNameOrDefault(isolate, maybe_name, "$func", func_index);
+}
- Handle<String> module_str =
- isolate->factory()->NewStringFromAsciiChecked("[[Module]]");
- Handle<Object> module_obj = handle(instance->module_object(), isolate);
- result->set(length++, *module_str);
- result->set(length++, *module_obj);
+Handle<ArrayList> AddWasmInstanceObjectInternalProperties(
+ Isolate* isolate, Handle<ArrayList> result,
+ Handle<WasmInstanceObject> instance) {
+ result = ArrayList::Add(
+ isolate, result,
+ isolate->factory()->NewStringFromAsciiChecked("[[Module]]"),
+ handle(instance->module_object(), isolate));
if (FunctionsProxy::Count(isolate, instance) != 0) {
- Handle<String> functions_str =
- isolate->factory()->NewStringFromAsciiChecked("[[Functions]]");
- Handle<Object> functions_obj =
- GetOrCreateInstanceProxy<FunctionsProxy>(isolate, instance);
- result->set(length++, *functions_str);
- result->set(length++, *functions_obj);
+ result = ArrayList::Add(
+ isolate, result,
+ isolate->factory()->NewStringFromAsciiChecked("[[Functions]]"),
+ GetOrCreateInstanceProxy<FunctionsProxy>(isolate, instance));
}
if (GlobalsProxy::Count(isolate, instance) != 0) {
- Handle<String> globals_str =
- isolate->factory()->NewStringFromAsciiChecked("[[Globals]]");
- Handle<Object> globals_obj =
- GetOrCreateInstanceProxy<GlobalsProxy>(isolate, instance);
- result->set(length++, *globals_str);
- result->set(length++, *globals_obj);
+ result = ArrayList::Add(
+ isolate, result,
+ isolate->factory()->NewStringFromAsciiChecked("[[Globals]]"),
+ GetOrCreateInstanceProxy<GlobalsProxy>(isolate, instance));
}
if (MemoriesProxy::Count(isolate, instance) != 0) {
- Handle<String> memories_str =
- isolate->factory()->NewStringFromAsciiChecked("[[Memories]]");
- Handle<Object> memories_obj =
- GetOrCreateInstanceProxy<MemoriesProxy>(isolate, instance);
- result->set(length++, *memories_str);
- result->set(length++, *memories_obj);
+ result = ArrayList::Add(
+ isolate, result,
+ isolate->factory()->NewStringFromAsciiChecked("[[Memories]]"),
+ GetOrCreateInstanceProxy<MemoriesProxy>(isolate, instance));
}
if (TablesProxy::Count(isolate, instance) != 0) {
- Handle<String> tables_str =
- isolate->factory()->NewStringFromAsciiChecked("[[Tables]]");
- Handle<Object> tables_obj =
- GetOrCreateInstanceProxy<TablesProxy>(isolate, instance);
- result->set(length++, *tables_str);
- result->set(length++, *tables_obj);
+ result = ArrayList::Add(
+ isolate, result,
+ isolate->factory()->NewStringFromAsciiChecked("[[Tables]]"),
+ GetOrCreateInstanceProxy<TablesProxy>(isolate, instance));
}
- return isolate->factory()->NewJSArrayWithElements(result, PACKED_ELEMENTS,
- length);
+ return result;
}
-Handle<JSArray> GetWasmModuleObjectInternalProperties(
+Handle<ArrayList> AddWasmModuleObjectInternalProperties(
+ Isolate* isolate, Handle<ArrayList> result,
Handle<WasmModuleObject> module_object) {
- Isolate* isolate = module_object->GetIsolate();
- Handle<FixedArray> result = isolate->factory()->NewFixedArray(2 * 2);
- int length = 0;
-
- Handle<String> exports_str =
- isolate->factory()->NewStringFromStaticChars("[[Exports]]");
- Handle<JSArray> exports_obj = wasm::GetExports(isolate, module_object);
- result->set(length++, *exports_str);
- result->set(length++, *exports_obj);
-
- Handle<String> imports_str =
- isolate->factory()->NewStringFromStaticChars("[[Imports]]");
- Handle<JSArray> imports_obj = wasm::GetImports(isolate, module_object);
- result->set(length++, *imports_str);
- result->set(length++, *imports_obj);
-
- return isolate->factory()->NewJSArrayWithElements(result, PACKED_ELEMENTS,
- length);
+ result = ArrayList::Add(
+ isolate, result,
+ isolate->factory()->NewStringFromStaticChars("[[Exports]]"),
+ wasm::GetExports(isolate, module_object));
+ result = ArrayList::Add(
+ isolate, result,
+ isolate->factory()->NewStringFromStaticChars("[[Imports]]"),
+ wasm::GetImports(isolate, module_object));
+ return result;
}
} // namespace internal
diff --git a/chromium/v8/src/debug/debug-wasm-objects.h b/chromium/v8/src/debug/debug-wasm-objects.h
index 6eb075b9b6a..62e437b727c 100644
--- a/chromium/v8/src/debug/debug-wasm-objects.h
+++ b/chromium/v8/src/debug/debug-wasm-objects.h
@@ -28,6 +28,7 @@ class WasmValue;
#include "torque-generated/src/debug/debug-wasm-objects-tq.inc"
+class ArrayList;
class WasmFrame;
class WasmInstanceObject;
class WasmModuleObject;
@@ -68,9 +69,15 @@ Handle<JSObject> GetWasmDebugProxy(WasmFrame* frame);
std::unique_ptr<debug::ScopeIterator> GetWasmScopeIterator(WasmFrame* frame);
-Handle<JSArray> GetWasmInstanceObjectInternalProperties(
+Handle<String> GetWasmFunctionDebugName(Isolate* isolate,
+ Handle<WasmInstanceObject> instance,
+ uint32_t func_index);
+
+Handle<ArrayList> AddWasmInstanceObjectInternalProperties(
+ Isolate* isolate, Handle<ArrayList> result,
Handle<WasmInstanceObject> instance);
-Handle<JSArray> GetWasmModuleObjectInternalProperties(
+Handle<ArrayList> AddWasmModuleObjectInternalProperties(
+ Isolate* isolate, Handle<ArrayList> result,
Handle<WasmModuleObject> module_object);
} // namespace internal
diff --git a/chromium/v8/src/debug/debug.cc b/chromium/v8/src/debug/debug.cc
index 0b873f7c8c3..42b800ef75d 100644
--- a/chromium/v8/src/debug/debug.cc
+++ b/chromium/v8/src/debug/debug.cc
@@ -352,7 +352,6 @@ void Debug::ThreadInit() {
thread_local_.return_value_ = Smi::zero();
thread_local_.last_breakpoint_id_ = 0;
clear_suspended_generator();
- thread_local_.restart_fp_ = kNullAddress;
base::Relaxed_Store(&thread_local_.current_debug_scope_,
static_cast<base::AtomicWord>(0));
thread_local_.break_on_next_function_call_ = false;
@@ -439,9 +438,6 @@ void Debug::Unload() {
}
void Debug::Break(JavaScriptFrame* frame, Handle<JSFunction> break_target) {
- // Initialize LiveEdit.
- LiveEdit::InitializeThreadLocal(this);
-
// Just continue if breaks are disabled or debugger cannot be loaded.
if (break_disabled()) return;
@@ -1245,7 +1241,7 @@ class DiscardBaselineCodeVisitor : public ThreadVisitor {
BaselineFrame* frame = BaselineFrame::cast(it.frame());
int bytecode_offset = frame->GetBytecodeOffset();
Address* pc_addr = frame->pc_address();
- Address advance = BUILTIN_CODE(isolate, InterpreterEnterBytecodeAdvance)
+ Address advance = BUILTIN_CODE(isolate, InterpreterEnterAtNextBytecode)
->InstructionStart();
PointerAuthentication::ReplacePC(pc_addr, advance, kSystemPointerSize);
InterpretedFrame::cast(it.Reframe())
@@ -1264,8 +1260,8 @@ class DiscardBaselineCodeVisitor : public ThreadVisitor {
Address* pc_addr = frame->pc_address();
Builtins::Name advance =
builtin_index == Builtins::kBaselineEnterAtBytecode
- ? Builtins::kInterpreterEnterBytecodeDispatch
- : Builtins::kInterpreterEnterBytecodeAdvance;
+ ? Builtins::kInterpreterEnterAtBytecode
+ : Builtins::kInterpreterEnterAtNextBytecode;
Address advance_pc =
isolate->builtins()->builtin(advance).InstructionStart();
PointerAuthentication::ReplacePC(pc_addr, advance_pc,
@@ -1353,7 +1349,8 @@ void Debug::PrepareFunctionForDebugExecution(
DCHECK(shared->is_compiled());
DCHECK(shared->HasDebugInfo());
Handle<DebugInfo> debug_info = GetOrCreateDebugInfo(shared);
- if (debug_info->flags() & DebugInfo::kPreparedForDebugExecution) return;
+ if (debug_info->flags(kRelaxedLoad) & DebugInfo::kPreparedForDebugExecution)
+ return;
if (shared->HasBytecodeArray()) {
SharedFunctionInfo::InstallDebugBytecode(shared, isolate_);
@@ -1372,8 +1369,9 @@ void Debug::PrepareFunctionForDebugExecution(
redirect_visitor.VisitThread(isolate_, isolate_->thread_local_top());
isolate_->thread_manager()->IterateArchivedThreads(&redirect_visitor);
}
- debug_info->set_flags(debug_info->flags() |
- DebugInfo::kPreparedForDebugExecution);
+ debug_info->set_flags(
+ debug_info->flags(kRelaxedLoad) | DebugInfo::kPreparedForDebugExecution,
+ kRelaxedStore);
}
void Debug::InstallDebugBreakTrampoline() {
@@ -1630,7 +1628,9 @@ bool Debug::FindSharedFunctionInfosIntersectingRange(
}
if (!triedTopLevelCompile && !candidateSubsumesRange &&
- script->shared_function_infos().length() > 0) {
+ script->shared_function_info_count() > 0) {
+ DCHECK_LE(script->shared_function_info_count(),
+ script->shared_function_infos().length());
MaybeObject maybeToplevel = script->shared_function_infos().Get(0);
HeapObject heap_object;
const bool topLevelInfoExists =
@@ -1751,10 +1751,10 @@ void Debug::CreateBreakInfo(Handle<SharedFunctionInfo> shared) {
Handle<FixedArray> break_points(
factory->NewFixedArray(DebugInfo::kEstimatedNofBreakPointsInFunction));
- int flags = debug_info->flags();
+ int flags = debug_info->flags(kRelaxedLoad);
flags |= DebugInfo::kHasBreakInfo;
if (CanBreakAtEntry(shared)) flags |= DebugInfo::kCanBreakAtEntry;
- debug_info->set_flags(flags);
+ debug_info->set_flags(flags, kRelaxedStore);
debug_info->set_break_points(*break_points);
SharedFunctionInfo::EnsureSourcePositionsAvailable(isolate_, shared);
@@ -1781,7 +1781,9 @@ void Debug::InstallCoverageInfo(Handle<SharedFunctionInfo> shared,
DCHECK(!debug_info->HasCoverageInfo());
- debug_info->set_flags(debug_info->flags() | DebugInfo::kHasCoverageInfo);
+ debug_info->set_flags(
+ debug_info->flags(kRelaxedLoad) | DebugInfo::kHasCoverageInfo,
+ kRelaxedStore);
debug_info->set_coverage_info(*coverage_info);
}
@@ -1871,27 +1873,6 @@ bool Debug::IsBreakAtReturn(JavaScriptFrame* frame) {
return location.IsReturn();
}
-void Debug::ScheduleFrameRestart(StackFrame* frame) {
- // Set a target FP for the FrameDropperTrampoline builtin to drop to once
- // we return from the debugger.
- DCHECK(frame->is_java_script());
- // Only reschedule to a frame further below a frame we already scheduled for.
- if (frame->fp() <= thread_local_.restart_fp_) return;
- // If the frame is optimized, trigger a deopt and jump into the
- // FrameDropperTrampoline in the deoptimizer.
- thread_local_.restart_fp_ = frame->fp();
-
- // Reset break frame ID to the frame below the restarted frame.
- StackTraceFrameIterator it(isolate_);
- thread_local_.break_frame_id_ = StackFrameId::NO_ID;
- for (StackTraceFrameIterator it(isolate_); !it.done(); it.Advance()) {
- if (it.frame()->fp() > thread_local_.restart_fp_) {
- thread_local_.break_frame_id_ = it.frame()->id();
- return;
- }
- }
-}
-
Handle<FixedArray> Debug::GetLoadedScripts() {
isolate_->heap()->CollectAllGarbage(Heap::kNoGCFlags,
GarbageCollectionReason::kDebugger);
@@ -2243,8 +2224,6 @@ void Debug::UpdateHookOnFunctionCall() {
}
void Debug::HandleDebugBreak(IgnoreBreakMode ignore_break_mode) {
- // Initialize LiveEdit.
- LiveEdit::InitializeThreadLocal(this);
// Ignore debug break during bootstrapping.
if (isolate_->bootstrapper()->IsActive()) return;
// Just continue if breaks are disabled.
@@ -2291,12 +2270,14 @@ void Debug::PrintBreakLocation() {
StackTraceFrameIterator iterator(isolate_);
if (iterator.done()) return;
CommonFrame* frame = iterator.frame();
- FrameSummary summary = FrameSummary::GetTop(frame);
- summary.EnsureSourcePositionsAvailable();
- int source_position = summary.SourcePosition();
- Handle<Object> script_obj = summary.script();
+ std::vector<FrameSummary> frames;
+ frame->Summarize(&frames);
+ int inlined_frame_index = static_cast<int>(frames.size() - 1);
+ FrameInspector inspector(frame, inlined_frame_index, isolate_);
+ int source_position = inspector.GetSourcePosition();
+ Handle<Object> script_obj = inspector.GetScript();
PrintF("[debug] break in function '");
- summary.FunctionName()->PrintOn(stdout);
+ inspector.GetFunctionName()->PrintOn(stdout);
PrintF("'.\n");
if (script_obj->IsScript()) {
Handle<Script> script = Handle<Script>::cast(script_obj);
diff --git a/chromium/v8/src/debug/debug.h b/chromium/v8/src/debug/debug.h
index 86c067c0357..28c6942c42f 100644
--- a/chromium/v8/src/debug/debug.h
+++ b/chromium/v8/src/debug/debug.h
@@ -311,9 +311,6 @@ class V8_EXPORT_PRIVATE Debug {
// Check whether this frame is just about to return.
bool IsBreakAtReturn(JavaScriptFrame* frame);
- // Support for LiveEdit
- void ScheduleFrameRestart(StackFrame* frame);
-
bool AllFramesOnStackAreBlackboxed();
// Set new script source, throw an exception if error occurred. When preview
@@ -380,13 +377,6 @@ class V8_EXPORT_PRIVATE Debug {
return reinterpret_cast<Address>(&thread_local_.suspended_generator_);
}
- Address restart_fp_address() {
- return reinterpret_cast<Address>(&thread_local_.restart_fp_);
- }
- bool will_restart() const {
- return thread_local_.restart_fp_ != kNullAddress;
- }
-
StepAction last_step_action() { return thread_local_.last_step_action_; }
bool break_on_next_function_call() const {
return thread_local_.break_on_next_function_call_;
@@ -548,9 +538,6 @@ class V8_EXPORT_PRIVATE Debug {
// The suspended generator object to track when stepping.
Object suspended_generator_;
- // The new frame pointer to drop to when restarting a frame.
- Address restart_fp_;
-
// Last used inspector breakpoint id.
int last_breakpoint_id_;
@@ -669,25 +656,6 @@ class SuppressDebug {
bool old_state_;
};
-// Code generator routines.
-class DebugCodegen : public AllStatic {
- public:
- enum DebugBreakCallHelperMode {
- SAVE_RESULT_REGISTER,
- IGNORE_RESULT_REGISTER
- };
-
- // Builtin to drop frames to restart function.
- static void GenerateFrameDropperTrampoline(MacroAssembler* masm);
-
- // Builtin to atomically (wrt deopts) handle debugger statement and
- // drop frames to restart function if necessary.
- static void GenerateHandleDebuggerStatement(MacroAssembler* masm);
-
- // Builtin to trigger a debug break before entering the function.
- static void GenerateDebugBreakTrampoline(MacroAssembler* masm);
-};
-
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/debug/ia32/debug-ia32.cc b/chromium/v8/src/debug/ia32/debug-ia32.cc
deleted file mode 100644
index 72d4ac37df8..00000000000
--- a/chromium/v8/src/debug/ia32/debug-ia32.cc
+++ /dev/null
@@ -1,54 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_IA32
-
-#include "src/debug/debug.h"
-
-#include "src/codegen/macro-assembler.h"
-#include "src/debug/liveedit.h"
-#include "src/execution/frames-inl.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-void DebugCodegen::GenerateHandleDebuggerStatement(MacroAssembler* masm) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kHandleDebuggerStatement, 0);
- }
- __ MaybeDropFrames();
-
- // Return to caller.
- __ ret(0);
-}
-
-void DebugCodegen::GenerateFrameDropperTrampoline(MacroAssembler* masm) {
- // Frame is being dropped:
- // - Drop to the target frame specified by eax.
- // - Look up current function on the frame.
- // - Leave the frame.
- // - Restart the frame by calling the function.
- __ mov(ebp, eax);
- __ mov(edi, Operand(ebp, StandardFrameConstants::kFunctionOffset));
- __ mov(eax, Operand(ebp, StandardFrameConstants::kArgCOffset));
- __ leave();
-
- // The arguments are already in the stack (including any necessary padding),
- // we should not try to massage the arguments again.
- __ mov(ecx, Immediate(kDontAdaptArgumentsSentinel));
- __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
- __ InvokeFunctionCode(edi, no_reg, ecx, eax, JUMP_FUNCTION);
-}
-
-const bool LiveEdit::kFrameDropperSupported = true;
-
-#undef __
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_IA32
diff --git a/chromium/v8/src/debug/liveedit.cc b/chromium/v8/src/debug/liveedit.cc
index 294f0e1f7bb..a3065bb49fb 100644
--- a/chromium/v8/src/debug/liveedit.cc
+++ b/chromium/v8/src/debug/liveedit.cc
@@ -66,31 +66,35 @@ class Differencer {
public:
explicit Differencer(Comparator::Input* input)
: input_(input), len1_(input->GetLength1()), len2_(input->GetLength2()) {
- buffer_ = NewArray<int>(len1_ * len2_);
- }
- ~Differencer() {
- DeleteArray(buffer_);
}
void Initialize() {
- int array_size = len1_ * len2_;
- for (int i = 0; i < array_size; i++) {
- buffer_[i] = kEmptyCellValue;
- }
}
// Makes sure that result for the full problem is calculated and stored
// in the table together with flags showing a path through subproblems.
void FillTable() {
- CompareUpToTail(0, 0);
+ // Determine common prefix to skip.
+ int minLen = std::min(len1_, len2_);
+ while (prefixLen_ < minLen && input_->Equals(prefixLen_, prefixLen_)) {
+ ++prefixLen_;
+ }
+
+ // Pre-fill common suffix in the table.
+ for (int pos1 = len1_, pos2 = len2_; pos1 > prefixLen_ &&
+ pos2 > prefixLen_ &&
+ input_->Equals(--pos1, --pos2);) {
+ set_value4_and_dir(pos1, pos2, 0, EQ);
+ }
+
+ CompareUpToTail(prefixLen_, prefixLen_);
}
void SaveResult(Comparator::Output* chunk_writer) {
ResultWriter writer(chunk_writer);
- int pos1 = 0;
- int pos2 = 0;
- while (true) {
+ if (prefixLen_) writer.eq(prefixLen_);
+ for (int pos1 = prefixLen_, pos2 = prefixLen_; true;) {
if (pos1 < len1_) {
if (pos2 < len2_) {
Direction dir = get_direction(pos1, pos2);
@@ -128,9 +132,10 @@ class Differencer {
private:
Comparator::Input* input_;
- int* buffer_;
+ std::map<std::pair<int, int>, int> buffer_;
int len1_;
int len2_;
+ int prefixLen_ = 0;
enum Direction {
EQ = 0,
@@ -144,51 +149,51 @@ class Differencer {
// Computes result for a subtask and optionally caches it in the buffer table.
// All results values are shifted to make space for flags in the lower bits.
int CompareUpToTail(int pos1, int pos2) {
- if (pos1 < len1_) {
- if (pos2 < len2_) {
- int cached_res = get_value4(pos1, pos2);
- if (cached_res == kEmptyCellValue) {
- Direction dir;
- int res;
- if (input_->Equals(pos1, pos2)) {
- res = CompareUpToTail(pos1 + 1, pos2 + 1);
- dir = EQ;
- } else {
- int res1 = CompareUpToTail(pos1 + 1, pos2) +
- (1 << kDirectionSizeBits);
- int res2 = CompareUpToTail(pos1, pos2 + 1) +
- (1 << kDirectionSizeBits);
- if (res1 == res2) {
- res = res1;
- dir = SKIP_ANY;
- } else if (res1 < res2) {
- res = res1;
- dir = SKIP1;
- } else {
- res = res2;
- dir = SKIP2;
- }
- }
- set_value4_and_dir(pos1, pos2, res, dir);
- cached_res = res;
- }
- return cached_res;
+ if (pos1 == len1_) {
+ return (len2_ - pos2) << kDirectionSizeBits;
+ }
+ if (pos2 == len2_) {
+ return (len1_ - pos1) << kDirectionSizeBits;
+ }
+ int res = get_value4(pos1, pos2);
+ if (res != kEmptyCellValue) {
+ return res;
+ }
+ Direction dir;
+ if (input_->Equals(pos1, pos2)) {
+ res = CompareUpToTail(pos1 + 1, pos2 + 1);
+ dir = EQ;
+ } else {
+ int res1 = CompareUpToTail(pos1 + 1, pos2) + (1 << kDirectionSizeBits);
+ int res2 = CompareUpToTail(pos1, pos2 + 1) + (1 << kDirectionSizeBits);
+ if (res1 == res2) {
+ res = res1;
+ dir = SKIP_ANY;
+ } else if (res1 < res2) {
+ res = res1;
+ dir = SKIP1;
} else {
- return (len1_ - pos1) << kDirectionSizeBits;
+ res = res2;
+ dir = SKIP2;
}
- } else {
- return (len2_ - pos2) << kDirectionSizeBits;
}
+ set_value4_and_dir(pos1, pos2, res, dir);
+ return res;
}
- inline int& get_cell(int i1, int i2) {
- return buffer_[i1 + i2 * len1_];
+ inline int get_cell(int i1, int i2) {
+ auto it = buffer_.find(std::make_pair(i1, i2));
+ return it == buffer_.end() ? kEmptyCellValue : it->second;
+ }
+
+ inline void set_cell(int i1, int i2, int value) {
+ buffer_.insert(std::make_pair(std::make_pair(i1, i2), value));
}
// Each cell keeps a value plus direction. Value is multiplied by 4.
void set_value4_and_dir(int i1, int i2, int value4, Direction dir) {
DCHECK_EQ(0, value4 & kDirectionMask);
- get_cell(i1, i2) = value4 | dir;
+ set_cell(i1, i2, value4 | dir);
}
int get_value4(int i1, int i2) {
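(Annotation, not part of the patch.) The hunks above replace the dense NewArray<int>(len1_ * len2_) table with a sparse std::map cache keyed by (pos1, pos2), plus trimming of the common prefix and suffix before the recursion starts. A minimal standalone sketch of the same idea, assuming plain std::string inputs instead of V8's Comparator::Input interface:

// Standalone illustration only: memoized insert/delete edit distance with
// common prefix/suffix trimming and a sparse memo table, mirroring the shape
// of the new Differencer.
#include <algorithm>
#include <cstdio>
#include <map>
#include <string>
#include <utility>

namespace {

int Compare(const std::string& a, const std::string& b, int pos1, int pos2,
            std::map<std::pair<int, int>, int>* memo) {
  const int len1 = static_cast<int>(a.size());
  const int len2 = static_cast<int>(b.size());
  if (pos1 == len1) return len2 - pos2;  // only insertions remain
  if (pos2 == len2) return len1 - pos1;  // only deletions remain
  auto it = memo->find({pos1, pos2});
  if (it != memo->end()) return it->second;
  int res;
  if (a[pos1] == b[pos2]) {
    res = Compare(a, b, pos1 + 1, pos2 + 1, memo);
  } else {
    res = 1 + std::min(Compare(a, b, pos1 + 1, pos2, memo),
                       Compare(a, b, pos1, pos2 + 1, memo));
  }
  memo->emplace(std::make_pair(pos1, pos2), res);
  return res;
}

int Distance(const std::string& a, const std::string& b) {
  int prefix = 0;
  int end1 = static_cast<int>(a.size());
  int end2 = static_cast<int>(b.size());
  // Skip the shared prefix and suffix; only the middle needs the memo table.
  while (prefix < end1 && prefix < end2 && a[prefix] == b[prefix]) ++prefix;
  while (end1 > prefix && end2 > prefix && a[end1 - 1] == b[end2 - 1]) {
    --end1;
    --end2;
  }
  std::map<std::pair<int, int>, int> memo;
  return Compare(a.substr(prefix, end1 - prefix),
                 b.substr(prefix, end2 - prefix), 0, 0, &memo);
}

}  // namespace

int main() {
  // Two sources differing in one character: distance is 2 (one delete, one insert).
  std::printf("%d\n", Distance("function f() { return 1; }",
                               "function f() { return 2; }"));
}

The sparse cache only pays for the (pos1, pos2) subproblems actually visited, which is what makes skipping the shared prefix and suffix worthwhile for typical live-edit inputs that change only a small region of a large script.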
@@ -214,10 +219,10 @@ class Differencer {
: chunk_writer_(chunk_writer), pos1_(0), pos2_(0),
pos1_begin_(-1), pos2_begin_(-1), has_open_chunk_(false) {
}
- void eq() {
+ void eq(int len = 1) {
FlushChunk();
- pos1_++;
- pos2_++;
+ pos1_ += len;
+ pos2_ += len;
}
void skip1(int len1) {
StartChunk();
@@ -782,10 +787,8 @@ bool ParseScript(Isolate* isolate, Handle<Script> script, ParseInfo* parse_info,
}
struct FunctionData {
- FunctionData(FunctionLiteral* literal, bool should_restart)
- : literal(literal),
- stack_position(NOT_ON_STACK),
- should_restart(should_restart) {}
+ explicit FunctionData(FunctionLiteral* literal)
+ : literal(literal), stack_position(NOT_ON_STACK) {}
FunctionLiteral* literal;
MaybeHandle<SharedFunctionInfo> shared;
@@ -794,23 +797,14 @@ struct FunctionData {
// In case of multiple functions with different stack position, the latest
// one (in the order below) is used, since it is the most restrictive.
// This is important only for functions to be restarted.
- enum StackPosition {
- NOT_ON_STACK,
- ABOVE_BREAK_FRAME,
- PATCHABLE,
- BELOW_NON_DROPPABLE_FRAME,
- ARCHIVED_THREAD,
- };
+ enum StackPosition { NOT_ON_STACK, ON_STACK };
StackPosition stack_position;
- bool should_restart;
};
class FunctionDataMap : public ThreadVisitor {
public:
- void AddInterestingLiteral(int script_id, FunctionLiteral* literal,
- bool should_restart) {
- map_.emplace(GetFuncId(script_id, literal),
- FunctionData{literal, should_restart});
+ void AddInterestingLiteral(int script_id, FunctionLiteral* literal) {
+ map_.emplace(GetFuncId(script_id, literal), FunctionData{literal});
}
bool Lookup(SharedFunctionInfo sfi, FunctionData** data) {
@@ -827,7 +821,7 @@ class FunctionDataMap : public ThreadVisitor {
return Lookup(GetFuncId(script->id(), literal), data);
}
- void Fill(Isolate* isolate, Address* restart_frame_fp) {
+ void Fill(Isolate* isolate) {
{
HeapObjectIterator iterator(isolate->heap(),
HeapObjectIterator::kFilterUnreachable);
@@ -854,38 +848,11 @@ class FunctionDataMap : public ThreadVisitor {
}
}
}
- FunctionData::StackPosition stack_position =
- isolate->debug()->break_frame_id() == StackFrameId::NO_ID
- ? FunctionData::PATCHABLE
- : FunctionData::ABOVE_BREAK_FRAME;
- for (StackFrameIterator it(isolate); !it.done(); it.Advance()) {
- StackFrame* frame = it.frame();
- if (stack_position == FunctionData::ABOVE_BREAK_FRAME) {
- if (frame->id() == isolate->debug()->break_frame_id()) {
- stack_position = FunctionData::PATCHABLE;
- }
- }
- if (stack_position == FunctionData::PATCHABLE &&
- (frame->is_exit() || frame->is_builtin_exit())) {
- stack_position = FunctionData::BELOW_NON_DROPPABLE_FRAME;
- continue;
- }
- if (!frame->is_java_script()) continue;
- std::vector<Handle<SharedFunctionInfo>> sfis;
- JavaScriptFrame::cast(frame)->GetFunctions(&sfis);
- for (auto& sfi : sfis) {
- if (stack_position == FunctionData::PATCHABLE &&
- IsResumableFunction(sfi->kind())) {
- stack_position = FunctionData::BELOW_NON_DROPPABLE_FRAME;
- }
- FunctionData* data = nullptr;
- if (!Lookup(*sfi, &data)) continue;
- if (!data->should_restart) continue;
- data->stack_position = stack_position;
- *restart_frame_fp = frame->fp();
- }
- }
+ // Visit the current thread stack.
+ VisitThread(isolate, isolate->thread_local_top());
+
+ // Visit the stacks of all archived threads.
isolate->thread_manager()->IterateArchivedThreads(this);
}
@@ -932,7 +899,7 @@ class FunctionDataMap : public ThreadVisitor {
for (auto& sfi : sfis) {
FunctionData* data = nullptr;
if (!Lookup(*sfi, &data)) continue;
- data->stack_position = FunctionData::ARCHIVED_THREAD;
+ data->stack_position = FunctionData::ON_STACK;
}
}
}
@@ -940,11 +907,10 @@ class FunctionDataMap : public ThreadVisitor {
std::map<FuncId, FunctionData> map_;
};
-bool CanPatchScript(
- const LiteralMap& changed, Handle<Script> script, Handle<Script> new_script,
- FunctionDataMap& function_data_map, // NOLINT(runtime/references)
- debug::LiveEditResult* result) {
- debug::LiveEditResult::Status status = debug::LiveEditResult::OK;
+bool CanPatchScript(const LiteralMap& changed, Handle<Script> script,
+ Handle<Script> new_script,
+ FunctionDataMap& function_data_map,
+ debug::LiveEditResult* result) {
for (const auto& mapping : changed) {
FunctionData* data = nullptr;
function_data_map.Lookup(script, mapping.first, &data);
@@ -953,55 +919,11 @@ bool CanPatchScript(
Handle<SharedFunctionInfo> sfi;
if (!data->shared.ToHandle(&sfi)) {
continue;
- } else if (!data->should_restart) {
- UNREACHABLE();
- } else if (data->stack_position == FunctionData::ABOVE_BREAK_FRAME) {
- status = debug::LiveEditResult::BLOCKED_BY_FUNCTION_ABOVE_BREAK_FRAME;
- } else if (data->stack_position ==
- FunctionData::BELOW_NON_DROPPABLE_FRAME) {
- status =
- debug::LiveEditResult::BLOCKED_BY_FUNCTION_BELOW_NON_DROPPABLE_FRAME;
- } else if (!data->running_generators.empty()) {
- status = debug::LiveEditResult::BLOCKED_BY_RUNNING_GENERATOR;
- } else if (data->stack_position == FunctionData::ARCHIVED_THREAD) {
- status = debug::LiveEditResult::BLOCKED_BY_ACTIVE_FUNCTION;
- }
- if (status != debug::LiveEditResult::OK) {
- result->status = status;
+ } else if (data->stack_position == FunctionData::ON_STACK) {
+ result->status = debug::LiveEditResult::BLOCKED_BY_ACTIVE_FUNCTION;
return false;
- }
- }
- return true;
-}
-
-bool CanRestartFrame(
- Isolate* isolate, Address fp,
- FunctionDataMap& function_data_map, // NOLINT(runtime/references)
- const LiteralMap& changed, debug::LiveEditResult* result) {
- DCHECK_GT(fp, 0);
- StackFrame* restart_frame = nullptr;
- StackFrameIterator it(isolate);
- for (; !it.done(); it.Advance()) {
- if (it.frame()->fp() == fp) {
- restart_frame = it.frame();
- break;
- }
- }
- DCHECK(restart_frame && restart_frame->is_java_script());
- if (!LiveEdit::kFrameDropperSupported) {
- result->status = debug::LiveEditResult::FRAME_RESTART_IS_NOT_SUPPORTED;
- return false;
- }
- std::vector<Handle<SharedFunctionInfo>> sfis;
- JavaScriptFrame::cast(restart_frame)->GetFunctions(&sfis);
- for (auto& sfi : sfis) {
- FunctionData* data = nullptr;
- if (!function_data_map.Lookup(*sfi, &data)) continue;
- auto new_literal_it = changed.find(data->literal);
- if (new_literal_it == changed.end()) continue;
- if (new_literal_it->second->scope()->new_target_var()) {
- result->status =
- debug::LiveEditResult::BLOCKED_BY_NEW_TARGET_IN_RESTART_FRAME;
+ } else if (!data->running_generators.empty()) {
+ result->status = debug::LiveEditResult::BLOCKED_BY_RUNNING_GENERATOR;
return false;
}
}
@@ -1092,24 +1014,17 @@ void LiveEdit::PatchScript(Isolate* isolate, Handle<Script> script,
FunctionDataMap function_data_map;
for (const auto& mapping : changed) {
- function_data_map.AddInterestingLiteral(script->id(), mapping.first, true);
- function_data_map.AddInterestingLiteral(new_script->id(), mapping.second,
- false);
+ function_data_map.AddInterestingLiteral(script->id(), mapping.first);
+ function_data_map.AddInterestingLiteral(new_script->id(), mapping.second);
}
for (const auto& mapping : unchanged) {
- function_data_map.AddInterestingLiteral(script->id(), mapping.first, false);
+ function_data_map.AddInterestingLiteral(script->id(), mapping.first);
}
- Address restart_frame_fp = 0;
- function_data_map.Fill(isolate, &restart_frame_fp);
+ function_data_map.Fill(isolate);
if (!CanPatchScript(changed, script, new_script, function_data_map, result)) {
return;
}
- if (restart_frame_fp &&
- !CanRestartFrame(isolate, restart_frame_fp, function_data_map, changed,
- result)) {
- return;
- }
if (preview) {
result->status = debug::LiveEditResult::OK;
@@ -1273,16 +1188,6 @@ void LiveEdit::PatchScript(Isolate* isolate, Handle<Script> script,
}
#endif
- if (restart_frame_fp) {
- for (StackFrameIterator it(isolate); !it.done(); it.Advance()) {
- if (it.frame()->fp() == restart_frame_fp) {
- isolate->debug()->ScheduleFrameRestart(it.frame());
- result->stack_changed = true;
- break;
- }
- }
- }
-
int script_id = script->id();
script->set_id(new_script->id());
new_script->set_id(script_id);
@@ -1290,42 +1195,6 @@ void LiveEdit::PatchScript(Isolate* isolate, Handle<Script> script,
result->script = ToApiHandle<v8::debug::Script>(new_script);
}
-void LiveEdit::InitializeThreadLocal(Debug* debug) {
- debug->thread_local_.restart_fp_ = 0;
-}
-
-bool LiveEdit::RestartFrame(JavaScriptFrame* frame) {
- if (!LiveEdit::kFrameDropperSupported) return false;
- Isolate* isolate = frame->isolate();
- StackFrameId break_frame_id = isolate->debug()->break_frame_id();
- bool break_frame_found = break_frame_id == StackFrameId::NO_ID;
- for (StackFrameIterator it(isolate); !it.done(); it.Advance()) {
- StackFrame* current = it.frame();
- break_frame_found = break_frame_found || break_frame_id == current->id();
- if (current->fp() == frame->fp()) {
- if (break_frame_found) {
- isolate->debug()->ScheduleFrameRestart(current);
- return true;
- } else {
- return false;
- }
- }
- if (!break_frame_found) continue;
- if (current->is_exit() || current->is_builtin_exit()) {
- return false;
- }
- if (!current->is_java_script()) continue;
- std::vector<Handle<SharedFunctionInfo>> shareds;
- JavaScriptFrame::cast(current)->GetFunctions(&shareds);
- for (auto& shared : shareds) {
- if (IsResumableFunction(shared->kind())) {
- return false;
- }
- }
- }
- return false;
-}
-
void LiveEdit::CompareStrings(Isolate* isolate, Handle<String> s1,
Handle<String> s2,
std::vector<SourceChangeRange>* diffs) {
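(Annotation, not part of the patch.) Taken together, the liveedit.cc hunks remove frame restarting: CanRestartFrame, the ScheduleFrameRestart wiring, and the per-architecture frame-dropper trampolines all go away, and the remaining precondition is simply that no changed function is live on any stack and no changed function has a running generator. A rough sketch with hypothetical stand-in types (not the actual V8 API):

#include <vector>

// Hypothetical stand-ins; the real code works on SharedFunctionInfo /
// FunctionData entries filled in by FunctionDataMap::Fill().
struct ChangedFunction {
  bool on_stack = false;               // marked while visiting all thread stacks
  bool has_running_generator = false;  // live generator/async instances exist
};

enum class EditStatus { kOk, kBlockedByActiveFunction, kBlockedByRunningGenerator };

EditStatus CanPatch(const std::vector<ChangedFunction>& changed) {
  for (const ChangedFunction& f : changed) {
    if (f.on_stack) return EditStatus::kBlockedByActiveFunction;
    if (f.has_running_generator) return EditStatus::kBlockedByRunningGenerator;
  }
  return EditStatus::kOk;
}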
diff --git a/chromium/v8/src/debug/liveedit.h b/chromium/v8/src/debug/liveedit.h
index 4291efb2d0e..5e06d3f2a3f 100644
--- a/chromium/v8/src/debug/liveedit.h
+++ b/chromium/v8/src/debug/liveedit.h
@@ -56,11 +56,6 @@ struct SourceChangeRange {
class V8_EXPORT_PRIVATE LiveEdit : AllStatic {
public:
- static void InitializeThreadLocal(Debug* debug);
-
- // Restarts the call frame and completely drops all frames above it.
- static bool RestartFrame(JavaScriptFrame* frame);
-
static void CompareStrings(Isolate* isolate, Handle<String> a,
Handle<String> b,
std::vector<SourceChangeRange>* diffs);
@@ -69,8 +64,6 @@ class V8_EXPORT_PRIVATE LiveEdit : AllStatic {
static void PatchScript(Isolate* isolate, Handle<Script> script,
Handle<String> source, bool preview,
debug::LiveEditResult* result);
- // Architecture-specific constant.
- static const bool kFrameDropperSupported;
};
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/debug/mips/debug-mips.cc b/chromium/v8/src/debug/mips/debug-mips.cc
deleted file mode 100644
index d1ab6ec545f..00000000000
--- a/chromium/v8/src/debug/mips/debug-mips.cc
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_MIPS
-
-#include "src/debug/debug.h"
-
-#include "src/codegen/macro-assembler.h"
-#include "src/debug/liveedit.h"
-#include "src/execution/frames-inl.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-void DebugCodegen::GenerateHandleDebuggerStatement(MacroAssembler* masm) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kHandleDebuggerStatement, 0);
- }
- __ MaybeDropFrames();
-
- // Return to caller.
- __ Ret();
-}
-
-void DebugCodegen::GenerateFrameDropperTrampoline(MacroAssembler* masm) {
- // Frame is being dropped:
- // - Drop to the target frame specified by a1.
- // - Look up current function on the frame.
- // - Leave the frame.
- // - Restart the frame by calling the function.
- __ mov(fp, a1);
- __ lw(a1, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
- __ lw(a0, MemOperand(fp, StandardFrameConstants::kArgCOffset));
-
- // Pop return address and frame.
- __ LeaveFrame(StackFrame::INTERNAL);
-
- __ li(a2, Operand(kDontAdaptArgumentsSentinel));
-
- __ InvokeFunction(a1, a2, a0, JUMP_FUNCTION);
-}
-
-
-const bool LiveEdit::kFrameDropperSupported = true;
-
-#undef __
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_MIPS
diff --git a/chromium/v8/src/debug/mips64/debug-mips64.cc b/chromium/v8/src/debug/mips64/debug-mips64.cc
deleted file mode 100644
index 7b8e9e97447..00000000000
--- a/chromium/v8/src/debug/mips64/debug-mips64.cc
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_MIPS64
-
-#include "src/debug/debug.h"
-
-#include "src/codegen/macro-assembler.h"
-#include "src/debug/liveedit.h"
-#include "src/execution/frames-inl.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-void DebugCodegen::GenerateHandleDebuggerStatement(MacroAssembler* masm) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kHandleDebuggerStatement, 0);
- }
- __ MaybeDropFrames();
-
- // Return to caller.
- __ Ret();
-}
-
-void DebugCodegen::GenerateFrameDropperTrampoline(MacroAssembler* masm) {
- // Frame is being dropped:
- // - Drop to the target frame specified by a1.
- // - Look up current function on the frame.
- // - Leave the frame.
- // - Restart the frame by calling the function.
- __ mov(fp, a1);
- __ Ld(a1, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
- __ Ld(a0, MemOperand(fp, StandardFrameConstants::kArgCOffset));
-
- // Pop return address and frame.
- __ LeaveFrame(StackFrame::INTERNAL);
-
- __ li(a2, Operand(kDontAdaptArgumentsSentinel));
-
- __ InvokeFunction(a1, a2, a0, JUMP_FUNCTION);
-}
-
-
-const bool LiveEdit::kFrameDropperSupported = true;
-
-#undef __
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_MIPS64
diff --git a/chromium/v8/src/debug/ppc/OWNERS b/chromium/v8/src/debug/ppc/OWNERS
deleted file mode 100644
index 02c2cd757c9..00000000000
--- a/chromium/v8/src/debug/ppc/OWNERS
+++ /dev/null
@@ -1,5 +0,0 @@
-junyan@redhat.com
-joransiu@ca.ibm.com
-midawson@redhat.com
-mfarazma@redhat.com
-vasili.skurydzin@ibm.com
diff --git a/chromium/v8/src/debug/ppc/debug-ppc.cc b/chromium/v8/src/debug/ppc/debug-ppc.cc
deleted file mode 100644
index c083708d3a4..00000000000
--- a/chromium/v8/src/debug/ppc/debug-ppc.cc
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
-
-#include "src/debug/debug.h"
-
-#include "src/codegen/macro-assembler.h"
-#include "src/debug/liveedit.h"
-#include "src/execution/frames-inl.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-void DebugCodegen::GenerateHandleDebuggerStatement(MacroAssembler* masm) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kHandleDebuggerStatement, 0);
- }
- __ MaybeDropFrames();
-
- // Return to caller.
- __ Ret();
-}
-
-void DebugCodegen::GenerateFrameDropperTrampoline(MacroAssembler* masm) {
- // Frame is being dropped:
- // - Drop to the target frame specified by r4.
- // - Look up current function on the frame.
- // - Leave the frame.
- // - Restart the frame by calling the function.
-
- __ mr(fp, r4);
- __ LoadP(r4, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
- __ LoadP(r3, MemOperand(fp, StandardFrameConstants::kArgCOffset));
- __ LeaveFrame(StackFrame::INTERNAL);
-
- // The arguments are already in the stack (including any necessary padding),
- // we should not try to massage the arguments again.
- __ mov(r5, Operand(kDontAdaptArgumentsSentinel));
- __ InvokeFunction(r4, r5, r3, JUMP_FUNCTION);
-}
-
-const bool LiveEdit::kFrameDropperSupported = true;
-
-#undef __
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
diff --git a/chromium/v8/src/debug/riscv64/debug-riscv64.cc b/chromium/v8/src/debug/riscv64/debug-riscv64.cc
deleted file mode 100644
index b2923001509..00000000000
--- a/chromium/v8/src/debug/riscv64/debug-riscv64.cc
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2021 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_RISCV64
-
-#include "src/codegen/macro-assembler.h"
-#include "src/debug/debug.h"
-#include "src/debug/liveedit.h"
-#include "src/execution/frames-inl.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-void DebugCodegen::GenerateHandleDebuggerStatement(MacroAssembler* masm) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kHandleDebuggerStatement, 0);
- }
- __ MaybeDropFrames();
-
- // Return to caller.
- __ Ret();
-}
-
-void DebugCodegen::GenerateFrameDropperTrampoline(MacroAssembler* masm) {
- // Frame is being dropped:
- // - Drop to the target frame specified by a1.
- // - Look up current function on the frame.
- // - Leave the frame.
- // - Restart the frame by calling the function.
- __ mv(fp, a1);
- __ Ld(a1, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
-
- // Pop return address and frame.
- __ LeaveFrame(StackFrame::INTERNAL);
-
- __ Ld(a0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ Lhu(a0,
- FieldMemOperand(a0, SharedFunctionInfo::kFormalParameterCountOffset));
- __ mv(a2, a0);
-
- __ InvokeFunction(a1, a2, a0, JUMP_FUNCTION);
-}
-
-const bool LiveEdit::kFrameDropperSupported = true;
-
-#undef __
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_RISCV64
diff --git a/chromium/v8/src/debug/s390/debug-s390.cc b/chromium/v8/src/debug/s390/debug-s390.cc
deleted file mode 100644
index b58e70b8511..00000000000
--- a/chromium/v8/src/debug/s390/debug-s390.cc
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/init/v8.h"
-
-#if V8_TARGET_ARCH_S390
-
-#include "src/debug/debug.h"
-
-#include "src/codegen/macro-assembler.h"
-#include "src/debug/liveedit.h"
-#include "src/execution/frames-inl.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-void DebugCodegen::GenerateHandleDebuggerStatement(MacroAssembler* masm) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kHandleDebuggerStatement, 0);
- }
- __ MaybeDropFrames();
-
- // Return to caller.
- __ Ret();
-}
-
-void DebugCodegen::GenerateFrameDropperTrampoline(MacroAssembler* masm) {
- // Frame is being dropped:
- // - Drop to the target frame specified by r3.
- // - Look up current function on the frame.
- // - Leave the frame.
- // - Restart the frame by calling the function.
-
- __ mov(fp, r3);
- __ LoadU64(r3, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
- __ LoadU64(r2, MemOperand(fp, StandardFrameConstants::kArgCOffset));
- __ LeaveFrame(StackFrame::INTERNAL);
-
- // The arguments are already in the stack (including any necessary padding),
- // we should not try to massage the arguments again.
- __ mov(r4, Operand(kDontAdaptArgumentsSentinel));
- __ InvokeFunction(r3, r4, r2, JUMP_FUNCTION);
-}
-
-const bool LiveEdit::kFrameDropperSupported = true;
-
-#undef __
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_S390
diff --git a/chromium/v8/src/debug/x64/debug-x64.cc b/chromium/v8/src/debug/x64/debug-x64.cc
deleted file mode 100644
index 22092138318..00000000000
--- a/chromium/v8/src/debug/x64/debug-x64.cc
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_X64
-
-#include "src/debug/debug.h"
-
-#include "src/codegen/assembler.h"
-#include "src/codegen/macro-assembler.h"
-#include "src/debug/liveedit.h"
-#include "src/execution/frames-inl.h"
-#include "src/objects/objects-inl.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-void DebugCodegen::GenerateHandleDebuggerStatement(MacroAssembler* masm) {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kHandleDebuggerStatement, 0);
- }
- __ MaybeDropFrames();
-
- // Return to caller.
- __ ret(0);
-}
-
-void DebugCodegen::GenerateFrameDropperTrampoline(MacroAssembler* masm) {
- // Frame is being dropped:
- // - Drop to the target frame specified by rbx.
- // - Look up current function on the frame.
- // - Leave the frame.
- // - Restart the frame by calling the function.
- __ movq(rbp, rbx);
- __ movq(rdi, Operand(rbp, StandardFrameConstants::kFunctionOffset));
- __ movq(rax, Operand(rbp, StandardFrameConstants::kArgCOffset));
- __ leave();
-
- // The arguments are already in the stack (including any necessary padding),
- // we should not try to massage the arguments again.
- __ movq(rbx, Immediate(kDontAdaptArgumentsSentinel));
- __ InvokeFunction(rdi, no_reg, rbx, rax, JUMP_FUNCTION);
-}
-
-const bool LiveEdit::kFrameDropperSupported = true;
-
-#undef __
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_X64
diff --git a/chromium/v8/src/deoptimizer/deoptimizer-cfi-builtins.cc b/chromium/v8/src/deoptimizer/deoptimizer-cfi-builtins.cc
index 0b7472dbea6..bbeacc561dc 100644
--- a/chromium/v8/src/deoptimizer/deoptimizer-cfi-builtins.cc
+++ b/chromium/v8/src/deoptimizer/deoptimizer-cfi-builtins.cc
@@ -6,8 +6,8 @@
#include "src/deoptimizer/deoptimizer.h"
extern "C" {
-void Builtins_InterpreterEnterBytecodeAdvance();
-void Builtins_InterpreterEnterBytecodeDispatch();
+void Builtins_InterpreterEnterAtBytecode();
+void Builtins_InterpreterEnterAtNextBytecode();
void Builtins_ContinueToCodeStubBuiltinWithResult();
void Builtins_ContinueToCodeStubBuiltin();
void Builtins_ContinueToJavaScriptBuiltinWithResult();
@@ -24,8 +24,8 @@ namespace internal {
// List of allowed builtin addresses that we can return to in the deoptimizer.
constexpr function_ptr builtins[] = {
- &Builtins_InterpreterEnterBytecodeAdvance,
- &Builtins_InterpreterEnterBytecodeDispatch,
+ &Builtins_InterpreterEnterAtBytecode,
+ &Builtins_InterpreterEnterAtNextBytecode,
&Builtins_ContinueToCodeStubBuiltinWithResult,
&Builtins_ContinueToCodeStubBuiltin,
&Builtins_ContinueToJavaScriptBuiltinWithResult,
diff --git a/chromium/v8/src/deoptimizer/deoptimizer.cc b/chromium/v8/src/deoptimizer/deoptimizer.cc
index 7c4562dbd56..eabd9ebb41c 100644
--- a/chromium/v8/src/deoptimizer/deoptimizer.cc
+++ b/chromium/v8/src/deoptimizer/deoptimizer.cc
@@ -380,8 +380,7 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(NativeContext native_context) {
}
void Deoptimizer::DeoptimizeAll(Isolate* isolate) {
- RuntimeCallTimerScope runtimeTimer(isolate,
- RuntimeCallCounterId::kDeoptimizeCode);
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kDeoptimizeCode);
TimerEventScope<TimerEventDeoptimizeCode> timer(isolate);
TRACE_EVENT0("v8", "V8.DeoptimizeCode");
TraceDeoptAll(isolate);
@@ -399,8 +398,7 @@ void Deoptimizer::DeoptimizeAll(Isolate* isolate) {
}
void Deoptimizer::DeoptimizeMarkedCode(Isolate* isolate) {
- RuntimeCallTimerScope runtimeTimer(isolate,
- RuntimeCallCounterId::kDeoptimizeCode);
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kDeoptimizeCode);
TimerEventScope<TimerEventDeoptimizeCode> timer(isolate);
TRACE_EVENT0("v8", "V8.DeoptimizeCode");
TraceDeoptMarked(isolate);
@@ -427,8 +425,7 @@ void Deoptimizer::MarkAllCodeForContext(NativeContext native_context) {
void Deoptimizer::DeoptimizeFunction(JSFunction function, Code code) {
Isolate* isolate = function.GetIsolate();
- RuntimeCallTimerScope runtimeTimer(isolate,
- RuntimeCallCounterId::kDeoptimizeCode);
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kDeoptimizeCode);
TimerEventScope<TimerEventDeoptimizeCode> timer(isolate);
TRACE_EVENT0("v8", "V8.DeoptimizeCode");
function.ResetIfBytecodeFlushed();
@@ -969,8 +966,8 @@ void Deoptimizer::DoComputeOutputFrames() {
topmost->GetRegisterValues()->SetRegister(kRootRegister.code(),
isolate()->isolate_root());
#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
- topmost->GetRegisterValues()->SetRegister(kPointerCageBaseRegister.code(),
- isolate()->isolate_root());
+ topmost->GetRegisterValues()->SetRegister(kPtrComprCageBaseRegister.code(),
+ isolate()->cage_base());
#endif
// Print some helpful diagnostic information.
@@ -999,8 +996,8 @@ Builtins::Name DispatchBuiltinFor(bool is_baseline, bool advance_bc) {
return advance_bc ? Builtins::kBaselineEnterAtNextBytecode
: Builtins::kBaselineEnterAtBytecode;
} else {
- return advance_bc ? Builtins::kInterpreterEnterBytecodeAdvance
- : Builtins::kInterpreterEnterBytecodeDispatch;
+ return advance_bc ? Builtins::kInterpreterEnterAtNextBytecode
+ : Builtins::kInterpreterEnterAtBytecode;
}
}
diff --git a/chromium/v8/src/deoptimizer/riscv64/deoptimizer-riscv64.cc b/chromium/v8/src/deoptimizer/riscv64/deoptimizer-riscv64.cc
index 1cbe85ba5df..b7dceed503d 100644
--- a/chromium/v8/src/deoptimizer/riscv64/deoptimizer-riscv64.cc
+++ b/chromium/v8/src/deoptimizer/riscv64/deoptimizer-riscv64.cc
@@ -8,9 +8,9 @@ namespace v8 {
namespace internal {
const bool Deoptimizer::kSupportsFixedDeoptExitSizes = true;
-const int Deoptimizer::kNonLazyDeoptExitSize = 5 * kInstrSize;
-const int Deoptimizer::kLazyDeoptExitSize = 5 * kInstrSize;
-const int Deoptimizer::kEagerWithResumeBeforeArgsSize = 6 * kInstrSize;
+const int Deoptimizer::kNonLazyDeoptExitSize = 4 * kInstrSize;
+const int Deoptimizer::kLazyDeoptExitSize = 4 * kInstrSize;
+const int Deoptimizer::kEagerWithResumeBeforeArgsSize = 5 * kInstrSize;
const int Deoptimizer::kEagerWithResumeDeoptExitSize =
kEagerWithResumeBeforeArgsSize + 4 * kInstrSize;
const int Deoptimizer::kEagerWithResumeImmedArgs1PcOffset = kInstrSize;
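(Annotation, not part of the patch.) Assuming kInstrSize is 4 bytes on riscv64, this trims the non-lazy and lazy deopt exits from 20 to 16 bytes and the eager-with-resume prologue from 24 to 20 bytes, leaving the full eager-with-resume exit at 20 + 4 * 4 = 36 bytes.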
diff --git a/chromium/v8/src/deoptimizer/translated-state.cc b/chromium/v8/src/deoptimizer/translated-state.cc
index 02c473d22b1..abbc0104a8f 100644
--- a/chromium/v8/src/deoptimizer/translated-state.cc
+++ b/chromium/v8/src/deoptimizer/translated-state.cc
@@ -514,6 +514,12 @@ Handle<Object> TranslatedValue::GetValue() {
// pass the verifier.
container_->EnsureObjectAllocatedAt(this);
+ // Finish any sweeping so that it becomes safe to overwrite the ByteArray
+ // headers.
+ // TODO(hpayer): Find a cleaner way to support a group of
+ // non-fully-initialized objects.
+ isolate()->heap()->mark_compact_collector()->EnsureSweepingCompleted();
+
// 2. Initialize the objects. If we have allocated only byte arrays
// for some objects, we now overwrite the byte arrays with the
// correct object fields. Note that this phase does not allocate
@@ -1397,9 +1403,9 @@ TranslatedValue* TranslatedState::GetValueByObjectIndex(int object_index) {
}
Handle<HeapObject> TranslatedState::InitializeObjectAt(TranslatedValue* slot) {
- slot = ResolveCapturedObject(slot);
-
DisallowGarbageCollection no_gc;
+
+ slot = ResolveCapturedObject(slot);
if (slot->materialization_state() != TranslatedValue::kFinished) {
std::stack<int> worklist;
worklist.push(slot->object_index());
@@ -1883,7 +1889,7 @@ void TranslatedState::InitializeJSObjectAt(
WRITE_BARRIER(*object_storage, offset, *field_value);
}
}
- object_storage->synchronized_set_map(*map);
+ object_storage->set_map(*map, kReleaseStore);
}
void TranslatedState::InitializeObjectWithTaggedFieldsAt(
@@ -1920,7 +1926,7 @@ void TranslatedState::InitializeObjectWithTaggedFieldsAt(
WRITE_BARRIER(*object_storage, offset, *field_value);
}
- object_storage->synchronized_set_map(*map);
+ object_storage->set_map(*map, kReleaseStore);
}
TranslatedValue* TranslatedState::ResolveCapturedObject(TranslatedValue* slot) {
diff --git a/chromium/v8/src/diagnostics/compilation-statistics.cc b/chromium/v8/src/diagnostics/compilation-statistics.cc
index d7a67617e72..40bb239b125 100644
--- a/chromium/v8/src/diagnostics/compilation-statistics.cc
+++ b/chromium/v8/src/diagnostics/compilation-statistics.cc
@@ -2,11 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include <ostream> // NOLINT(readability/streams)
+#include "src/diagnostics/compilation-statistics.h"
+
+#include <ostream>
#include <vector>
#include "src/base/platform/platform.h"
-#include "src/diagnostics/compilation-statistics.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/diagnostics/disassembler.cc b/chromium/v8/src/diagnostics/disassembler.cc
index 6e6ecb3f034..91a7a72bbf5 100644
--- a/chromium/v8/src/diagnostics/disassembler.cc
+++ b/chromium/v8/src/diagnostics/disassembler.cc
@@ -237,10 +237,11 @@ static void PrintRelocInfo(StringBuilder* out, Isolate* isolate,
out->AddFormatted(" ;; %sobject: %s",
is_compressed ? "(compressed) " : "", obj_name.get());
} else if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
+ Address address = relocinfo->target_external_reference();
const char* reference_name =
- ref_encoder ? ref_encoder->NameOfAddress(
- isolate, relocinfo->target_external_reference())
- : "unknown";
+ ref_encoder
+ ? ref_encoder->NameOfAddress(isolate, address)
+ : ExternalReferenceTable::NameOfIsolateIndependentAddress(address);
out->AddFormatted(" ;; external reference (%s)", reference_name);
} else if (RelocInfo::IsCodeTargetMode(rmode)) {
out->AddFormatted(" ;; code:");
@@ -441,14 +442,16 @@ int Disassembler::Decode(Isolate* isolate, std::ostream* os, byte* begin,
"Builtins disassembly requires a readable .text section");
V8NameConverter v8NameConverter(isolate, code);
if (isolate) {
- // We have an isolate, so support external reference names.
+ // We have an isolate, so support external reference names from V8 and
+ // embedder.
SealHandleScope shs(isolate);
DisallowGarbageCollection no_alloc;
ExternalReferenceEncoder ref_encoder(isolate);
return DecodeIt(isolate, &ref_encoder, os, code, v8NameConverter, begin,
end, current_pc);
} else {
- // No isolate => isolate-independent code. No external reference names.
+ // No isolate => isolate-independent code. Only V8 External references
+ // available.
return DecodeIt(nullptr, nullptr, os, code, v8NameConverter, begin, end,
current_pc);
}
diff --git a/chromium/v8/src/diagnostics/eh-frame.cc b/chromium/v8/src/diagnostics/eh-frame.cc
index ccafd23cb70..0c85ae3ca41 100644
--- a/chromium/v8/src/diagnostics/eh-frame.cc
+++ b/chromium/v8/src/diagnostics/eh-frame.cc
@@ -55,7 +55,7 @@ STATIC_CONST_MEMBER_DEFINITION const int EhFrameConstants::kEhFrameHdrSize;
STATIC_CONST_MEMBER_DEFINITION const uint32_t EhFrameWriter::kInt32Placeholder;
// static
-void EhFrameWriter::WriteEmptyEhFrame(std::ostream& stream) { // NOLINT
+void EhFrameWriter::WriteEmptyEhFrame(std::ostream& stream) {
stream.put(EhFrameConstants::kEhFrameHdrVersion);
// .eh_frame pointer encoding specifier.
@@ -480,7 +480,7 @@ class V8_NODISCARD StreamModifiersScope final {
} // namespace
// static
-void EhFrameDisassembler::DumpDwarfDirectives(std::ostream& stream, // NOLINT
+void EhFrameDisassembler::DumpDwarfDirectives(std::ostream& stream,
const byte* start,
const byte* end) {
StreamModifiersScope modifiers_scope(&stream);
@@ -592,7 +592,7 @@ void EhFrameDisassembler::DumpDwarfDirectives(std::ostream& stream, // NOLINT
}
}
-void EhFrameDisassembler::DisassembleToStream(std::ostream& stream) { // NOLINT
+void EhFrameDisassembler::DisassembleToStream(std::ostream& stream) {
// The encoded CIE size does not include the size field itself.
const int cie_size =
base::ReadUnalignedValue<uint32_t>(reinterpret_cast<Address>(start_)) +
diff --git a/chromium/v8/src/diagnostics/eh-frame.h b/chromium/v8/src/diagnostics/eh-frame.h
index 9a22a97d58e..0d32085b0b0 100644
--- a/chromium/v8/src/diagnostics/eh-frame.h
+++ b/chromium/v8/src/diagnostics/eh-frame.h
@@ -80,7 +80,7 @@ class V8_EXPORT_PRIVATE EhFrameWriter {
//
// It is effectively a valid eh_frame_hdr with an empty look up table.
//
- static void WriteEmptyEhFrame(std::ostream& stream); // NOLINT
+ static void WriteEmptyEhFrame(std::ostream& stream);
// Write the CIE and FDE header. Call it before any other method.
void Initialize();
@@ -293,11 +293,11 @@ class EhFrameDisassembler final {
EhFrameDisassembler(const EhFrameDisassembler&) = delete;
EhFrameDisassembler& operator=(const EhFrameDisassembler&) = delete;
- void DisassembleToStream(std::ostream& stream); // NOLINT
+ void DisassembleToStream(std::ostream& stream);
private:
- static void DumpDwarfDirectives(std::ostream& stream, // NOLINT
- const byte* start, const byte* end);
+ static void DumpDwarfDirectives(std::ostream& stream, const byte* start,
+ const byte* end);
static const char* DwarfRegisterCodeToString(int code);
diff --git a/chromium/v8/src/diagnostics/ia32/disasm-ia32.cc b/chromium/v8/src/diagnostics/ia32/disasm-ia32.cc
index ae4cc02459a..687a214cab3 100644
--- a/chromium/v8/src/diagnostics/ia32/disasm-ia32.cc
+++ b/chromium/v8/src/diagnostics/ia32/disasm-ia32.cc
@@ -368,7 +368,7 @@ class DisassemblerIA32 {
Disassembler::kAbortOnUnimplementedOpcode) {
FATAL("Unimplemented instruction in disassembler");
} else {
- AppendToBuffer("'Unimplemented Instruction'");
+ AppendToBuffer("'Unimplemented instruction'");
}
}
};
@@ -2076,6 +2076,7 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
data += 3;
} else {
UnimplementedInstruction();
+ data += 1;
}
} break;
@@ -2889,7 +2890,7 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
outp += v8::internal::SNPrintF(out_buffer + outp, " %s", tmp_buffer_.begin());
return instr_len;
-} // NOLINT (function is too long)
+}
//------------------------------------------------------------------------------
diff --git a/chromium/v8/src/diagnostics/objects-debug.cc b/chromium/v8/src/diagnostics/objects-debug.cc
index b48df9385ae..6cfd8b3ad0c 100644
--- a/chromium/v8/src/diagnostics/objects-debug.cc
+++ b/chromium/v8/src/diagnostics/objects-debug.cc
@@ -61,6 +61,7 @@
#include "src/objects/js-weak-refs-inl.h"
#include "src/objects/literal-objects-inl.h"
#include "src/objects/maybe-object.h"
+#include "src/objects/megadom-handler-inl.h"
#include "src/objects/microtask-inl.h"
#include "src/objects/module-inl.h"
#include "src/objects/oddball-inl.h"
@@ -115,7 +116,7 @@ namespace internal {
}
void Object::ObjectVerify(Isolate* isolate) {
- RuntimeCallTimerScope timer(isolate, RuntimeCallCounterId::kObjectVerify);
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kObjectVerify);
if (IsSmi()) {
Smi::cast(*this).SmiVerify(isolate);
} else {
@@ -137,7 +138,7 @@ void MaybeObject::VerifyMaybeObjectPointer(Isolate* isolate, MaybeObject p) {
if (p->GetHeapObject(&heap_object)) {
HeapObject::VerifyHeapPointer(isolate, heap_object);
} else {
- CHECK(p->IsSmi() || p->IsCleared());
+ CHECK(p->IsSmi() || p->IsCleared() || MapWord::IsPacked(p->ptr()));
}
}
@@ -457,7 +458,11 @@ void Map::MapVerify(Isolate* isolate) {
(kTaggedSize <= instance_size() &&
static_cast<size_t>(instance_size()) < heap->Capacity()));
if (IsContextMap()) {
- CHECK(native_context().IsNativeContext());
+ // The map for the NativeContext is allocated before the NativeContext
+ // itself, so it may happen that during a GC the native_context() is still
+ // null.
+ CHECK(native_context_or_null().IsNull() ||
+ native_context().IsNativeContext());
} else {
if (GetBackPointer().IsUndefined(isolate)) {
// Root maps must not have descriptors in the descriptor array that do not
@@ -675,7 +680,7 @@ void SloppyArgumentsElementsVerify(Isolate* isolate,
for (int i = 0; i < nofMappedParameters; i++) {
// Verify that each context-mapped argument is either the hole or a valid
// Smi within context length range.
- Object mapped = elements.mapped_entries(i);
+ Object mapped = elements.mapped_entries(i, kRelaxedLoad);
if (mapped.IsTheHole(isolate)) {
// Slow sloppy arguments can be holey.
if (!is_fast) continue;
@@ -828,7 +833,6 @@ void JSFunction::JSFunctionVerify(Isolate* isolate) {
void SharedFunctionInfo::SharedFunctionInfoVerify(Isolate* isolate) {
// TODO(leszeks): Add a TorqueGeneratedClassVerifier for LocalIsolate.
- TorqueGeneratedClassVerifiers::SharedFunctionInfoVerify(*this, isolate);
this->SharedFunctionInfoVerify(ReadOnlyRoots(isolate));
}
@@ -1433,7 +1437,7 @@ void JSArrayBufferView::JSArrayBufferViewVerify(Isolate* isolate) {
void JSTypedArray::JSTypedArrayVerify(Isolate* isolate) {
TorqueGeneratedClassVerifiers::JSTypedArrayVerify(*this, isolate);
- CHECK_LE(length(), JSTypedArray::kMaxLength);
+ CHECK_LE(GetLength(), JSTypedArray::kMaxLength);
}
void JSDataView::JSDataViewVerify(Isolate* isolate) {
@@ -1685,7 +1689,13 @@ void AllocationSite::AllocationSiteVerify(Isolate* isolate) {
void Script::ScriptVerify(Isolate* isolate) {
TorqueGeneratedClassVerifiers::ScriptVerify(*this, isolate);
- for (int i = 0; i < shared_function_infos().length(); ++i) {
+ if V8_UNLIKELY (type() == Script::TYPE_WEB_SNAPSHOT) {
+ CHECK_LE(shared_function_info_count(), shared_function_infos().length());
+ } else {
+ // No overallocating shared_function_infos.
+ CHECK_EQ(shared_function_info_count(), shared_function_infos().length());
+ }
+ for (int i = 0; i < shared_function_info_count(); ++i) {
MaybeObject maybe_object = shared_function_infos().Get(i);
HeapObject heap_object;
CHECK(maybe_object->IsWeak() || maybe_object->IsCleared() ||
@@ -1736,6 +1746,12 @@ void StackFrameInfo::StackFrameInfoVerify(Isolate* isolate) {
#endif // V8_ENABLE_WEBASSEMBLY
}
+void FunctionTemplateRareData::FunctionTemplateRareDataVerify(
+ Isolate* isolate) {
+ CHECK(c_function_overloads().IsFixedArray() ||
+ c_function_overloads().IsUndefined(isolate));
+}
+
#endif // VERIFY_HEAP
#ifdef DEBUG
@@ -1795,6 +1811,7 @@ void JSObject::IncrementSpillStatistics(Isolate* isolate,
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) case TYPE##_ELEMENTS:
TYPED_ARRAYS(TYPED_ARRAY_CASE)
+ RAB_GSAB_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
{
info->number_of_objects_with_fast_elements_++;
diff --git a/chromium/v8/src/diagnostics/objects-printer.cc b/chromium/v8/src/diagnostics/objects-printer.cc
index aa1713e3b37..2c416513a9f 100644
--- a/chromium/v8/src/diagnostics/objects-printer.cc
+++ b/chromium/v8/src/diagnostics/objects-printer.cc
@@ -42,7 +42,7 @@ void Object::Print() const {
os << std::flush;
}
-void Object::Print(std::ostream& os) const { // NOLINT
+void Object::Print(std::ostream& os) const {
if (IsSmi()) {
os << "Smi: " << std::hex << "0x" << Smi::ToInt(*this);
os << std::dec << " (" << Smi::ToInt(*this) << ")\n";
@@ -54,7 +54,7 @@ void Object::Print(std::ostream& os) const { // NOLINT
namespace {
void PrintHeapObjectHeaderWithoutMap(HeapObject object, std::ostream& os,
- const char* id) { // NOLINT
+ const char* id) {
os << reinterpret_cast<void*>(object.ptr()) << ": [";
if (id != nullptr) {
os << id;
@@ -96,12 +96,12 @@ void PrintDictionaryContents(std::ostream& os, T dict) {
}
} // namespace
-void HeapObject::PrintHeader(std::ostream& os, const char* id) { // NOLINT
+void HeapObject::PrintHeader(std::ostream& os, const char* id) {
PrintHeapObjectHeaderWithoutMap(*this, os, id);
if (!IsMap()) os << "\n - map: " << Brief(map());
}
-void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
+void HeapObject::HeapObjectPrint(std::ostream& os) {
InstanceType instance_type = map().instance_type();
if (instance_type < FIRST_NONSTRING_TYPE) {
@@ -261,24 +261,24 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
}
}
-void ByteArray::ByteArrayPrint(std::ostream& os) { // NOLINT
+void ByteArray::ByteArrayPrint(std::ostream& os) {
PrintHeader(os, "ByteArray");
os << "\n - length: " << length()
<< "\n - data-start: " << static_cast<void*>(GetDataStartAddress())
<< "\n";
}
-void BytecodeArray::BytecodeArrayPrint(std::ostream& os) { // NOLINT
+void BytecodeArray::BytecodeArrayPrint(std::ostream& os) {
PrintHeader(os, "BytecodeArray");
os << "\n";
Disassemble(os);
}
-void FreeSpace::FreeSpacePrint(std::ostream& os) { // NOLINT
+void FreeSpace::FreeSpacePrint(std::ostream& os) {
os << "free space, size " << Size() << "\n";
}
-bool JSObject::PrintProperties(std::ostream& os) { // NOLINT
+bool JSObject::PrintProperties(std::ostream& os) {
if (HasFastProperties()) {
DescriptorArray descs = map().instance_descriptors(GetIsolate());
int nof_inobject_properties = map().GetInObjectProperties();
@@ -344,7 +344,7 @@ double GetScalarElement(T array, int index) {
}
template <class T>
-void DoPrintElements(std::ostream& os, Object object, int length) { // NOLINT
+void DoPrintElements(std::ostream& os, Object object, int length) {
const bool print_the_hole = std::is_same<T, FixedDoubleArray>::value;
T array = T::cast(object);
if (length == 0) return;
@@ -448,7 +448,7 @@ void PrintSloppyArgumentElements(std::ostream& os, ElementsKind kind,
<< "\n 1: arguments_store: " << Brief(arguments_store)
<< "\n parameter to context slot map:";
for (int i = 0; i < elements.length(); i++) {
- Object mapped_entry = elements.mapped_entries(i);
+ Object mapped_entry = elements.mapped_entries(i, kRelaxedLoad);
os << "\n " << i << ": param(" << i << "): " << Brief(mapped_entry);
if (mapped_entry.IsTheHole()) {
os << " in the arguments_store[" << i << "]";
@@ -468,20 +468,20 @@ void PrintSloppyArgumentElements(std::ostream& os, ElementsKind kind,
}
}
-void PrintEmbedderData(PtrComprCageBase cage_base, std::ostream& os,
+void PrintEmbedderData(Isolate* isolate, std::ostream& os,
EmbedderDataSlot slot) {
DisallowGarbageCollection no_gc;
Object value = slot.load_tagged();
os << Brief(value);
void* raw_pointer;
- if (slot.ToAlignedPointer(cage_base, &raw_pointer)) {
+ if (slot.ToAlignedPointer(isolate, &raw_pointer)) {
os << ", aligned pointer: " << raw_pointer;
}
}
} // namespace
-void JSObject::PrintElements(std::ostream& os) { // NOLINT
+void JSObject::PrintElements(std::ostream& os) {
// Don't call GetElementsKind, its validation code can cause the printer to
// fail when debugging.
os << " - elements: " << Brief(elements()) << " {";
@@ -508,7 +508,7 @@ void JSObject::PrintElements(std::ostream& os) { // NOLINT
#define PRINT_ELEMENTS(Type, type, TYPE, elementType) \
case TYPE##_ELEMENTS: { \
- size_t length = JSTypedArray::cast(*this).length(); \
+ size_t length = JSTypedArray::cast(*this).GetLength(); \
bool is_on_heap = JSTypedArray::cast(*this).is_on_heap(); \
const elementType* data_ptr = \
static_cast<const elementType*>(JSTypedArray::cast(*this).DataPtr()); \
@@ -516,6 +516,7 @@ void JSObject::PrintElements(std::ostream& os) { // NOLINT
break; \
}
TYPED_ARRAYS(PRINT_ELEMENTS)
+ RAB_GSAB_TYPED_ARRAYS(PRINT_ELEMENTS)
#undef PRINT_ELEMENTS
case DICTIONARY_ELEMENTS:
@@ -534,7 +535,7 @@ void JSObject::PrintElements(std::ostream& os) { // NOLINT
}
static void JSObjectPrintHeader(std::ostream& os, JSObject obj,
- const char* id) { // NOLINT
+ const char* id) {
Isolate* isolate = obj.GetIsolate();
obj.PrintHeader(os, id);
// Don't call GetElementsKind, its validation code can cause the printer to
@@ -560,8 +561,7 @@ static void JSObjectPrintHeader(std::ostream& os, JSObject obj,
}
}
-static void JSObjectPrintBody(std::ostream& os,
- JSObject obj, // NOLINT
+static void JSObjectPrintBody(std::ostream& os, JSObject obj,
bool print_elements = true) {
os << "\n - properties: ";
Object properties_or_hash = obj.raw_properties_or_hash();
@@ -573,28 +573,28 @@ static void JSObjectPrintBody(std::ostream& os,
os << "}\n";
if (print_elements) {
- size_t length = obj.IsJSTypedArray() ? JSTypedArray::cast(obj).length()
+ size_t length = obj.IsJSTypedArray() ? JSTypedArray::cast(obj).GetLength()
: obj.elements().length();
if (length > 0) obj.PrintElements(os);
}
int embedder_fields = obj.GetEmbedderFieldCount();
if (embedder_fields > 0) {
- PtrComprCageBase cage_base = GetPtrComprCageBase(obj);
+ Isolate* isolate = GetIsolateForHeapSandbox(obj);
os << " - embedder fields = {";
for (int i = 0; i < embedder_fields; i++) {
os << "\n ";
- PrintEmbedderData(cage_base, os, EmbedderDataSlot(obj, i));
+ PrintEmbedderData(isolate, os, EmbedderDataSlot(obj, i));
}
os << "\n }\n";
}
}
-void JSObject::JSObjectPrint(std::ostream& os) { // NOLINT
+void JSObject::JSObjectPrint(std::ostream& os) {
JSObjectPrintHeader(os, *this, nullptr);
JSObjectPrintBody(os, *this);
}
-void JSGeneratorObject::JSGeneratorObjectPrint(std::ostream& os) { // NOLINT
+void JSGeneratorObject::JSGeneratorObjectPrint(std::ostream& os) {
JSObjectPrintHeader(os, *this, "JSGeneratorObject");
os << "\n - function: " << Brief(function());
os << "\n - context: " << Brief(context());
@@ -654,13 +654,13 @@ void JSGeneratorObject::JSGeneratorObjectPrint(std::ostream& os) { // NOLINT
JSObjectPrintBody(os, *this);
}
-void JSArray::JSArrayPrint(std::ostream& os) { // NOLINT
+void JSArray::JSArrayPrint(std::ostream& os) {
JSObjectPrintHeader(os, *this, "JSArray");
os << "\n - length: " << Brief(this->length());
JSObjectPrintBody(os, *this);
}
-void JSPromise::JSPromisePrint(std::ostream& os) { // NOLINT
+void JSPromise::JSPromisePrint(std::ostream& os) {
JSObjectPrintHeader(os, *this, "JSPromise");
os << "\n - status: " << JSPromise::Status(status());
if (status() == Promise::kPending) {
@@ -673,15 +673,14 @@ void JSPromise::JSPromisePrint(std::ostream& os) { // NOLINT
JSObjectPrintBody(os, *this);
}
-void JSRegExp::JSRegExpPrint(std::ostream& os) { // NOLINT
+void JSRegExp::JSRegExpPrint(std::ostream& os) {
JSObjectPrintHeader(os, *this, "JSRegExp");
os << "\n - data: " << Brief(data());
os << "\n - source: " << Brief(source());
JSObjectPrintBody(os, *this);
}
-void JSRegExpStringIterator::JSRegExpStringIteratorPrint(
- std::ostream& os) { // NOLINT
+void JSRegExpStringIterator::JSRegExpStringIteratorPrint(std::ostream& os) {
JSObjectPrintHeader(os, *this, "JSRegExpStringIterator");
os << "\n - regex: " << Brief(iterating_reg_exp());
os << "\n - string: " << Brief(iterated_string());
@@ -691,7 +690,7 @@ void JSRegExpStringIterator::JSRegExpStringIteratorPrint(
JSObjectPrintBody(os, *this);
}
-void Symbol::SymbolPrint(std::ostream& os) { // NOLINT
+void Symbol::SymbolPrint(std::ostream& os) {
PrintHeader(os, "Symbol");
os << "\n - hash: " << hash();
os << "\n - description: " << Brief(description());
@@ -762,14 +761,14 @@ void ObjectBoilerplateDescription::ObjectBoilerplateDescriptionPrint(
}
void EmbedderDataArray::EmbedderDataArrayPrint(std::ostream& os) {
- PtrComprCageBase cage_base = GetPtrComprCageBase(*this);
+ Isolate* isolate = GetIsolateForHeapSandbox(*this);
PrintHeader(os, "EmbedderDataArray");
os << "\n - length: " << length();
EmbedderDataSlot start(*this, 0);
EmbedderDataSlot end(*this, length());
for (EmbedderDataSlot slot = start; slot < end; ++slot) {
os << "\n ";
- PrintEmbedderData(cage_base, os, slot);
+ PrintEmbedderData(isolate, os, slot);
}
os << "\n";
}
@@ -779,6 +778,34 @@ void FixedArray::FixedArrayPrint(std::ostream& os) {
}
namespace {
+const char* SideEffectType2String(SideEffectType type) {
+ switch (type) {
+ case SideEffectType::kHasSideEffect:
+ return "kHasSideEffect";
+ case SideEffectType::kHasNoSideEffect:
+ return "kHasNoSideEffect";
+ case SideEffectType::kHasSideEffectToReceiver:
+ return "kHasSideEffectToReceiver";
+ }
+}
+} // namespace
+
+void AccessorInfo::AccessorInfoPrint(std::ostream& os) {
+ TorqueGeneratedAccessorInfo<AccessorInfo, Struct>::AccessorInfoPrint(os);
+ os << " - all_can_read: " << all_can_read();
+ os << "\n - all_can_write: " << all_can_write();
+ os << "\n - is_special_data_property: " << is_special_data_property();
+ os << "\n - is_sloppy: " << is_sloppy();
+ os << "\n - replace_on_access: " << replace_on_access();
+ os << "\n - getter_side_effect_type: "
+ << SideEffectType2String(getter_side_effect_type());
+ os << "\n - setter_side_effect_type: "
+ << SideEffectType2String(setter_side_effect_type());
+ os << "\n - initial_attributes: " << initial_property_attributes();
+ os << '\n';
+}
+
+namespace {
void PrintContextWithHeader(std::ostream& os, Context context,
const char* type) {
context.PrintHeader(os, type);
@@ -1021,7 +1048,7 @@ void SwissNameDictionary::SwissNameDictionaryPrint(std::ostream& os) {
os.flags(sav_flags);
}
-void PropertyArray::PropertyArrayPrint(std::ostream& os) { // NOLINT
+void PropertyArray::PropertyArrayPrint(std::ostream& os) {
PrintHeader(os, "PropertyArray");
os << "\n - length: " << length();
os << "\n - hash: " << Hash();
@@ -1029,7 +1056,7 @@ void PropertyArray::PropertyArrayPrint(std::ostream& os) { // NOLINT
os << "\n";
}
-void FixedDoubleArray::FixedDoubleArrayPrint(std::ostream& os) { // NOLINT
+void FixedDoubleArray::FixedDoubleArrayPrint(std::ostream& os) {
PrintHeader(os, "FixedDoubleArray");
os << "\n - length: " << length();
DoPrintElements<FixedDoubleArray>(os, *this, length());
@@ -1051,12 +1078,12 @@ void WeakArrayList::WeakArrayListPrint(std::ostream& os) {
os << "\n";
}
-void TransitionArray::TransitionArrayPrint(std::ostream& os) { // NOLINT
+void TransitionArray::TransitionArrayPrint(std::ostream& os) {
PrintHeader(os, "TransitionArray");
PrintInternal(os);
}
-void FeedbackCell::FeedbackCellPrint(std::ostream& os) { // NOLINT
+void FeedbackCell::FeedbackCellPrint(std::ostream& os) {
PrintHeader(os, "FeedbackCell");
ReadOnlyRoots roots = GetReadOnlyRoots();
if (map() == roots.no_closures_cell_map()) {
@@ -1081,7 +1108,7 @@ void FeedbackVectorSpec::Print() {
os << std::flush;
}
-void FeedbackVectorSpec::FeedbackVectorSpecPrint(std::ostream& os) { // NOLINT
+void FeedbackVectorSpec::FeedbackVectorSpecPrint(std::ostream& os) {
os << " - slot_count: " << slot_count();
if (slot_count() == 0) {
os << " (empty)\n";
@@ -1116,7 +1143,7 @@ void ClosureFeedbackCellArray::ClosureFeedbackCellArrayPrint(std::ostream& os) {
PrintFixedArrayWithHeader(os, *this, "ClosureFeedbackCellArray");
}
-void FeedbackVector::FeedbackVectorPrint(std::ostream& os) { // NOLINT
+void FeedbackVector::FeedbackVectorPrint(std::ostream& os) {
PrintHeader(os, "FeedbackVector");
os << "\n - length: " << length();
if (length() == 0) {
@@ -1157,13 +1184,12 @@ void FeedbackVector::FeedbackVectorPrint(std::ostream& os) { // NOLINT
os << "\n";
}
-void FeedbackVector::FeedbackSlotPrint(std::ostream& os,
- FeedbackSlot slot) { // NOLINT
+void FeedbackVector::FeedbackSlotPrint(std::ostream& os, FeedbackSlot slot) {
FeedbackNexus nexus(*this, slot);
nexus.Print(os);
}
-void FeedbackNexus::Print(std::ostream& os) { // NOLINT
+void FeedbackNexus::Print(std::ostream& os) {
switch (kind()) {
case FeedbackSlotKind::kCall:
case FeedbackSlotKind::kCloneObject:
@@ -1206,7 +1232,7 @@ void FeedbackNexus::Print(std::ostream& os) { // NOLINT
}
}
-void Oddball::OddballPrint(std::ostream& os) { // NOLINT
+void Oddball::OddballPrint(std::ostream& os) {
PrintHeapObjectHeaderWithoutMap(*this, os, "Oddball");
os << ": ";
String s = to_string();
@@ -1216,42 +1242,39 @@ void Oddball::OddballPrint(std::ostream& os) { // NOLINT
os << std::endl;
}
-void JSAsyncFunctionObject::JSAsyncFunctionObjectPrint(
- std::ostream& os) { // NOLINT
+void JSAsyncFunctionObject::JSAsyncFunctionObjectPrint(std::ostream& os) {
JSGeneratorObjectPrint(os);
}
-void JSAsyncGeneratorObject::JSAsyncGeneratorObjectPrint(
- std::ostream& os) { // NOLINT
+void JSAsyncGeneratorObject::JSAsyncGeneratorObjectPrint(std::ostream& os) {
JSGeneratorObjectPrint(os);
}
-void JSArgumentsObject::JSArgumentsObjectPrint(std::ostream& os) { // NOLINT
+void JSArgumentsObject::JSArgumentsObjectPrint(std::ostream& os) {
JSObjectPrint(os);
}
-void JSStringIterator::JSStringIteratorPrint(std::ostream& os) { // NOLINT
+void JSStringIterator::JSStringIteratorPrint(std::ostream& os) {
JSObjectPrintHeader(os, *this, "JSStringIterator");
os << "\n - string: " << Brief(string());
os << "\n - index: " << index();
JSObjectPrintBody(os, *this);
}
-void JSAsyncFromSyncIterator::JSAsyncFromSyncIteratorPrint(
- std::ostream& os) { // NOLINT
+void JSAsyncFromSyncIterator::JSAsyncFromSyncIteratorPrint(std::ostream& os) {
JSObjectPrintHeader(os, *this, "JSAsyncFromSyncIterator");
os << "\n - sync_iterator: " << Brief(sync_iterator());
os << "\n - next: " << Brief(next());
JSObjectPrintBody(os, *this);
}
-void JSPrimitiveWrapper::JSPrimitiveWrapperPrint(std::ostream& os) { // NOLINT
+void JSPrimitiveWrapper::JSPrimitiveWrapperPrint(std::ostream& os) {
JSObjectPrintHeader(os, *this, "JSPrimitiveWrapper");
os << "\n - value: " << Brief(value());
JSObjectPrintBody(os, *this);
}
-void JSMessageObject::JSMessageObjectPrint(std::ostream& os) { // NOLINT
+void JSMessageObject::JSMessageObjectPrint(std::ostream& os) {
JSObjectPrintHeader(os, *this, "JSMessageObject");
os << "\n - type: " << static_cast<int>(type());
os << "\n - arguments: " << Brief(argument());
@@ -1262,7 +1285,7 @@ void JSMessageObject::JSMessageObjectPrint(std::ostream& os) { // NOLINT
JSObjectPrintBody(os, *this);
}
-void String::StringPrint(std::ostream& os) { // NOLINT
+void String::StringPrint(std::ostream& os) {
PrintHeapObjectHeaderWithoutMap(*this, os, "String");
os << ": ";
os << PrefixForDebugPrint();
@@ -1270,7 +1293,7 @@ void String::StringPrint(std::ostream& os) { // NOLINT
os << SuffixForDebugPrint();
}
-void Name::NamePrint(std::ostream& os) { // NOLINT
+void Name::NamePrint(std::ostream& os) {
if (IsString()) {
String::cast(*this).StringPrint(os);
} else {
@@ -1281,7 +1304,7 @@ void Name::NamePrint(std::ostream& os) { // NOLINT
static const char* const weekdays[] = {"???", "Sun", "Mon", "Tue",
"Wed", "Thu", "Fri", "Sat"};
-void JSDate::JSDatePrint(std::ostream& os) { // NOLINT
+void JSDate::JSDatePrint(std::ostream& os) {
JSObjectPrintHeader(os, *this, "JSDate");
os << "\n - value: " << Brief(value());
if (!year().IsSmi()) {
@@ -1302,7 +1325,7 @@ void JSDate::JSDatePrint(std::ostream& os) { // NOLINT
JSObjectPrintBody(os, *this);
}
-void JSProxy::JSProxyPrint(std::ostream& os) { // NOLINT
+void JSProxy::JSProxyPrint(std::ostream& os) {
PrintHeader(os, "JSProxy");
os << "\n - target: ";
target().ShortPrint(os);
@@ -1311,31 +1334,31 @@ void JSProxy::JSProxyPrint(std::ostream& os) { // NOLINT
os << "\n";
}
-void JSSet::JSSetPrint(std::ostream& os) { // NOLINT
+void JSSet::JSSetPrint(std::ostream& os) {
JSObjectPrintHeader(os, *this, "JSSet");
os << " - table: " << Brief(table());
JSObjectPrintBody(os, *this);
}
-void JSMap::JSMapPrint(std::ostream& os) { // NOLINT
+void JSMap::JSMapPrint(std::ostream& os) {
JSObjectPrintHeader(os, *this, "JSMap");
os << " - table: " << Brief(table());
JSObjectPrintBody(os, *this);
}
-void JSCollectionIterator::JSCollectionIteratorPrint(
- std::ostream& os, const char* name) { // NOLINT
+void JSCollectionIterator::JSCollectionIteratorPrint(std::ostream& os,
+ const char* name) {
JSObjectPrintHeader(os, *this, name);
os << "\n - table: " << Brief(table());
os << "\n - index: " << Brief(index());
JSObjectPrintBody(os, *this);
}
-void JSSetIterator::JSSetIteratorPrint(std::ostream& os) { // NOLINT
+void JSSetIterator::JSSetIteratorPrint(std::ostream& os) {
JSCollectionIteratorPrint(os, "JSSetIterator");
}
-void JSMapIterator::JSMapIteratorPrint(std::ostream& os) { // NOLINT
+void JSMapIterator::JSMapIteratorPrint(std::ostream& os) {
JSCollectionIteratorPrint(os, "JSMapIterator");
}
@@ -1377,19 +1400,19 @@ void JSFinalizationRegistry::JSFinalizationRegistryPrint(std::ostream& os) {
JSObjectPrintBody(os, *this);
}
-void JSWeakMap::JSWeakMapPrint(std::ostream& os) { // NOLINT
+void JSWeakMap::JSWeakMapPrint(std::ostream& os) {
JSObjectPrintHeader(os, *this, "JSWeakMap");
os << "\n - table: " << Brief(table());
JSObjectPrintBody(os, *this);
}
-void JSWeakSet::JSWeakSetPrint(std::ostream& os) { // NOLINT
+void JSWeakSet::JSWeakSetPrint(std::ostream& os) {
JSObjectPrintHeader(os, *this, "JSWeakSet");
os << "\n - table: " << Brief(table());
JSObjectPrintBody(os, *this);
}
-void JSArrayBuffer::JSArrayBufferPrint(std::ostream& os) { // NOLINT
+void JSArrayBuffer::JSArrayBufferPrint(std::ostream& os) {
JSObjectPrintHeader(os, *this, "JSArrayBuffer");
os << "\n - backing_store: " << backing_store();
os << "\n - byte_length: " << byte_length();
@@ -1397,15 +1420,16 @@ void JSArrayBuffer::JSArrayBufferPrint(std::ostream& os) { // NOLINT
if (is_detachable()) os << "\n - detachable";
if (was_detached()) os << "\n - detached";
if (is_shared()) os << "\n - shared";
+ if (is_resizable()) os << "\n - resizable";
JSObjectPrintBody(os, *this, !was_detached());
}
-void JSTypedArray::JSTypedArrayPrint(std::ostream& os) { // NOLINT
+void JSTypedArray::JSTypedArrayPrint(std::ostream& os) {
JSObjectPrintHeader(os, *this, "JSTypedArray");
os << "\n - buffer: " << Brief(buffer());
os << "\n - byte_offset: " << byte_offset();
os << "\n - byte_length: " << byte_length();
- os << "\n - length: " << length();
+ os << "\n - length: " << GetLength();
os << "\n - data_ptr: " << DataPtr();
Tagged_t base_ptr = static_cast<Tagged_t>(base_pointer().ptr());
os << "\n - base_pointer: "
@@ -1417,6 +1441,8 @@ void JSTypedArray::JSTypedArrayPrint(std::ostream& os) { // NOLINT
return;
}
if (WasDetached()) os << "\n - detached";
+ if (is_length_tracking()) os << "\n - length-tracking";
+ if (is_backed_by_rab()) os << "\n - backed-by-rab";
JSObjectPrintBody(os, *this, !WasDetached());
}
@@ -1428,7 +1454,7 @@ void JSArrayIterator::JSArrayIteratorPrint(std::ostream& os) { // NOLINT
JSObjectPrintBody(os, *this);
}
-void JSDataView::JSDataViewPrint(std::ostream& os) { // NOLINT
+void JSDataView::JSDataViewPrint(std::ostream& os) {
JSObjectPrintHeader(os, *this, "JSDataView");
os << "\n - buffer =" << Brief(buffer());
os << "\n - byte_offset: " << byte_offset();
@@ -1441,7 +1467,7 @@ void JSDataView::JSDataViewPrint(std::ostream& os) { // NOLINT
JSObjectPrintBody(os, *this, !WasDetached());
}
-void JSBoundFunction::JSBoundFunctionPrint(std::ostream& os) { // NOLINT
+void JSBoundFunction::JSBoundFunctionPrint(std::ostream& os) {
JSObjectPrintHeader(os, *this, "JSBoundFunction");
os << "\n - bound_target_function: " << Brief(bound_target_function());
os << "\n - bound_this: " << Brief(bound_this());
@@ -1449,7 +1475,7 @@ void JSBoundFunction::JSBoundFunctionPrint(std::ostream& os) { // NOLINT
JSObjectPrintBody(os, *this);
}
-void JSFunction::JSFunctionPrint(std::ostream& os) { // NOLINT
+void JSFunction::JSFunctionPrint(std::ostream& os) {
Isolate* isolate = GetIsolate();
JSObjectPrintHeader(os, *this, "Function");
os << "\n - function prototype: ";
@@ -1525,7 +1551,7 @@ void SharedFunctionInfo::PrintSourceCode(std::ostream& os) {
}
}
-void SharedFunctionInfo::SharedFunctionInfoPrint(std::ostream& os) { // NOLINT
+void SharedFunctionInfo::SharedFunctionInfoPrint(std::ostream& os) {
PrintHeader(os, "SharedFunctionInfo");
os << "\n - name: ";
if (HasSharedName()) {
@@ -1576,7 +1602,7 @@ void SharedFunctionInfo::SharedFunctionInfoPrint(std::ostream& os) { // NOLINT
os << "\n";
}
-void JSGlobalProxy::JSGlobalProxyPrint(std::ostream& os) { // NOLINT
+void JSGlobalProxy::JSGlobalProxyPrint(std::ostream& os) {
JSObjectPrintHeader(os, *this, "JSGlobalProxy");
if (!GetIsolate()->bootstrapper()->IsActive()) {
os << "\n - native context: " << Brief(native_context());
@@ -1584,7 +1610,7 @@ void JSGlobalProxy::JSGlobalProxyPrint(std::ostream& os) { // NOLINT
JSObjectPrintBody(os, *this);
}
-void JSGlobalObject::JSGlobalObjectPrint(std::ostream& os) { // NOLINT
+void JSGlobalObject::JSGlobalObjectPrint(std::ostream& os) {
JSObjectPrintHeader(os, *this, "JSGlobalObject");
if (!GetIsolate()->bootstrapper()->IsActive()) {
os << "\n - native context: " << Brief(native_context());
@@ -1593,7 +1619,7 @@ void JSGlobalObject::JSGlobalObjectPrint(std::ostream& os) { // NOLINT
JSObjectPrintBody(os, *this);
}
-void PropertyCell::PropertyCellPrint(std::ostream& os) { // NOLINT
+void PropertyCell::PropertyCellPrint(std::ostream& os) {
PrintHeader(os, "PropertyCell");
os << "\n - name: ";
name().NamePrint(os);
@@ -1605,7 +1631,7 @@ void PropertyCell::PropertyCellPrint(std::ostream& os) { // NOLINT
os << "\n";
}
-void Code::CodePrint(std::ostream& os) { // NOLINT
+void Code::CodePrint(std::ostream& os) {
PrintHeader(os, "Code");
os << "\n";
#ifdef ENABLE_DISASSEMBLER
@@ -1613,26 +1639,26 @@ void Code::CodePrint(std::ostream& os) { // NOLINT
#endif
}
-void CodeDataContainer::CodeDataContainerPrint(std::ostream& os) { // NOLINT
+void CodeDataContainer::CodeDataContainerPrint(std::ostream& os) {
PrintHeader(os, "CodeDataContainer");
os << "\n - kind_specific_flags: " << kind_specific_flags();
os << "\n";
}
-void Foreign::ForeignPrint(std::ostream& os) { // NOLINT
+void Foreign::ForeignPrint(std::ostream& os) {
PrintHeader(os, "Foreign");
os << "\n - foreign address : " << reinterpret_cast<void*>(foreign_address());
os << "\n";
}
-void CallbackTask::CallbackTaskPrint(std::ostream& os) { // NOLINT
+void CallbackTask::CallbackTaskPrint(std::ostream& os) {
PrintHeader(os, "CallbackTask");
os << "\n - callback: " << Brief(callback());
os << "\n - data: " << Brief(data());
os << "\n";
}
-void CallableTask::CallableTaskPrint(std::ostream& os) { // NOLINT
+void CallableTask::CallableTaskPrint(std::ostream& os) {
PrintHeader(os, "CallableTask");
os << "\n - context: " << Brief(context());
os << "\n - callable: " << Brief(callable());
@@ -1640,7 +1666,7 @@ void CallableTask::CallableTaskPrint(std::ostream& os) { // NOLINT
}
void PromiseFulfillReactionJobTask::PromiseFulfillReactionJobTaskPrint(
- std::ostream& os) { // NOLINT
+ std::ostream& os) {
PrintHeader(os, "PromiseFulfillReactionJobTask");
os << "\n - argument: " << Brief(argument());
os << "\n - context: " << Brief(context());
@@ -1650,7 +1676,7 @@ void PromiseFulfillReactionJobTask::PromiseFulfillReactionJobTaskPrint(
}
void PromiseRejectReactionJobTask::PromiseRejectReactionJobTaskPrint(
- std::ostream& os) { // NOLINT
+ std::ostream& os) {
PrintHeader(os, "PromiseRejectReactionJobTask");
os << "\n - argument: " << Brief(argument());
os << "\n - context: " << Brief(context());
@@ -1660,7 +1686,7 @@ void PromiseRejectReactionJobTask::PromiseRejectReactionJobTaskPrint(
}
void PromiseResolveThenableJobTask::PromiseResolveThenableJobTaskPrint(
- std::ostream& os) { // NOLINT
+ std::ostream& os) {
PrintHeader(os, "PromiseResolveThenableJobTask");
os << "\n - context: " << Brief(context());
os << "\n - promise_to_resolve: " << Brief(promise_to_resolve());
@@ -1669,7 +1695,7 @@ void PromiseResolveThenableJobTask::PromiseResolveThenableJobTaskPrint(
os << "\n";
}
-void PromiseCapability::PromiseCapabilityPrint(std::ostream& os) { // NOLINT
+void PromiseCapability::PromiseCapabilityPrint(std::ostream& os) {
PrintHeader(os, "PromiseCapability");
os << "\n - promise: " << Brief(promise());
os << "\n - resolve: " << Brief(resolve());
@@ -1677,7 +1703,7 @@ void PromiseCapability::PromiseCapabilityPrint(std::ostream& os) { // NOLINT
os << "\n";
}
-void PromiseReaction::PromiseReactionPrint(std::ostream& os) { // NOLINT
+void PromiseReaction::PromiseReactionPrint(std::ostream& os) {
PrintHeader(os, "PromiseReaction");
os << "\n - next: " << Brief(next());
os << "\n - reject_handler: " << Brief(reject_handler());
@@ -1686,8 +1712,7 @@ void PromiseReaction::PromiseReactionPrint(std::ostream& os) { // NOLINT
os << "\n";
}
-void AsyncGeneratorRequest::AsyncGeneratorRequestPrint(
- std::ostream& os) { // NOLINT
+void AsyncGeneratorRequest::AsyncGeneratorRequestPrint(std::ostream& os) {
PrintHeader(os, "AsyncGeneratorRequest");
const char* mode = "Invalid!";
switch (resume_mode()) {
@@ -1708,7 +1733,7 @@ void AsyncGeneratorRequest::AsyncGeneratorRequestPrint(
}
void SourceTextModuleInfoEntry::SourceTextModuleInfoEntryPrint(
- std::ostream& os) { // NOLINT
+ std::ostream& os) {
PrintHeader(os, "SourceTextModuleInfoEntry");
os << "\n - export_name: " << Brief(export_name());
os << "\n - local_name: " << Brief(local_name());
@@ -1726,7 +1751,7 @@ static void PrintModuleFields(Module module, std::ostream& os) {
os << "\n - exception: " << Brief(module.exception());
}
-void Module::ModulePrint(std::ostream& os) { // NOLINT
+void Module::ModulePrint(std::ostream& os) {
if (this->IsSourceTextModule()) {
SourceTextModule::cast(*this).SourceTextModulePrint(os);
} else if (this->IsSyntheticModule()) {
@@ -1736,7 +1761,7 @@ void Module::ModulePrint(std::ostream& os) { // NOLINT
}
}
-void SourceTextModule::SourceTextModulePrint(std::ostream& os) { // NOLINT
+void SourceTextModule::SourceTextModulePrint(std::ostream& os) {
PrintHeader(os, "SourceTextModule");
PrintModuleFields(*this, os);
os << "\n - sfi/code/info: " << Brief(code());
@@ -1744,13 +1769,13 @@ void SourceTextModule::SourceTextModulePrint(std::ostream& os) { // NOLINT
os << "\n - script: " << Brief(script);
os << "\n - origin: " << Brief(script.GetNameOrSourceURL());
os << "\n - requested_modules: " << Brief(requested_modules());
- os << "\n - import_meta: " << Brief(import_meta());
+ os << "\n - import_meta: " << Brief(import_meta(kAcquireLoad));
os << "\n - cycle_root: " << Brief(cycle_root());
os << "\n - async_evaluating_ordinal: " << async_evaluating_ordinal();
os << "\n";
}
-void SyntheticModule::SyntheticModulePrint(std::ostream& os) { // NOLINT
+void SyntheticModule::SyntheticModulePrint(std::ostream& os) {
PrintHeader(os, "SyntheticModule");
PrintModuleFields(*this, os);
os << "\n - export_names: " << Brief(export_names());
@@ -1758,13 +1783,13 @@ void SyntheticModule::SyntheticModulePrint(std::ostream& os) { // NOLINT
os << "\n";
}
-void JSModuleNamespace::JSModuleNamespacePrint(std::ostream& os) { // NOLINT
+void JSModuleNamespace::JSModuleNamespacePrint(std::ostream& os) {
JSObjectPrintHeader(os, *this, "JSModuleNamespace");
os << "\n - module: " << Brief(module());
JSObjectPrintBody(os, *this);
}
-void PrototypeInfo::PrototypeInfoPrint(std::ostream& os) { // NOLINT
+void PrototypeInfo::PrototypeInfoPrint(std::ostream& os) {
PrintHeader(os, "PrototypeInfo");
os << "\n - module namespace: " << Brief(module_namespace());
os << "\n - prototype users: " << Brief(prototype_users());
@@ -1774,7 +1799,7 @@ void PrototypeInfo::PrototypeInfoPrint(std::ostream& os) { // NOLINT
os << "\n";
}
-void ClassPositions::ClassPositionsPrint(std::ostream& os) { // NOLINT
+void ClassPositions::ClassPositionsPrint(std::ostream& os) {
PrintHeader(os, "ClassPositions");
os << "\n - start position: " << start();
os << "\n - end position: " << end();
@@ -1782,15 +1807,15 @@ void ClassPositions::ClassPositionsPrint(std::ostream& os) { // NOLINT
}
void ArrayBoilerplateDescription::ArrayBoilerplateDescriptionPrint(
- std::ostream& os) { // NOLINT
+ std::ostream& os) {
PrintHeader(os, "ArrayBoilerplateDescription");
- os << "\n - elements kind: " << elements_kind();
+ os << "\n - elements kind: " << ElementsKindToString(elements_kind());
os << "\n - constant elements: " << Brief(constant_elements());
os << "\n";
}
void RegExpBoilerplateDescription::RegExpBoilerplateDescriptionPrint(
- std::ostream& os) { // NOLINT
+ std::ostream& os) {
PrintHeader(os, "RegExpBoilerplateDescription");
os << "\n - data: " << Brief(data());
os << "\n - source: " << Brief(source());
@@ -1799,7 +1824,7 @@ void RegExpBoilerplateDescription::RegExpBoilerplateDescriptionPrint(
}
#if V8_ENABLE_WEBASSEMBLY
-void AsmWasmData::AsmWasmDataPrint(std::ostream& os) { // NOLINT
+void AsmWasmData::AsmWasmDataPrint(std::ostream& os) {
PrintHeader(os, "AsmWasmData");
os << "\n - native module: " << Brief(managed_native_module());
os << "\n - export_wrappers: " << Brief(export_wrappers());
@@ -1807,13 +1832,13 @@ void AsmWasmData::AsmWasmDataPrint(std::ostream& os) { // NOLINT
os << "\n";
}
-void WasmTypeInfo::WasmTypeInfoPrint(std::ostream& os) { // NOLINT
+void WasmTypeInfo::WasmTypeInfoPrint(std::ostream& os) {
PrintHeader(os, "WasmTypeInfo");
os << "\n - type address: " << reinterpret_cast<void*>(foreign_address());
os << "\n";
}
-void WasmStruct::WasmStructPrint(std::ostream& os) { // NOLINT
+void WasmStruct::WasmStructPrint(std::ostream& os) {
PrintHeader(os, "WasmStruct");
wasm::StructType* struct_type = type();
os << "\n - fields (" << struct_type->field_count() << "):";
@@ -1851,7 +1876,7 @@ void WasmStruct::WasmStructPrint(std::ostream& os) { // NOLINT
os << "\n";
}
-void WasmArray::WasmArrayPrint(std::ostream& os) { // NOLINT
+void WasmArray::WasmArrayPrint(std::ostream& os) {
PrintHeader(os, "WasmArray");
wasm::ArrayType* array_type = type();
uint32_t len = length();
@@ -1891,13 +1916,13 @@ void WasmArray::WasmArrayPrint(std::ostream& os) { // NOLINT
os << "\n";
}
-void WasmExceptionTag::WasmExceptionTagPrint(std::ostream& os) { // NOLINT
+void WasmExceptionTag::WasmExceptionTagPrint(std::ostream& os) {
PrintHeader(os, "WasmExceptionTag");
os << "\n - index: " << index();
os << "\n";
}
-void WasmInstanceObject::WasmInstanceObjectPrint(std::ostream& os) { // NOLINT
+void WasmInstanceObject::WasmInstanceObjectPrint(std::ostream& os) {
JSObjectPrintHeader(os, *this, "WasmInstanceObject");
os << "\n - module_object: " << Brief(module_object());
os << "\n - exports_object: " << Brief(exports_object());
@@ -1944,24 +1969,35 @@ void WasmInstanceObject::WasmInstanceObjectPrint(std::ostream& os) { // NOLINT
os << "\n";
}
-void WasmExportedFunctionData::WasmExportedFunctionDataPrint(
- std::ostream& os) { // NOLINT
+// Never called directly, as WasmFunctionData is an "abstract" class.
+void WasmFunctionData::WasmFunctionDataPrint(std::ostream& os) {
+ os << "\n - target: " << reinterpret_cast<void*>(foreign_address());
+ os << "\n - ref: " << Brief(ref());
+}
+
+void WasmExportedFunctionData::WasmExportedFunctionDataPrint(std::ostream& os) {
PrintHeader(os, "WasmExportedFunctionData");
+ WasmFunctionDataPrint(os);
os << "\n - wrapper_code: " << Brief(wrapper_code());
os << "\n - instance: " << Brief(instance());
- os << "\n - jump_table_offset: " << jump_table_offset();
os << "\n - function_index: " << function_index();
+ os << "\n - signature: " << Brief(signature());
+ os << "\n - wrapper_budget: " << wrapper_budget();
os << "\n";
}
-void WasmJSFunctionData::WasmJSFunctionDataPrint(std::ostream& os) { // NOLINT
+void WasmJSFunctionData::WasmJSFunctionDataPrint(std::ostream& os) {
PrintHeader(os, "WasmJSFunctionData");
- os << "\n - callable: " << Brief(callable());
+ WasmFunctionDataPrint(os);
os << "\n - wrapper_code: " << Brief(wrapper_code());
+ os << "\n - wasm_to_js_wrapper_code: " << Brief(wasm_to_js_wrapper_code());
+ os << "\n - serialized_return_count: " << serialized_return_count();
+ os << "\n - serialized_parameter_count: " << serialized_parameter_count();
+ os << "\n - serialized signature: " << Brief(serialized_signature());
os << "\n";
}
-void WasmModuleObject::WasmModuleObjectPrint(std::ostream& os) { // NOLINT
+void WasmModuleObject::WasmModuleObjectPrint(std::ostream& os) {
PrintHeader(os, "WasmModuleObject");
os << "\n - module: " << module();
os << "\n - native module: " << native_module();
@@ -1970,7 +2006,7 @@ void WasmModuleObject::WasmModuleObjectPrint(std::ostream& os) { // NOLINT
os << "\n";
}
-void WasmTableObject::WasmTableObjectPrint(std::ostream& os) { // NOLINT
+void WasmTableObject::WasmTableObjectPrint(std::ostream& os) {
PrintHeader(os, "WasmTableObject");
os << "\n - elements: " << Brief(elements());
os << "\n - maximum_length: " << Brief(maximum_length());
@@ -1979,7 +2015,7 @@ void WasmTableObject::WasmTableObjectPrint(std::ostream& os) { // NOLINT
os << "\n";
}
-void WasmGlobalObject::WasmGlobalObjectPrint(std::ostream& os) { // NOLINT
+void WasmGlobalObject::WasmGlobalObjectPrint(std::ostream& os) {
PrintHeader(os, "WasmGlobalObject");
if (type().is_reference()) {
os << "\n - tagged_buffer: " << Brief(tagged_buffer());
@@ -1994,7 +2030,7 @@ void WasmGlobalObject::WasmGlobalObjectPrint(std::ostream& os) { // NOLINT
os << "\n";
}
-void WasmMemoryObject::WasmMemoryObjectPrint(std::ostream& os) { // NOLINT
+void WasmMemoryObject::WasmMemoryObjectPrint(std::ostream& os) {
PrintHeader(os, "WasmMemoryObject");
os << "\n - array_buffer: " << Brief(array_buffer());
os << "\n - maximum_pages: " << maximum_pages();
@@ -2002,8 +2038,7 @@ void WasmMemoryObject::WasmMemoryObjectPrint(std::ostream& os) { // NOLINT
os << "\n";
}
-void WasmExceptionObject::WasmExceptionObjectPrint(
- std::ostream& os) { // NOLINT
+void WasmExceptionObject::WasmExceptionObjectPrint(std::ostream& os) {
PrintHeader(os, "WasmExceptionObject");
os << "\n - serialized_signature: " << Brief(serialized_signature());
os << "\n - exception_tag: " << Brief(exception_tag());
@@ -2024,14 +2059,14 @@ void WasmIndirectFunctionTable::WasmIndirectFunctionTablePrint(
os << "\n";
}
-void WasmValueObject::WasmValueObjectPrint(std::ostream& os) { // NOLINT
+void WasmValueObject::WasmValueObjectPrint(std::ostream& os) {
PrintHeader(os, "WasmValueObject");
os << "\n - value: " << Brief(value());
os << "\n";
}
#endif // V8_ENABLE_WEBASSEMBLY
-void LoadHandler::LoadHandlerPrint(std::ostream& os) { // NOLINT
+void LoadHandler::LoadHandlerPrint(std::ostream& os) {
PrintHeader(os, "LoadHandler");
// TODO(ishell): implement printing based on handler kind
os << "\n - handler: " << Brief(smi_handler());
@@ -2049,7 +2084,7 @@ void LoadHandler::LoadHandlerPrint(std::ostream& os) { // NOLINT
os << "\n";
}
-void StoreHandler::StoreHandlerPrint(std::ostream& os) { // NOLINT
+void StoreHandler::StoreHandlerPrint(std::ostream& os) {
PrintHeader(os, "StoreHandler");
// TODO(ishell): implement printing based on handler kind
os << "\n - handler: " << Brief(smi_handler());
@@ -2067,14 +2102,14 @@ void StoreHandler::StoreHandlerPrint(std::ostream& os) { // NOLINT
os << "\n";
}
-void AccessorPair::AccessorPairPrint(std::ostream& os) { // NOLINT
+void AccessorPair::AccessorPairPrint(std::ostream& os) {
PrintHeader(os, "AccessorPair");
os << "\n - getter: " << Brief(getter());
os << "\n - setter: " << Brief(setter());
os << "\n";
}
-void CallHandlerInfo::CallHandlerInfoPrint(std::ostream& os) { // NOLINT
+void CallHandlerInfo::CallHandlerInfoPrint(std::ostream& os) {
PrintHeader(os, "CallHandlerInfo");
os << "\n - callback: " << Brief(callback());
os << "\n - js_callback: " << Brief(js_callback());
@@ -2084,8 +2119,7 @@ void CallHandlerInfo::CallHandlerInfoPrint(std::ostream& os) { // NOLINT
os << "\n";
}
-void FunctionTemplateInfo::FunctionTemplateInfoPrint(
- std::ostream& os) { // NOLINT
+void FunctionTemplateInfo::FunctionTemplateInfoPrint(std::ostream& os) {
PrintHeader(os, "FunctionTemplateInfo");
os << "\n - class name: " << Brief(class_name());
os << "\n - tag: " << tag();
@@ -2102,7 +2136,7 @@ void FunctionTemplateInfo::FunctionTemplateInfoPrint(
os << "\n";
}
-void ObjectTemplateInfo::ObjectTemplateInfoPrint(std::ostream& os) { // NOLINT
+void ObjectTemplateInfo::ObjectTemplateInfoPrint(std::ostream& os) {
PrintHeader(os, "ObjectTemplateInfo");
os << "\n - tag: " << tag();
os << "\n - serial_number: " << serial_number();
@@ -2114,7 +2148,7 @@ void ObjectTemplateInfo::ObjectTemplateInfoPrint(std::ostream& os) { // NOLINT
os << "\n";
}
-void AllocationSite::AllocationSitePrint(std::ostream& os) { // NOLINT
+void AllocationSite::AllocationSitePrint(std::ostream& os) {
PrintHeader(os, "AllocationSite");
if (this->HasWeakNext()) os << "\n - weak_next: " << Brief(weak_next());
os << "\n - dependent code: " << Brief(dependent_code());
@@ -2137,7 +2171,7 @@ void AllocationSite::AllocationSitePrint(std::ostream& os) { // NOLINT
os << "\n";
}
-void AllocationMemento::AllocationMementoPrint(std::ostream& os) { // NOLINT
+void AllocationMemento::AllocationMementoPrint(std::ostream& os) {
PrintHeader(os, "AllocationMemento");
os << "\n - allocation site: ";
if (IsValid()) {
@@ -2147,7 +2181,7 @@ void AllocationMemento::AllocationMementoPrint(std::ostream& os) { // NOLINT
}
}
-void Script::ScriptPrint(std::ostream& os) { // NOLINT
+void Script::ScriptPrint(std::ostream& os) {
PrintHeader(os, "Script");
os << "\n - source: " << Brief(source());
os << "\n - name: " << Brief(name());
@@ -2170,9 +2204,11 @@ void Script::ScriptPrint(std::ostream& os) { // NOLINT
if (!is_wasm) {
if (has_eval_from_shared()) {
os << "\n - eval from shared: " << Brief(eval_from_shared());
- }
- if (is_wrapped()) {
+ } else if (is_wrapped()) {
os << "\n - wrapped arguments: " << Brief(wrapped_arguments());
+ } else if (type() == TYPE_WEB_SNAPSHOT) {
+ os << "\n - shared function info table: "
+ << Brief(shared_function_info_table());
}
os << "\n - eval from position: " << eval_from_position();
}
@@ -2181,7 +2217,7 @@ void Script::ScriptPrint(std::ostream& os) { // NOLINT
}
#ifdef V8_INTL_SUPPORT
-void JSV8BreakIterator::JSV8BreakIteratorPrint(std::ostream& os) { // NOLINT
+void JSV8BreakIterator::JSV8BreakIteratorPrint(std::ostream& os) {
JSObjectPrintHeader(os, *this, "JSV8BreakIterator");
os << "\n - locale: " << Brief(locale());
os << "\n - break iterator: " << Brief(break_iterator());
@@ -2194,14 +2230,14 @@ void JSV8BreakIterator::JSV8BreakIteratorPrint(std::ostream& os) { // NOLINT
os << "\n";
}
-void JSCollator::JSCollatorPrint(std::ostream& os) { // NOLINT
+void JSCollator::JSCollatorPrint(std::ostream& os) {
JSObjectPrintHeader(os, *this, "JSCollator");
os << "\n - icu collator: " << Brief(icu_collator());
os << "\n - bound compare: " << Brief(bound_compare());
JSObjectPrintBody(os, *this);
}
-void JSDateTimeFormat::JSDateTimeFormatPrint(std::ostream& os) { // NOLINT
+void JSDateTimeFormat::JSDateTimeFormatPrint(std::ostream& os) {
JSObjectPrintHeader(os, *this, "JSDateTimeFormat");
os << "\n - locale: " << Brief(locale());
os << "\n - icu locale: " << Brief(icu_locale());
@@ -2212,7 +2248,7 @@ void JSDateTimeFormat::JSDateTimeFormatPrint(std::ostream& os) { // NOLINT
JSObjectPrintBody(os, *this);
}
-void JSDisplayNames::JSDisplayNamesPrint(std::ostream& os) { // NOLINT
+void JSDisplayNames::JSDisplayNamesPrint(std::ostream& os) {
JSObjectPrintHeader(os, *this, "JSDisplayNames");
os << "\n - internal: " << Brief(internal());
os << "\n - style: " << StyleAsString();
@@ -2220,7 +2256,7 @@ void JSDisplayNames::JSDisplayNamesPrint(std::ostream& os) { // NOLINT
JSObjectPrintBody(os, *this);
}
-void JSListFormat::JSListFormatPrint(std::ostream& os) { // NOLINT
+void JSListFormat::JSListFormatPrint(std::ostream& os) {
JSObjectPrintHeader(os, *this, "JSListFormat");
os << "\n - locale: " << Brief(locale());
os << "\n - style: " << StyleAsString();
@@ -2229,13 +2265,13 @@ void JSListFormat::JSListFormatPrint(std::ostream& os) { // NOLINT
JSObjectPrintBody(os, *this);
}
-void JSLocale::JSLocalePrint(std::ostream& os) { // NOLINT
+void JSLocale::JSLocalePrint(std::ostream& os) {
JSObjectPrintHeader(os, *this, "JSLocale");
os << "\n - icu locale: " << Brief(icu_locale());
JSObjectPrintBody(os, *this);
}
-void JSNumberFormat::JSNumberFormatPrint(std::ostream& os) { // NOLINT
+void JSNumberFormat::JSNumberFormatPrint(std::ostream& os) {
JSObjectPrintHeader(os, *this, "JSNumberFormat");
os << "\n - locale: " << Brief(locale());
os << "\n - icu_number_formatter: " << Brief(icu_number_formatter());
@@ -2243,7 +2279,7 @@ void JSNumberFormat::JSNumberFormatPrint(std::ostream& os) { // NOLINT
JSObjectPrintBody(os, *this);
}
-void JSPluralRules::JSPluralRulesPrint(std::ostream& os) { // NOLINT
+void JSPluralRules::JSPluralRulesPrint(std::ostream& os) {
JSObjectPrintHeader(os, *this, "JSPluralRules");
os << "\n - locale: " << Brief(locale());
os << "\n - type: " << TypeAsString();
@@ -2252,8 +2288,7 @@ void JSPluralRules::JSPluralRulesPrint(std::ostream& os) { // NOLINT
JSObjectPrintBody(os, *this);
}
-void JSRelativeTimeFormat::JSRelativeTimeFormatPrint(
- std::ostream& os) { // NOLINT
+void JSRelativeTimeFormat::JSRelativeTimeFormatPrint(std::ostream& os) {
JSObjectPrintHeader(os, *this, "JSRelativeTimeFormat");
os << "\n - locale: " << Brief(locale());
os << "\n - numberingSystem: " << Brief(numberingSystem());
@@ -2262,14 +2297,14 @@ void JSRelativeTimeFormat::JSRelativeTimeFormatPrint(
os << "\n";
}
-void JSSegmentIterator::JSSegmentIteratorPrint(std::ostream& os) { // NOLINT
+void JSSegmentIterator::JSSegmentIteratorPrint(std::ostream& os) {
JSObjectPrintHeader(os, *this, "JSSegmentIterator");
os << "\n - icu break iterator: " << Brief(icu_break_iterator());
os << "\n - granularity: " << GranularityAsString(GetIsolate());
os << "\n";
}
-void JSSegmenter::JSSegmenterPrint(std::ostream& os) { // NOLINT
+void JSSegmenter::JSSegmenterPrint(std::ostream& os) {
JSObjectPrintHeader(os, *this, "JSSegmenter");
os << "\n - locale: " << Brief(locale());
os << "\n - granularity: " << GranularityAsString(GetIsolate());
@@ -2277,7 +2312,7 @@ void JSSegmenter::JSSegmenterPrint(std::ostream& os) { // NOLINT
JSObjectPrintBody(os, *this);
}
-void JSSegments::JSSegmentsPrint(std::ostream& os) { // NOLINT
+void JSSegments::JSSegmentsPrint(std::ostream& os) {
JSObjectPrintHeader(os, *this, "JSSegments");
os << "\n - icu break iterator: " << Brief(icu_break_iterator());
os << "\n - unicode string: " << Brief(unicode_string());
@@ -2301,7 +2336,7 @@ void PrintScopeInfoList(ScopeInfo scope_info, std::ostream& os,
}
} // namespace
-void ScopeInfo::ScopeInfoPrint(std::ostream& os) { // NOLINT
+void ScopeInfo::ScopeInfoPrint(std::ostream& os) {
PrintHeader(os, "ScopeInfo");
if (IsEmpty()) {
os << "\n - empty\n";
@@ -2357,7 +2392,7 @@ void ScopeInfo::ScopeInfoPrint(std::ostream& os) { // NOLINT
os << "\n";
}
-void StackFrameInfo::StackFrameInfoPrint(std::ostream& os) { // NOLINT
+void StackFrameInfo::StackFrameInfoPrint(std::ostream& os) {
PrintHeader(os, "StackFrameInfo");
os << "\n - receiver_or_instance: " << Brief(receiver_or_instance());
os << "\n - function: " << Brief(function());
@@ -2369,7 +2404,7 @@ void StackFrameInfo::StackFrameInfoPrint(std::ostream& os) { // NOLINT
os << "\n";
}
-void PreparseData::PreparseDataPrint(std::ostream& os) { // NOLINT
+void PreparseData::PreparseDataPrint(std::ostream& os) {
PrintHeader(os, "PreparseData");
os << "\n - data_length: " << data_length();
os << "\n - children_length: " << children_length();
@@ -2385,7 +2420,7 @@ void PreparseData::PreparseDataPrint(std::ostream& os) { // NOLINT
os << "\n";
}
-void InterpreterData::InterpreterDataPrint(std::ostream& os) { // NOLINT
+void InterpreterData::InterpreterDataPrint(std::ostream& os) {
PrintHeader(os, "InterpreterData");
os << "\n - bytecode_array: " << Brief(bytecode_array());
os << "\n - interpreter_trampoline: " << Brief(interpreter_trampoline());
@@ -2480,7 +2515,7 @@ void Map::PrintMapDetails(std::ostream& os) {
instance_descriptors().PrintDescriptors(os);
}
-void Map::MapPrint(std::ostream& os) { // NOLINT
+void Map::MapPrint(std::ostream& os) {
#ifdef OBJECT_PRINT
PrintHeader(os, "Map");
#else
@@ -2662,7 +2697,7 @@ void TransitionArray::PrintInternal(std::ostream& os) {
os << "\n" << std::flush;
}
-void TransitionsAccessor::PrintTransitions(std::ostream& os) { // NOLINT
+void TransitionsAccessor::PrintTransitions(std::ostream& os) {
switch (encoding()) {
case kPrototypeInfo:
case kUninitialized:
@@ -2730,7 +2765,7 @@ void TransitionsAccessor::PrintTransitionTree(
}
}
-void JSObject::PrintTransitions(std::ostream& os) { // NOLINT
+void JSObject::PrintTransitions(std::ostream& os) {
DisallowGarbageCollection no_gc;
TransitionsAccessor ta(GetIsolate(), map(), &no_gc);
if (ta.NumberOfTransitions() == 0) return;
@@ -2770,6 +2805,22 @@ V8_EXPORT_PRIVATE extern void _v8_internal_Print_Object(void* object) {
GetObjectFromRaw(object).Print();
}
+V8_EXPORT_PRIVATE extern void _v8_internal_Print_LoadHandler(void* object) {
+#ifdef OBJECT_PRINT
+ i::StdoutStream os;
+ i::LoadHandler::PrintHandler(GetObjectFromRaw(object), os);
+ os << std::flush;
+#endif
+}
+
+V8_EXPORT_PRIVATE extern void _v8_internal_Print_StoreHandler(void* object) {
+#ifdef OBJECT_PRINT
+ i::StdoutStream os;
+ i::StoreHandler::PrintHandler(GetObjectFromRaw(object), os);
+ os << std::flush;
+#endif
+}
+
V8_EXPORT_PRIVATE extern void _v8_internal_Print_Code(void* object) {
i::Address address = reinterpret_cast<i::Address>(object);
i::Isolate* isolate = i::Isolate::Current();
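The `_v8_internal_Print_LoadHandler`/`_v8_internal_Print_StoreHandler` helpers added above follow the shape of the other debugger-callable printers in this file: an exported free function taking a raw `void*` so it can be invoked by name from a debugger, with the body compiled away unless object printing is enabled. A minimal sketch of that shape, using hypothetical names (`Demo_Print_Thing`, `DEMO_OBJECT_PRINT`) rather than V8's own:

#include <iostream>

struct Thing { int value; };

// Exported with C linkage so it is easy to call by name from a debugger.
extern "C" void Demo_Print_Thing(void* object) {
#ifdef DEMO_OBJECT_PRINT
  std::cout << "value: " << static_cast<Thing*>(object)->value << std::endl;
#else
  (void)object;  // printing compiled out in this configuration
#endif
}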
diff --git a/chromium/v8/src/diagnostics/perf-jit.cc b/chromium/v8/src/diagnostics/perf-jit.cc
index dea33400480..e5cc93c1f63 100644
--- a/chromium/v8/src/diagnostics/perf-jit.cc
+++ b/chromium/v8/src/diagnostics/perf-jit.cc
@@ -215,7 +215,6 @@ void PerfJitLogger::LogRecordedBuffer(
if (FLAG_perf_basic_prof_only_functions &&
(abstract_code->kind() != CodeKind::INTERPRETED_FUNCTION &&
abstract_code->kind() != CodeKind::TURBOFAN &&
- abstract_code->kind() != CodeKind::NATIVE_CONTEXT_INDEPENDENT &&
abstract_code->kind() != CodeKind::TURBOPROP &&
abstract_code->kind() != CodeKind::BASELINE)) {
return;
diff --git a/chromium/v8/src/diagnostics/ppc/disasm-ppc.cc b/chromium/v8/src/diagnostics/ppc/disasm-ppc.cc
index 9757c8901bf..724e08cc453 100644
--- a/chromium/v8/src/diagnostics/ppc/disasm-ppc.cc
+++ b/chromium/v8/src/diagnostics/ppc/disasm-ppc.cc
@@ -408,6 +408,16 @@ void Decoder::UnknownFormat(Instruction* instr, const char* name) {
}
void Decoder::DecodeExt0(Instruction* instr) {
+ // Some encodings have integers hard coded in the middle, handle those first.
+ switch (EXT0 | (instr->BitField(20, 16)) | (instr->BitField(10, 0))) {
+#define DECODE_VX_D_FORM__INSTRUCTIONS(name, opcode_name, opcode_value) \
+ case opcode_name: { \
+ Format(instr, #name " 'Vt, 'Vb"); \
+ return; \
+ }
+ PPC_VX_OPCODE_D_FORM_LIST(DECODE_VX_D_FORM__INSTRUCTIONS)
+#undef DECODE_VX_D_FORM__INSTRUCTIONS
+ }
// Some encodings are 5-0 bits, handle those first
switch (EXT0 | (instr->BitField(5, 0))) {
#define DECODE_VA_A_FORM__INSTRUCTIONS(name, opcode_name, opcode_value) \
@@ -638,6 +648,10 @@ void Decoder::DecodeExt2(Instruction* instr) {
Format(instr, "lxvd 'Xt, 'ra, 'rb");
return;
}
+ case LXVX: {
+ Format(instr, "lxvx 'Xt, 'ra, 'rb");
+ return;
+ }
case LXSDX: {
Format(instr, "lxsdx 'Xt, 'ra, 'rb");
return;
@@ -658,6 +672,10 @@ void Decoder::DecodeExt2(Instruction* instr) {
Format(instr, "stxvd 'Xs, 'ra, 'rb");
return;
}
+ case STXVX: {
+ Format(instr, "stxvx 'Xs, 'ra, 'rb");
+ return;
+ }
case STXSDX: {
Format(instr, "stxsdx 'Xs, 'ra, 'rb");
return;
@@ -1277,12 +1295,6 @@ void Decoder::DecodeExt5(Instruction* instr) {
}
void Decoder::DecodeExt6(Instruction* instr) {
- switch (EXT6 | (instr->BitField(10, 2))) {
- case XXBRQ: {
- Format(instr, "xxbrq 'Xt, 'Xb");
- return;
- }
- }
switch (EXT6 | (instr->BitField(10, 1))) {
case XXSPLTIB: {
Format(instr, "xxspltib 'Xt, 'IMM8");
@@ -1298,6 +1310,16 @@ void Decoder::DecodeExt6(Instruction* instr) {
PPC_XX3_OPCODE_LIST(DECODE_XX3_INSTRUCTIONS)
#undef DECODE_XX3_INSTRUCTIONS
}
+ // Some encodings have integers hard coded in the middle, handle those first.
+ switch (EXT6 | (instr->BitField(20, 16)) | (instr->BitField(10, 2))) {
+#define DECODE_XX2_B_INSTRUCTIONS(name, opcode_name, opcode_value) \
+ case opcode_name: { \
+ Format(instr, #name " 'Xt, 'Xb"); \
+ return; \
+ }
+ PPC_XX2_OPCODE_B_FORM_LIST(DECODE_XX2_B_INSTRUCTIONS)
+#undef DECODE_XX2_B_INSTRUCTIONS
+ }
switch (EXT6 | (instr->BitField(10, 2))) {
#define DECODE_XX2_A_INSTRUCTIONS(name, opcode_name, opcode_value) \
case opcode_name: { \
@@ -1305,8 +1327,8 @@ void Decoder::DecodeExt6(Instruction* instr) {
return; \
}
PPC_XX2_OPCODE_A_FORM_LIST(DECODE_XX2_A_INSTRUCTIONS)
- }
#undef DECODE_XX2_A_INSTRUCTIONS
+ }
Unknown(instr); // not used by V8
}
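The PPC decoder changes above extend the table-driven pattern the file already uses: an opcode list macro such as `PPC_XX2_OPCODE_B_FORM_LIST` is expanded into `case` labels inside a `switch`, so supporting a new instruction means adding one line to the list. A compressed, self-contained sketch of that X-macro pattern, with hypothetical mnemonics and opcode values:

// DEMO_OPCODE_LIST stands in for lists like PPC_XX2_OPCODE_B_FORM_LIST;
// the mnemonics and values below are illustrative, not real PPC encodings.
#define DEMO_OPCODE_LIST(V) \
  V(xxbrq, XXBRQ, 0x1B)     \
  V(xxbrh, XXBRH, 0x07)

enum DemoOpcode {
#define DEMO_ENUM(name, opcode_name, opcode_value) opcode_name = opcode_value,
  DEMO_OPCODE_LIST(DEMO_ENUM)
#undef DEMO_ENUM
};

const char* DemoDecode(int opcode) {
  switch (opcode) {
#define DEMO_CASE(name, opcode_name, opcode_value) \
  case opcode_name:                                \
    return #name;
    DEMO_OPCODE_LIST(DEMO_CASE)
#undef DEMO_CASE
    default:
      return "unknown";
  }
}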
diff --git a/chromium/v8/src/diagnostics/riscv64/disasm-riscv64.cc b/chromium/v8/src/diagnostics/riscv64/disasm-riscv64.cc
index a39261555d1..5cbd4b4857f 100644
--- a/chromium/v8/src/diagnostics/riscv64/disasm-riscv64.cc
+++ b/chromium/v8/src/diagnostics/riscv64/disasm-riscv64.cc
@@ -122,6 +122,8 @@ class Decoder {
// Printing of instruction name.
void PrintInstructionName(Instruction* instr);
+ void PrintTarget(Instruction* instr);
+
// Handle formatting of instructions and their options.
int FormatRegister(Instruction* instr, const char* option);
int FormatFPURegisterOrRoundMode(Instruction* instr, const char* option);
@@ -213,6 +215,21 @@ void Decoder::PrintImm12(Instruction* instr) {
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm);
}
+void Decoder::PrintTarget(Instruction* instr) {
+ if (Assembler::IsJalr(instr->InstructionBits())) {
+ if (Assembler::IsAuipc((instr - 4)->InstructionBits()) &&
+ (instr - 4)->RdValue() == instr->Rs1Value()) {
+ int32_t imm = Assembler::BrachlongOffset((instr - 4)->InstructionBits(),
+ instr->InstructionBits());
+ const char* target =
+ converter_.NameOfAddress(reinterpret_cast<byte*>(instr - 4) + imm);
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_, " -> %s", target);
+ return;
+ }
+ }
+}
+
void Decoder::PrintBranchOffset(Instruction* instr) {
int32_t imm = instr->BranchOffset();
const char* target =
@@ -699,6 +716,11 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
PrintVs1(instr);
return 3;
}
+ case 't': { // 'target: target of branch instructions'
+ DCHECK(STRING_STARTS_WITH(format, "target"));
+ PrintTarget(instr);
+ return 6;
+ }
}
UNREACHABLE();
}
@@ -1280,7 +1302,7 @@ void Decoder::DecodeIType(Instruction* instr) {
else if (instr->RdValue() == ra.code() && instr->Imm12Value() == 0)
Format(instr, "jalr 'rs1");
else
- Format(instr, "jalr 'rd, 'imm12('rs1)");
+ Format(instr, "jalr 'rd, 'imm12('rs1)'target");
break;
case RO_LB:
Format(instr, "lb 'rd, 'imm12('rs1)");
@@ -1835,7 +1857,6 @@ int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
return d.InstructionDecode(instruction);
}
-// The RISC-V assembler does not currently use constant pools.
int Disassembler::ConstantPoolSizeAt(byte* instruction) {
return v8::internal::Assembler::ConstantPoolSizeAt(
reinterpret_cast<v8::internal::Instruction*>(instruction));
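`PrintTarget` above recovers the destination of a long branch by inspecting the `auipc`/`jalr` pair: `auipc` contributes the upper 20 bits of a PC-relative offset and `jalr` the sign-extended lower 12, so their sum is the offset from the `auipc` itself. A standalone sketch of that recombination (not V8's `BrachlongOffset`, just the underlying arithmetic):

#include <cstdint>

// Returns the PC-relative offset encoded by an auipc/jalr pair, measured
// from the address of the auipc instruction.
int32_t AuipcJalrOffset(uint32_t auipc_instr, uint32_t jalr_instr) {
  // U-type immediate: bits 31:12 of the instruction, already shifted into place.
  int32_t upper = static_cast<int32_t>(auipc_instr & 0xFFFFF000u);
  // I-type immediate: bits 31:20, sign-extended by the arithmetic shift.
  int32_t lower = static_cast<int32_t>(jalr_instr) >> 20;
  return upper + lower;
}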
diff --git a/chromium/v8/src/diagnostics/system-jit-metadata-win.h b/chromium/v8/src/diagnostics/system-jit-metadata-win.h
index 37678c25185..84cf184eda3 100644
--- a/chromium/v8/src/diagnostics/system-jit-metadata-win.h
+++ b/chromium/v8/src/diagnostics/system-jit-metadata-win.h
@@ -6,6 +6,9 @@
#define V8_DIAGNOSTICS_SYSTEM_JIT_METADATA_WIN_H_
#include <Windows.h>
+#ifndef VOID
+#define VOID void
+#endif
#include <TraceLoggingProvider.h>
#include <evntprov.h>
#include <evntrace.h> // defines TRACE_LEVEL_* and EVENT_TRACE_TYPE_*
diff --git a/chromium/v8/src/diagnostics/system-jit-win.cc b/chromium/v8/src/diagnostics/system-jit-win.cc
index 49200219c26..a4c4237c160 100644
--- a/chromium/v8/src/diagnostics/system-jit-win.cc
+++ b/chromium/v8/src/diagnostics/system-jit-win.cc
@@ -9,6 +9,10 @@
#include "src/diagnostics/system-jit-metadata-win.h"
#include "src/libplatform/tracing/recorder.h"
+#if !defined(V8_ENABLE_SYSTEM_INSTRUMENTATION)
+#error "This file is only compiled if v8_enable_system_instrumentation"
+#endif
+
#if defined(__clang__)
#pragma clang diagnostic ignored "-Wc++98-compat-extra-semi"
#endif
diff --git a/chromium/v8/src/diagnostics/unwinding-info-win64.cc b/chromium/v8/src/diagnostics/unwinding-info-win64.cc
index 9a5f7069e75..6127c0a7867 100644
--- a/chromium/v8/src/diagnostics/unwinding-info-win64.cc
+++ b/chromium/v8/src/diagnostics/unwinding-info-win64.cc
@@ -464,24 +464,27 @@ static decltype(
&::RtlDeleteGrowableFunctionTable) delete_growable_function_table_func =
nullptr;
+void LoadNtdllUnwindingFunctionsOnce() {
+ // Load functions from the ntdll.dll module.
+ HMODULE ntdll_module =
+ LoadLibraryEx(L"ntdll.dll", nullptr, LOAD_LIBRARY_SEARCH_SYSTEM32);
+ DCHECK_NOT_NULL(ntdll_module);
+
+ // This fails on Windows 7.
+ add_growable_function_table_func =
+ reinterpret_cast<decltype(&::RtlAddGrowableFunctionTable)>(
+ ::GetProcAddress(ntdll_module, "RtlAddGrowableFunctionTable"));
+ DCHECK_IMPLIES(IsWindows8OrGreater(), add_growable_function_table_func);
+
+ delete_growable_function_table_func =
+ reinterpret_cast<decltype(&::RtlDeleteGrowableFunctionTable)>(
+ ::GetProcAddress(ntdll_module, "RtlDeleteGrowableFunctionTable"));
+ DCHECK_IMPLIES(IsWindows8OrGreater(), delete_growable_function_table_func);
+}
+
void LoadNtdllUnwindingFunctions() {
- base::CallOnce(&load_ntdll_unwinding_functions_once, []() {
- // Load functions from the ntdll.dll module.
- HMODULE ntdll_module =
- LoadLibraryEx(L"ntdll.dll", nullptr, LOAD_LIBRARY_SEARCH_SYSTEM32);
- DCHECK_NOT_NULL(ntdll_module);
-
- // This fails on Windows 7.
- add_growable_function_table_func =
- reinterpret_cast<decltype(&::RtlAddGrowableFunctionTable)>(
- ::GetProcAddress(ntdll_module, "RtlAddGrowableFunctionTable"));
- DCHECK_IMPLIES(IsWindows8OrGreater(), add_growable_function_table_func);
-
- delete_growable_function_table_func =
- reinterpret_cast<decltype(&::RtlDeleteGrowableFunctionTable)>(
- ::GetProcAddress(ntdll_module, "RtlDeleteGrowableFunctionTable"));
- DCHECK_IMPLIES(IsWindows8OrGreater(), delete_growable_function_table_func);
- });
+ base::CallOnce(&load_ntdll_unwinding_functions_once,
+ &LoadNtdllUnwindingFunctionsOnce);
}
bool AddGrowableFunctionTable(PVOID* DynamicTable,
@@ -574,6 +577,11 @@ void UnregisterNonABICompliantCodeRange(void* start) {
ExceptionHandlerRecord* record =
reinterpret_cast<ExceptionHandlerRecord*>(start);
CHECK(::RtlDeleteFunctionTable(record->runtime_function));
+
+ // Unprotect reserved page.
+ DWORD old_protect;
+ CHECK(VirtualProtect(start, sizeof(ExceptionHandlerRecord),
+ PAGE_READWRITE, &old_protect));
}
#endif // V8_OS_WIN_X64
} else {
@@ -582,6 +590,11 @@ void UnregisterNonABICompliantCodeRange(void* start) {
if (record->dynamic_table) {
DeleteGrowableFunctionTable(record->dynamic_table);
}
+
+ // Unprotect reserved page.
+ DWORD old_protect;
+ CHECK(VirtualProtect(start, sizeof(CodeRangeUnwindingRecord),
+ PAGE_READWRITE, &old_protect));
}
}
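The `unwinding-info-win64.cc` change hoists the initialization lambda into a named `LoadNtdllUnwindingFunctionsOnce` and hands it to `base::CallOnce`, keeping the one-time lookup logic separate from the synchronization. The same shape expressed with the standard library, as a minimal sketch (names illustrative, `std::call_once` standing in for `v8::base::CallOnce`):

#include <mutex>

namespace {
std::once_flag g_init_once;
int g_expensive_value = 0;

// Stand-in for the GetProcAddress lookups above: runs exactly once.
void InitializeOnce() { g_expensive_value = 42; }
}  // namespace

int GetExpensiveValue() {
  std::call_once(g_init_once, &InitializeOnce);
  return g_expensive_value;
}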
diff --git a/chromium/v8/src/execution/arguments.h b/chromium/v8/src/execution/arguments.h
index 39877cf4d29..22ce80bb1e3 100644
--- a/chromium/v8/src/execution/arguments.h
+++ b/chromium/v8/src/execution/arguments.h
@@ -107,29 +107,40 @@ double ClobberDoubleRegisters(double x1, double x2, double x3, double x4);
// TODO(cbruni): add global flag to check whether any tracing events have been
// enabled.
-#define RUNTIME_FUNCTION_RETURNS_TYPE(Type, InternalType, Convert, Name) \
- static V8_INLINE InternalType __RT_impl_##Name(RuntimeArguments args, \
- Isolate* isolate); \
- \
+#ifdef V8_RUNTIME_CALL_STATS
+#define RUNTIME_ENTRY_WITH_RCS(Type, InternalType, Convert, Name) \
V8_NOINLINE static Type Stats_##Name(int args_length, Address* args_object, \
Isolate* isolate) { \
- RuntimeCallTimerScope timer(isolate, RuntimeCallCounterId::k##Name); \
+ RCS_SCOPE(isolate, RuntimeCallCounterId::k##Name); \
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.runtime"), \
"V8.Runtime_" #Name); \
RuntimeArguments args(args_length, args_object); \
return Convert(__RT_impl_##Name(args, isolate)); \
- } \
- \
- Type Name(int args_length, Address* args_object, Isolate* isolate) { \
- DCHECK(isolate->context().is_null() || isolate->context().IsContext()); \
- CLOBBER_DOUBLE_REGISTERS(); \
- if (V8_UNLIKELY(TracingFlags::is_runtime_stats_enabled())) { \
- return Stats_##Name(args_length, args_object, isolate); \
- } \
- RuntimeArguments args(args_length, args_object); \
- return Convert(__RT_impl_##Name(args, isolate)); \
- } \
- \
+ }
+
+#define TEST_AND_CALL_RCS(Name) \
+ if (V8_UNLIKELY(TracingFlags::is_runtime_stats_enabled())) { \
+ return Stats_##Name(args_length, args_object, isolate); \
+ }
+
+#else // V8_RUNTIME_CALL_STATS
+#define RUNTIME_ENTRY_WITH_RCS(Type, InternalType, Convert, Name)
+#define TEST_AND_CALL_RCS(Name)
+
+#endif // V8_RUNTIME_CALL_STATS
+
+#define RUNTIME_FUNCTION_RETURNS_TYPE(Type, InternalType, Convert, Name) \
+ static V8_INLINE InternalType __RT_impl_##Name(RuntimeArguments args, \
+ Isolate* isolate); \
+ RUNTIME_ENTRY_WITH_RCS(Type, InternalType, Convert, Name) \
+ Type Name(int args_length, Address* args_object, Isolate* isolate) { \
+ DCHECK(isolate->context().is_null() || isolate->context().IsContext()); \
+ CLOBBER_DOUBLE_REGISTERS(); \
+ TEST_AND_CALL_RCS(Name) \
+ RuntimeArguments args(args_length, args_object); \
+ return Convert(__RT_impl_##Name(args, isolate)); \
+ } \
+ \
static InternalType __RT_impl_##Name(RuntimeArguments args, Isolate* isolate)
#define CONVERT_OBJECT(x) (x).ptr()
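The `arguments.h` rewrite splits the old all-in-one macro so that the instrumented `Stats_##Name` entry point and the runtime check only exist when `V8_RUNTIME_CALL_STATS` is defined; otherwise both expand to nothing and only the fast path remains. A much smaller sketch of that conditional-wrapper macro layout, with hypothetical names (`DEMO_STATS`, `DEMO_FUNCTION`):

#include <cstdio>

#ifdef DEMO_STATS
#define DEMO_STATS_ENTRY(Name)             \
  static int Stats_##Name(int x) {         \
    std::printf("counting " #Name "\n");   \
    return Impl_##Name(x);                 \
  }
#define DEMO_TEST_AND_CALL_STATS(Name, x) \
  if (stats_enabled) return Stats_##Name(x);
#else
#define DEMO_STATS_ENTRY(Name)
#define DEMO_TEST_AND_CALL_STATS(Name, x)
#endif

#define DEMO_FUNCTION(Name)             \
  static int Impl_##Name(int x);        \
  DEMO_STATS_ENTRY(Name)                \
  int Name(int x, bool stats_enabled) { \
    (void)stats_enabled;                \
    DEMO_TEST_AND_CALL_STATS(Name, x)   \
    return Impl_##Name(x);              \
  }                                     \
  static int Impl_##Name(int x)

// The slow, instrumented wrapper is only reachable when DEMO_STATS is defined.
DEMO_FUNCTION(Square) { return x * x; }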
diff --git a/chromium/v8/src/execution/arm/simulator-arm.cc b/chromium/v8/src/execution/arm/simulator-arm.cc
index ee4e9af4621..ca7d709fbb7 100644
--- a/chromium/v8/src/execution/arm/simulator-arm.cc
+++ b/chromium/v8/src/execution/arm/simulator-arm.cc
@@ -42,7 +42,7 @@ DEFINE_LAZY_LEAKY_OBJECT_GETTER(Simulator::GlobalMonitor,
// SScanF not being implemented in a platform independent way through
// ::v8::internal::OS in the same way as SNPrintF is that the
// Windows C Run-Time Library does not provide vsscanf.
-#define SScanF sscanf // NOLINT
+#define SScanF sscanf
// The ArmDebugger class is used by the simulator while debugging simulated ARM
// code.
@@ -50,6 +50,7 @@ class ArmDebugger {
public:
explicit ArmDebugger(Simulator* sim) : sim_(sim) {}
void Debug();
+ bool ExecDebugCommand(ArrayUniquePtr<char> line_ptr);
private:
static const Instr kBreakpointInstr =
@@ -191,6 +192,31 @@ void ArmDebugger::Debug() {
intptr_t last_pc = -1;
bool done = false;
+ // Unset breakpoint while running in the debugger shell, making it invisible
+ // to all commands.
+ UndoBreakpoint();
+
+ while (!done && !sim_->has_bad_pc()) {
+ if (last_pc != sim_->get_pc()) {
+ disasm::NameConverter converter;
+ disasm::Disassembler dasm(converter);
+ // use a reasonably large buffer
+ v8::internal::EmbeddedVector<char, 256> buffer;
+ dasm.InstructionDecode(buffer, reinterpret_cast<byte*>(sim_->get_pc()));
+ PrintF(" 0x%08x %s\n", sim_->get_pc(), buffer.begin());
+ last_pc = sim_->get_pc();
+ }
+ ArrayUniquePtr<char> line(ReadLine("sim> "));
+
+ done = ExecDebugCommand(std::move(line));
+ }
+
+ // Reinstall breakpoint to stop execution and enter the debugger shell when
+ // hit.
+ RedoBreakpoint();
+}
+
+bool ArmDebugger::ExecDebugCommand(ArrayUniquePtr<char> line_ptr) {
#define COMMAND_SIZE 63
#define ARG_SIZE 255
@@ -207,352 +233,357 @@ void ArmDebugger::Debug() {
arg1[ARG_SIZE] = 0;
arg2[ARG_SIZE] = 0;
- // Unset breakpoint while running in the debugger shell, making it invisible
- // to all commands.
- UndoBreakpoint();
+ if (line_ptr == nullptr) return true;
- while (!done && !sim_->has_bad_pc()) {
- if (last_pc != sim_->get_pc()) {
- disasm::NameConverter converter;
- disasm::Disassembler dasm(converter);
- // use a reasonably large buffer
- v8::internal::EmbeddedVector<char, 256> buffer;
- dasm.InstructionDecode(buffer, reinterpret_cast<byte*>(sim_->get_pc()));
- PrintF(" 0x%08x %s\n", sim_->get_pc(), buffer.begin());
- last_pc = sim_->get_pc();
- }
- char* line = ReadLine("sim> ");
- if (line == nullptr) {
- break;
- } else {
- char* last_input = sim_->last_debugger_input();
- if (strcmp(line, "\n") == 0 && last_input != nullptr) {
- line = last_input;
- } else {
- // Ownership is transferred to sim_;
- sim_->set_last_debugger_input(line);
- }
- // Use sscanf to parse the individual parts of the command line. At the
- // moment no command expects more than two parameters.
- int argc = SScanF(line,
- "%" XSTR(COMMAND_SIZE) "s "
- "%" XSTR(ARG_SIZE) "s "
- "%" XSTR(ARG_SIZE) "s",
- cmd, arg1, arg2);
- if ((strcmp(cmd, "si") == 0) || (strcmp(cmd, "stepi") == 0)) {
- sim_->InstructionDecode(reinterpret_cast<Instruction*>(sim_->get_pc()));
- } else if ((strcmp(cmd, "c") == 0) || (strcmp(cmd, "cont") == 0)) {
- // Execute the one instruction we broke at with breakpoints disabled.
- sim_->InstructionDecode(reinterpret_cast<Instruction*>(sim_->get_pc()));
- // Leave the debugger shell.
- done = true;
- } else if ((strcmp(cmd, "p") == 0) || (strcmp(cmd, "print") == 0)) {
- if (argc == 2 || (argc == 3 && strcmp(arg2, "fp") == 0)) {
- int32_t value;
- float svalue;
- double dvalue;
- if (strcmp(arg1, "all") == 0) {
- for (int i = 0; i < kNumRegisters; i++) {
- value = GetRegisterValue(i);
- PrintF("%3s: 0x%08x %10d", RegisterName(Register::from_code(i)),
- value, value);
- if ((argc == 3 && strcmp(arg2, "fp") == 0) && i < 8 &&
- (i % 2) == 0) {
- dvalue = GetRegisterPairDoubleValue(i);
- PrintF(" (%f)\n", dvalue);
- } else {
- PrintF("\n");
- }
- }
- for (int i = 0; i < DwVfpRegister::SupportedRegisterCount(); i++) {
- dvalue = GetVFPDoubleRegisterValue(i);
- uint64_t as_words = bit_cast<uint64_t>(dvalue);
- PrintF("%3s: %f 0x%08x %08x\n", VFPRegisters::Name(i, true),
- dvalue, static_cast<uint32_t>(as_words >> 32),
- static_cast<uint32_t>(as_words & 0xFFFFFFFF));
- }
+ // Repeat last command by default.
+ const char* line = line_ptr.get();
+ const char* last_input = sim_->last_debugger_input();
+ if (strcmp(line, "\n") == 0 && (last_input != nullptr)) {
+ line_ptr.reset();
+ line = last_input;
+ } else {
+ // Update the latest command ran
+ sim_->set_last_debugger_input(std::move(line_ptr));
+ }
+
+ // Use sscanf to parse the individual parts of the command line. At the
+ // moment no command expects more than two parameters.
+ int argc = SScanF(line,
+ "%" XSTR(COMMAND_SIZE) "s "
+ "%" XSTR(ARG_SIZE) "s "
+ "%" XSTR(ARG_SIZE) "s",
+ cmd, arg1, arg2);
+ if ((strcmp(cmd, "si") == 0) || (strcmp(cmd, "stepi") == 0)) {
+ sim_->InstructionDecode(reinterpret_cast<Instruction*>(sim_->get_pc()));
+ } else if ((strcmp(cmd, "c") == 0) || (strcmp(cmd, "cont") == 0)) {
+ // Execute the one instruction we broke at with breakpoints disabled.
+ sim_->InstructionDecode(reinterpret_cast<Instruction*>(sim_->get_pc()));
+ // Leave the debugger shell.
+ return true;
+ } else if ((strcmp(cmd, "p") == 0) || (strcmp(cmd, "print") == 0)) {
+ if (argc == 2 || (argc == 3 && strcmp(arg2, "fp") == 0)) {
+ int32_t value;
+ float svalue;
+ double dvalue;
+ if (strcmp(arg1, "all") == 0) {
+ for (int i = 0; i < kNumRegisters; i++) {
+ value = GetRegisterValue(i);
+ PrintF("%3s: 0x%08x %10d", RegisterName(Register::from_code(i)),
+ value, value);
+ if ((argc == 3 && strcmp(arg2, "fp") == 0) && i < 8 && (i % 2) == 0) {
+ dvalue = GetRegisterPairDoubleValue(i);
+ PrintF(" (%f)\n", dvalue);
} else {
- if (GetValue(arg1, &value)) {
- PrintF("%s: 0x%08x %d \n", arg1, value, value);
- } else if (GetVFPSingleValue(arg1, &svalue)) {
- uint32_t as_word = bit_cast<uint32_t>(svalue);
- PrintF("%s: %f 0x%08x\n", arg1, svalue, as_word);
- } else if (GetVFPDoubleValue(arg1, &dvalue)) {
- uint64_t as_words = bit_cast<uint64_t>(dvalue);
- PrintF("%s: %f 0x%08x %08x\n", arg1, dvalue,
- static_cast<uint32_t>(as_words >> 32),
- static_cast<uint32_t>(as_words & 0xFFFFFFFF));
- } else {
- PrintF("%s unrecognized\n", arg1);
- }
+ PrintF("\n");
}
+ }
+ for (int i = 0; i < DwVfpRegister::SupportedRegisterCount(); i++) {
+ dvalue = GetVFPDoubleRegisterValue(i);
+ uint64_t as_words = bit_cast<uint64_t>(dvalue);
+ PrintF("%3s: %f 0x%08x %08x\n", VFPRegisters::Name(i, true), dvalue,
+ static_cast<uint32_t>(as_words >> 32),
+ static_cast<uint32_t>(as_words & 0xFFFFFFFF));
+ }
+ } else {
+ if (GetValue(arg1, &value)) {
+ PrintF("%s: 0x%08x %d \n", arg1, value, value);
+ } else if (GetVFPSingleValue(arg1, &svalue)) {
+ uint32_t as_word = bit_cast<uint32_t>(svalue);
+ PrintF("%s: %f 0x%08x\n", arg1, svalue, as_word);
+ } else if (GetVFPDoubleValue(arg1, &dvalue)) {
+ uint64_t as_words = bit_cast<uint64_t>(dvalue);
+ PrintF("%s: %f 0x%08x %08x\n", arg1, dvalue,
+ static_cast<uint32_t>(as_words >> 32),
+ static_cast<uint32_t>(as_words & 0xFFFFFFFF));
} else {
- PrintF("print <register>\n");
+ PrintF("%s unrecognized\n", arg1);
}
- } else if ((strcmp(cmd, "po") == 0) ||
- (strcmp(cmd, "printobject") == 0)) {
- if (argc == 2) {
- int32_t value;
- StdoutStream os;
- if (GetValue(arg1, &value)) {
- Object obj(value);
- os << arg1 << ": \n";
+ }
+ } else {
+ PrintF("print <register>\n");
+ }
+ } else if ((strcmp(cmd, "po") == 0) || (strcmp(cmd, "printobject") == 0)) {
+ if (argc == 2) {
+ int32_t value;
+ StdoutStream os;
+ if (GetValue(arg1, &value)) {
+ Object obj(value);
+ os << arg1 << ": \n";
#ifdef DEBUG
- obj.Print(os);
- os << "\n";
+ obj.Print(os);
+ os << "\n";
#else
- os << Brief(obj) << "\n";
+ os << Brief(obj) << "\n";
#endif
+ } else {
+ os << arg1 << " unrecognized\n";
+ }
+ } else {
+ PrintF("printobject <value>\n");
+ }
+ } else if (strcmp(cmd, "stack") == 0 || strcmp(cmd, "mem") == 0 ||
+ strcmp(cmd, "dump") == 0) {
+ int32_t* cur = nullptr;
+ int32_t* end = nullptr;
+ int next_arg = 1;
+
+ if (strcmp(cmd, "stack") == 0) {
+ cur = reinterpret_cast<int32_t*>(sim_->get_register(Simulator::sp));
+ } else { // "mem"
+ int32_t value;
+ if (!GetValue(arg1, &value)) {
+ PrintF("%s unrecognized\n", arg1);
+ return false;
+ }
+ cur = reinterpret_cast<int32_t*>(value);
+ next_arg++;
+ }
+
+ int32_t words;
+ if (argc == next_arg) {
+ words = 10;
+ } else {
+ if (!GetValue(argv[next_arg], &words)) {
+ words = 10;
+ }
+ }
+ end = cur + words;
+
+ bool skip_obj_print = (strcmp(cmd, "dump") == 0);
+ while (cur < end) {
+ PrintF(" 0x%08" V8PRIxPTR ": 0x%08x %10d",
+ reinterpret_cast<intptr_t>(cur), *cur, *cur);
+ Object obj(*cur);
+ Heap* current_heap = sim_->isolate_->heap();
+ if (!skip_obj_print) {
+ if (obj.IsSmi() ||
+ IsValidHeapObject(current_heap, HeapObject::cast(obj))) {
+ PrintF(" (");
+ if (obj.IsSmi()) {
+ PrintF("smi %d", Smi::ToInt(obj));
} else {
- os << arg1 << " unrecognized\n";
- }
- } else {
- PrintF("printobject <value>\n");
- }
- } else if (strcmp(cmd, "stack") == 0 || strcmp(cmd, "mem") == 0 ||
- strcmp(cmd, "dump") == 0) {
- int32_t* cur = nullptr;
- int32_t* end = nullptr;
- int next_arg = 1;
-
- if (strcmp(cmd, "stack") == 0) {
- cur = reinterpret_cast<int32_t*>(sim_->get_register(Simulator::sp));
- } else { // "mem"
- int32_t value;
- if (!GetValue(arg1, &value)) {
- PrintF("%s unrecognized\n", arg1);
- continue;
+ obj.ShortPrint();
}
- cur = reinterpret_cast<int32_t*>(value);
- next_arg++;
+ PrintF(")");
}
+ }
+ PrintF("\n");
+ cur++;
+ }
+ } else if (strcmp(cmd, "disasm") == 0 || strcmp(cmd, "di") == 0) {
+ disasm::NameConverter converter;
+ disasm::Disassembler dasm(converter);
+ // use a reasonably large buffer
+ v8::internal::EmbeddedVector<char, 256> buffer;
- int32_t words;
- if (argc == next_arg) {
- words = 10;
- } else {
- if (!GetValue(argv[next_arg], &words)) {
- words = 10;
- }
+ byte* prev = nullptr;
+ byte* cur = nullptr;
+ byte* end = nullptr;
+
+ if (argc == 1) {
+ cur = reinterpret_cast<byte*>(sim_->get_pc());
+ end = cur + (10 * kInstrSize);
+ } else if (argc == 2) {
+ int regnum = Registers::Number(arg1);
+ if (regnum != kNoRegister || strncmp(arg1, "0x", 2) == 0) {
+ // The argument is an address or a register name.
+ int32_t value;
+ if (GetValue(arg1, &value)) {
+ cur = reinterpret_cast<byte*>(value);
+ // Disassemble 10 instructions at <arg1>.
+ end = cur + (10 * kInstrSize);
}
- end = cur + words;
-
- bool skip_obj_print = (strcmp(cmd, "dump") == 0);
- while (cur < end) {
- PrintF(" 0x%08" V8PRIxPTR ": 0x%08x %10d",
- reinterpret_cast<intptr_t>(cur), *cur, *cur);
- Object obj(*cur);
- Heap* current_heap = sim_->isolate_->heap();
- if (!skip_obj_print) {
- if (obj.IsSmi() ||
- IsValidHeapObject(current_heap, HeapObject::cast(obj))) {
- PrintF(" (");
- if (obj.IsSmi()) {
- PrintF("smi %d", Smi::ToInt(obj));
- } else {
- obj.ShortPrint();
- }
- PrintF(")");
- }
- }
- PrintF("\n");
- cur++;
+ } else {
+ // The argument is the number of instructions.
+ int32_t value;
+ if (GetValue(arg1, &value)) {
+ cur = reinterpret_cast<byte*>(sim_->get_pc());
+ // Disassemble <arg1> instructions.
+ end = cur + (value * kInstrSize);
}
- } else if (strcmp(cmd, "disasm") == 0 || strcmp(cmd, "di") == 0) {
- disasm::NameConverter converter;
- disasm::Disassembler dasm(converter);
- // use a reasonably large buffer
- v8::internal::EmbeddedVector<char, 256> buffer;
+ }
+ } else {
+ int32_t value1;
+ int32_t value2;
+ if (GetValue(arg1, &value1) && GetValue(arg2, &value2)) {
+ cur = reinterpret_cast<byte*>(value1);
+ end = cur + (value2 * kInstrSize);
+ }
+ }
- byte* prev = nullptr;
- byte* cur = nullptr;
- byte* end = nullptr;
+ while (cur < end) {
+ prev = cur;
+ cur += dasm.InstructionDecode(buffer, cur);
+ PrintF(" 0x%08" V8PRIxPTR " %s\n", reinterpret_cast<intptr_t>(prev),
+ buffer.begin());
+ }
+ } else if (strcmp(cmd, "gdb") == 0) {
+ PrintF("relinquishing control to gdb\n");
+ v8::base::OS::DebugBreak();
+ PrintF("regaining control from gdb\n");
+ } else if (strcmp(cmd, "break") == 0) {
+ if (argc == 2) {
+ int32_t value;
+ if (GetValue(arg1, &value)) {
+ if (!SetBreakpoint(reinterpret_cast<Instruction*>(value))) {
+ PrintF("setting breakpoint failed\n");
+ }
+ } else {
+ PrintF("%s unrecognized\n", arg1);
+ }
+ } else {
+ PrintF("break <address>\n");
+ }
+ } else if (strcmp(cmd, "backtrace") == 0 || strcmp(cmd, "bt") == 0) {
+ int32_t pc = sim_->get_pc();
+ int32_t lr = sim_->get_register(Simulator::lr);
+ int32_t sp = sim_->get_register(Simulator::sp);
+ int32_t fp = sim_->get_register(Simulator::fp);
- if (argc == 1) {
- cur = reinterpret_cast<byte*>(sim_->get_pc());
- end = cur + (10 * kInstrSize);
- } else if (argc == 2) {
- int regnum = Registers::Number(arg1);
- if (regnum != kNoRegister || strncmp(arg1, "0x", 2) == 0) {
- // The argument is an address or a register name.
- int32_t value;
- if (GetValue(arg1, &value)) {
- cur = reinterpret_cast<byte*>(value);
- // Disassemble 10 instructions at <arg1>.
- end = cur + (10 * kInstrSize);
- }
- } else {
- // The argument is the number of instructions.
- int32_t value;
- if (GetValue(arg1, &value)) {
- cur = reinterpret_cast<byte*>(sim_->get_pc());
- // Disassemble <arg1> instructions.
- end = cur + (value * kInstrSize);
- }
+ int i = 0;
+ while (true) {
+ PrintF("#%d: 0x%08x (sp=0x%08x, fp=0x%08x)\n", i, pc, sp, fp);
+ pc = lr;
+ sp = fp;
+ if (pc == Simulator::end_sim_pc) {
+ break;
+ }
+ lr = *(reinterpret_cast<int32_t*>(fp) + 1);
+ fp = *reinterpret_cast<int32_t*>(fp);
+ i++;
+ if (i > 100) {
+ PrintF("Too many frames\n");
+ break;
+ }
+ }
+ } else if (strcmp(cmd, "del") == 0) {
+ DeleteBreakpoint();
+ } else if (strcmp(cmd, "flags") == 0) {
+ PrintF("N flag: %d; ", sim_->n_flag_);
+ PrintF("Z flag: %d; ", sim_->z_flag_);
+ PrintF("C flag: %d; ", sim_->c_flag_);
+ PrintF("V flag: %d\n", sim_->v_flag_);
+ PrintF("INVALID OP flag: %d; ", sim_->inv_op_vfp_flag_);
+ PrintF("DIV BY ZERO flag: %d; ", sim_->div_zero_vfp_flag_);
+ PrintF("OVERFLOW flag: %d; ", sim_->overflow_vfp_flag_);
+ PrintF("UNDERFLOW flag: %d; ", sim_->underflow_vfp_flag_);
+ PrintF("INEXACT flag: %d;\n", sim_->inexact_vfp_flag_);
+ } else if (strcmp(cmd, "stop") == 0) {
+ int32_t value;
+ intptr_t stop_pc = sim_->get_pc() - kInstrSize;
+ Instruction* stop_instr = reinterpret_cast<Instruction*>(stop_pc);
+ if ((argc == 2) && (strcmp(arg1, "unstop") == 0)) {
+ // Remove the current stop.
+ if (stop_instr->IsStop()) {
+ SetInstructionBitsInCodeSpace(stop_instr, kNopInstr,
+ sim_->isolate_->heap());
+ } else {
+ PrintF("Not at debugger stop.\n");
+ }
+ } else if (argc == 3) {
+ // Print information about all/the specified breakpoint(s).
+ if (strcmp(arg1, "info") == 0) {
+ if (strcmp(arg2, "all") == 0) {
+ PrintF("Stop information:\n");
+ for (uint32_t i = 0; i < sim_->kNumOfWatchedStops; i++) {
+ sim_->PrintStopInfo(i);
}
+ } else if (GetValue(arg2, &value)) {
+ sim_->PrintStopInfo(value);
} else {
- int32_t value1;
- int32_t value2;
- if (GetValue(arg1, &value1) && GetValue(arg2, &value2)) {
- cur = reinterpret_cast<byte*>(value1);
- end = cur + (value2 * kInstrSize);
- }
+ PrintF("Unrecognized argument.\n");
}
-
- while (cur < end) {
- prev = cur;
- cur += dasm.InstructionDecode(buffer, cur);
- PrintF(" 0x%08" V8PRIxPTR " %s\n", reinterpret_cast<intptr_t>(prev),
- buffer.begin());
- }
- } else if (strcmp(cmd, "gdb") == 0) {
- PrintF("relinquishing control to gdb\n");
- v8::base::OS::DebugBreak();
- PrintF("regaining control from gdb\n");
- } else if (strcmp(cmd, "break") == 0) {
- if (argc == 2) {
- int32_t value;
- if (GetValue(arg1, &value)) {
- if (!SetBreakpoint(reinterpret_cast<Instruction*>(value))) {
- PrintF("setting breakpoint failed\n");
- }
- } else {
- PrintF("%s unrecognized\n", arg1);
+ } else if (strcmp(arg1, "enable") == 0) {
+ // Enable all/the specified breakpoint(s).
+ if (strcmp(arg2, "all") == 0) {
+ for (uint32_t i = 0; i < sim_->kNumOfWatchedStops; i++) {
+ sim_->EnableStop(i);
}
+ } else if (GetValue(arg2, &value)) {
+ sim_->EnableStop(value);
} else {
- PrintF("break <address>\n");
+ PrintF("Unrecognized argument.\n");
}
- } else if (strcmp(cmd, "del") == 0) {
- DeleteBreakpoint();
- } else if (strcmp(cmd, "flags") == 0) {
- PrintF("N flag: %d; ", sim_->n_flag_);
- PrintF("Z flag: %d; ", sim_->z_flag_);
- PrintF("C flag: %d; ", sim_->c_flag_);
- PrintF("V flag: %d\n", sim_->v_flag_);
- PrintF("INVALID OP flag: %d; ", sim_->inv_op_vfp_flag_);
- PrintF("DIV BY ZERO flag: %d; ", sim_->div_zero_vfp_flag_);
- PrintF("OVERFLOW flag: %d; ", sim_->overflow_vfp_flag_);
- PrintF("UNDERFLOW flag: %d; ", sim_->underflow_vfp_flag_);
- PrintF("INEXACT flag: %d;\n", sim_->inexact_vfp_flag_);
- } else if (strcmp(cmd, "stop") == 0) {
- int32_t value;
- intptr_t stop_pc = sim_->get_pc() - kInstrSize;
- Instruction* stop_instr = reinterpret_cast<Instruction*>(stop_pc);
- if ((argc == 2) && (strcmp(arg1, "unstop") == 0)) {
- // Remove the current stop.
- if (stop_instr->IsStop()) {
- SetInstructionBitsInCodeSpace(stop_instr, kNopInstr,
- sim_->isolate_->heap());
- } else {
- PrintF("Not at debugger stop.\n");
- }
- } else if (argc == 3) {
- // Print information about all/the specified breakpoint(s).
- if (strcmp(arg1, "info") == 0) {
- if (strcmp(arg2, "all") == 0) {
- PrintF("Stop information:\n");
- for (uint32_t i = 0; i < sim_->kNumOfWatchedStops; i++) {
- sim_->PrintStopInfo(i);
- }
- } else if (GetValue(arg2, &value)) {
- sim_->PrintStopInfo(value);
- } else {
- PrintF("Unrecognized argument.\n");
- }
- } else if (strcmp(arg1, "enable") == 0) {
- // Enable all/the specified breakpoint(s).
- if (strcmp(arg2, "all") == 0) {
- for (uint32_t i = 0; i < sim_->kNumOfWatchedStops; i++) {
- sim_->EnableStop(i);
- }
- } else if (GetValue(arg2, &value)) {
- sim_->EnableStop(value);
- } else {
- PrintF("Unrecognized argument.\n");
- }
- } else if (strcmp(arg1, "disable") == 0) {
- // Disable all/the specified breakpoint(s).
- if (strcmp(arg2, "all") == 0) {
- for (uint32_t i = 0; i < sim_->kNumOfWatchedStops; i++) {
- sim_->DisableStop(i);
- }
- } else if (GetValue(arg2, &value)) {
- sim_->DisableStop(value);
- } else {
- PrintF("Unrecognized argument.\n");
- }
+ } else if (strcmp(arg1, "disable") == 0) {
+ // Disable all/the specified breakpoint(s).
+ if (strcmp(arg2, "all") == 0) {
+ for (uint32_t i = 0; i < sim_->kNumOfWatchedStops; i++) {
+ sim_->DisableStop(i);
}
+ } else if (GetValue(arg2, &value)) {
+ sim_->DisableStop(value);
} else {
- PrintF("Wrong usage. Use help command for more information.\n");
+ PrintF("Unrecognized argument.\n");
}
- } else if ((strcmp(cmd, "t") == 0) || strcmp(cmd, "trace") == 0) {
- ::v8::internal::FLAG_trace_sim = !::v8::internal::FLAG_trace_sim;
- PrintF("Trace of executed instructions is %s\n",
- ::v8::internal::FLAG_trace_sim ? "on" : "off");
- } else if ((strcmp(cmd, "h") == 0) || (strcmp(cmd, "help") == 0)) {
- PrintF("cont\n");
- PrintF(" continue execution (alias 'c')\n");
- PrintF("stepi\n");
- PrintF(" step one instruction (alias 'si')\n");
- PrintF("print <register>\n");
- PrintF(" print register content (alias 'p')\n");
- PrintF(" use register name 'all' to print all registers\n");
- PrintF(" add argument 'fp' to print register pair double values\n");
- PrintF("printobject <register>\n");
- PrintF(" print an object from a register (alias 'po')\n");
- PrintF("flags\n");
- PrintF(" print flags\n");
- PrintF("stack [<words>]\n");
- PrintF(" dump stack content, default dump 10 words)\n");
- PrintF("mem <address> [<words>]\n");
- PrintF(" dump memory content, default dump 10 words)\n");
- PrintF("dump [<words>]\n");
- PrintF(
- " dump memory content without pretty printing JS objects, default "
- "dump 10 words)\n");
- PrintF("disasm [<instructions>]\n");
- PrintF("disasm [<address/register>]\n");
- PrintF("disasm [[<address/register>] <instructions>]\n");
- PrintF(" disassemble code, default is 10 instructions\n");
- PrintF(" from pc (alias 'di')\n");
- PrintF("gdb\n");
- PrintF(" enter gdb\n");
- PrintF("break <address>\n");
- PrintF(" set a break point on the address\n");
- PrintF("del\n");
- PrintF(" delete the breakpoint\n");
- PrintF("trace (alias 't')\n");
- PrintF(" toogle the tracing of all executed statements\n");
- PrintF("stop feature:\n");
- PrintF(" Description:\n");
- PrintF(" Stops are debug instructions inserted by\n");
- PrintF(" the Assembler::stop() function.\n");
- PrintF(" When hitting a stop, the Simulator will\n");
- PrintF(" stop and give control to the ArmDebugger.\n");
- PrintF(" The first %d stop codes are watched:\n",
- Simulator::kNumOfWatchedStops);
- PrintF(" - They can be enabled / disabled: the Simulator\n");
- PrintF(" will / won't stop when hitting them.\n");
- PrintF(" - The Simulator keeps track of how many times they \n");
- PrintF(" are met. (See the info command.) Going over a\n");
- PrintF(" disabled stop still increases its counter. \n");
- PrintF(" Commands:\n");
- PrintF(" stop info all/<code> : print infos about number <code>\n");
- PrintF(" or all stop(s).\n");
- PrintF(" stop enable/disable all/<code> : enables / disables\n");
- PrintF(" all or number <code> stop(s)\n");
- PrintF(" stop unstop\n");
- PrintF(" ignore the stop instruction at the current location\n");
- PrintF(" from now on\n");
- } else {
- PrintF("Unknown command: %s\n", cmd);
}
+ } else {
+ PrintF("Wrong usage. Use help command for more information.\n");
}
+ } else if ((strcmp(cmd, "t") == 0) || strcmp(cmd, "trace") == 0) {
+ ::v8::internal::FLAG_trace_sim = !::v8::internal::FLAG_trace_sim;
+ PrintF("Trace of executed instructions is %s\n",
+ ::v8::internal::FLAG_trace_sim ? "on" : "off");
+ } else if ((strcmp(cmd, "h") == 0) || (strcmp(cmd, "help") == 0)) {
+ PrintF("cont\n");
+ PrintF(" continue execution (alias 'c')\n");
+ PrintF("stepi\n");
+ PrintF(" step one instruction (alias 'si')\n");
+ PrintF("print <register>\n");
+ PrintF(" print register content (alias 'p')\n");
+ PrintF(" use register name 'all' to print all registers\n");
+ PrintF(" add argument 'fp' to print register pair double values\n");
+ PrintF("printobject <register>\n");
+ PrintF(" print an object from a register (alias 'po')\n");
+ PrintF("flags\n");
+ PrintF(" print flags\n");
+ PrintF("stack [<words>]\n");
+ PrintF(" dump stack content (default 10 words)\n");
+ PrintF("mem <address> [<words>]\n");
+ PrintF(" dump memory content (default 10 words)\n");
+ PrintF("dump [<words>]\n");
+ PrintF(
+ " dump memory content without pretty printing JS objects "
+ "(default 10 words)\n");
+ PrintF("disasm [<instructions>]\n");
+ PrintF("disasm [<address/register>]\n");
+ PrintF("disasm [[<address/register>] <instructions>]\n");
+ PrintF(" disassemble code, default is 10 instructions\n");
+ PrintF(" from pc (alias 'di')\n");
+ PrintF("gdb\n");
+ PrintF(" enter gdb\n");
+ PrintF("break <address>\n");
+ PrintF(" set a break point on the address\n");
+ PrintF("backtrace / bt\n");
+ PrintF(" walk the frame pointers, dumping the pc/sp/fp for each frame\n");
+ PrintF("del\n");
+ PrintF(" delete the breakpoint\n");
+ PrintF("trace (alias 't')\n");
+ PrintF(" toggle the tracing of all executed statements\n");
+ PrintF("stop feature:\n");
+ PrintF(" Description:\n");
+ PrintF(" Stops are debug instructions inserted by\n");
+ PrintF(" the Assembler::stop() function.\n");
+ PrintF(" When hitting a stop, the Simulator will\n");
+ PrintF(" stop and give control to the ArmDebugger.\n");
+ PrintF(" The first %d stop codes are watched:\n",
+ Simulator::kNumOfWatchedStops);
+ PrintF(" - They can be enabled / disabled: the Simulator\n");
+ PrintF(" will / won't stop when hitting them.\n");
+ PrintF(" - The Simulator keeps track of how many times they \n");
+ PrintF(" are met. (See the info command.) Going over a\n");
+ PrintF(" disabled stop still increases its counter. \n");
+ PrintF(" Commands:\n");
+ PrintF(" stop info all/<code> : print info about number <code>\n");
+ PrintF(" or all stop(s).\n");
+ PrintF(" stop enable/disable all/<code> : enables / disables\n");
+ PrintF(" all or number <code> stop(s)\n");
+ PrintF(" stop unstop\n");
+ PrintF(" ignore the stop instruction at the current location\n");
+ PrintF(" from now on\n");
+ } else {
+ PrintF("Unknown command: %s\n", cmd);
}
-
- // Reinstall breakpoint to stop execution and enter the debugger shell when
- // hit.
- RedoBreakpoint();
+ return false;
#undef COMMAND_SIZE
#undef ARG_SIZE
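
The new backtrace command above walks the saved frame-pointer chain. A minimal, standalone sketch of that walk, assuming the ARM frame layout used here where the word at fp holds the caller's frame pointer and the word at fp + 4 holds the saved lr (names and the end-of-simulation sentinel are illustrative, not V8 code):

#include <cstdint>
#include <cstdio>

// Walk a chain of frames: print the current pc/sp/fp, then follow the saved
// lr and caller fp stored at [fp + 4] and [fp] until a sentinel pc is reached
// or a frame limit is hit.
void WalkFramePointers(uint32_t pc, uint32_t sp, uint32_t fp, uint32_t lr,
                       uint32_t end_pc) {
  for (int i = 0; i <= 100; ++i) {
    std::printf("#%d: 0x%08x (sp=0x%08x, fp=0x%08x)\n", i, pc, sp, fp);
    if (i == 100) {
      std::printf("Too many frames\n");
      break;
    }
    pc = lr;  // the caller resumes at the saved return address
    sp = fp;  // the callee's fp is the caller's sp
    if (pc == end_pc) break;  // sentinel marking the entry frame
    lr = *(reinterpret_cast<uint32_t*>(static_cast<uintptr_t>(fp)) + 1);
    fp = *reinterpret_cast<uint32_t*>(static_cast<uintptr_t>(fp));
  }
}

As in the command itself, the walk only makes sense on addresses that belong to the simulated stack; the cap on the frame count guards against a corrupted chain.
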
@@ -577,11 +608,6 @@ static bool AllOnOnePage(uintptr_t start, int size) {
return start_page == end_page;
}
-void Simulator::set_last_debugger_input(char* input) {
- DeleteArray(last_debugger_input_);
- last_debugger_input_ = input;
-}
-
void Simulator::SetRedirectInstruction(Instruction* instruction) {
instruction->SetInstructionBits(al | (0xF * B24) | kCallRtRedirected);
}
@@ -6499,4 +6525,26 @@ void Simulator::GlobalMonitor::RemoveProcessor(Processor* processor) {
} // namespace internal
} // namespace v8
+//
+// The following functions are used by our gdb macros.
+//
+V8_EXPORT_PRIVATE extern bool _v8_internal_Simulator_ExecDebugCommand(
+ const char* command) {
+ i::Isolate* isolate = i::Isolate::Current();
+ if (!isolate) {
+ fprintf(stderr, "No V8 Isolate found\n");
+ return false;
+ }
+ i::Simulator* simulator = i::Simulator::current(isolate);
+ if (!simulator) {
+ fprintf(stderr, "No Arm simulator found\n");
+ return false;
+ }
+ // Copy the command so that the simulator can take ownership of it.
+ size_t len = strlen(command);
+ i::ArrayUniquePtr<char> command_copy(i::NewArray<char>(len + 1));
+ i::MemCopy(command_copy.get(), command, len + 1);
+ return i::ArmDebugger(simulator).ExecDebugCommand(std::move(command_copy));
+}
+
#endif // USE_SIMULATOR
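
The exported hook above copies the caller's command string so the debugger can take ownership of it. A standalone sketch of that hand-off, using std::unique_ptr<char[]> in place of V8's ArrayUniquePtr (the consumer below is a placeholder, not the real ArmDebugger API):

#include <cstddef>
#include <cstring>
#include <memory>
#include <utility>

// Placeholder consumer: takes ownership of the buffer and frees it on return.
bool ExecCommand(std::unique_ptr<char[]> command) {
  return command && command[0] != '\0';
}

// Copy a caller-owned C string into a heap buffer the consumer can own.
bool ExecCommandCopy(const char* command) {
  std::size_t len = std::strlen(command);
  std::unique_ptr<char[]> copy(new char[len + 1]);
  std::memcpy(copy.get(), command, len + 1);  // include the trailing '\0'
  return ExecCommand(std::move(copy));
}

The copy matters because the string handed in from a debugger macro may live in temporary storage the simulator must not hold on to.
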
diff --git a/chromium/v8/src/execution/arm/simulator-arm.h b/chromium/v8/src/execution/arm/simulator-arm.h
index a4570ff47df..28cf72591ca 100644
--- a/chromium/v8/src/execution/arm/simulator-arm.h
+++ b/chromium/v8/src/execution/arm/simulator-arm.h
@@ -77,6 +77,7 @@ class Simulator : public SimulatorBase {
r14,
r15,
num_registers,
+ fp = 11,
sp = 13,
lr = 14,
pc = 15,
@@ -255,8 +256,10 @@ class Simulator : public SimulatorBase {
uintptr_t PopAddress();
// Debugger input.
- void set_last_debugger_input(char* input);
- char* last_debugger_input() { return last_debugger_input_; }
+ void set_last_debugger_input(ArrayUniquePtr<char> input) {
+ last_debugger_input_ = std::move(input);
+ }
+ const char* last_debugger_input() { return last_debugger_input_.get(); }
// Redirection support.
static void SetRedirectInstruction(Instruction* instruction);
@@ -468,7 +471,7 @@ class Simulator : public SimulatorBase {
int icount_;
// Debugger input.
- char* last_debugger_input_;
+ ArrayUniquePtr<char> last_debugger_input_;
// Registered breakpoints.
Instruction* break_pc_;
diff --git a/chromium/v8/src/execution/arm64/simulator-arm64.cc b/chromium/v8/src/execution/arm64/simulator-arm64.cc
index 23a03848ad5..3853316a801 100644
--- a/chromium/v8/src/execution/arm64/simulator-arm64.cc
+++ b/chromium/v8/src/execution/arm64/simulator-arm64.cc
@@ -34,7 +34,7 @@ namespace internal {
// SScanF not being implemented in a platform independent way through
// ::v8::internal::OS in the same way as SNPrintF is that the
// Windows C Run-Time Library does not provide vsscanf.
-#define SScanF sscanf // NOLINT
+#define SScanF sscanf
// Helpers for colors.
#define COLOUR(colour_code) "\033[0;" colour_code "m"
diff --git a/chromium/v8/src/execution/execution.cc b/chromium/v8/src/execution/execution.cc
index 7866b406d90..b16e791aaa3 100644
--- a/chromium/v8/src/execution/execution.cc
+++ b/chromium/v8/src/execution/execution.cc
@@ -190,8 +190,12 @@ MaybeHandle<Context> NewScriptContext(Isolate* isolate,
if (IsLexicalVariableMode(mode) || IsLexicalVariableMode(lookup.mode)) {
Handle<Context> context = ScriptContextTable::GetContext(
isolate, script_context, lookup.context_index);
- // If we are trying to re-declare a REPL-mode let as a let, allow it.
- if (!(mode == VariableMode::kLet && lookup.mode == VariableMode::kLet &&
+ // If we are trying to re-declare a REPL-mode let as a let or REPL-mode
+ // const as a const, allow it.
+ if (!(((mode == VariableMode::kLet &&
+ lookup.mode == VariableMode::kLet) ||
+ (mode == VariableMode::kConst &&
+ lookup.mode == VariableMode::kConst)) &&
scope_info->IsReplModeScope() &&
context->scope_info().IsReplModeScope())) {
// ES#sec-globaldeclarationinstantiation 5.b:
@@ -244,7 +248,7 @@ MaybeHandle<Context> NewScriptContext(Isolate* isolate,
V8_WARN_UNUSED_RESULT MaybeHandle<Object> Invoke(Isolate* isolate,
const InvokeParams& params) {
- RuntimeCallTimerScope timer(isolate, RuntimeCallCounterId::kInvoke);
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kInvoke);
DCHECK(!params.receiver->IsJSGlobalObject());
DCHECK_LE(params.argc, FixedArray::kMaxLength);
@@ -368,7 +372,7 @@ V8_WARN_UNUSED_RESULT MaybeHandle<Object> Invoke(Isolate* isolate,
Address func = params.target->ptr();
Address recv = params.receiver->ptr();
Address** argv = reinterpret_cast<Address**>(params.argv);
- RuntimeCallTimerScope timer(isolate, RuntimeCallCounterId::kJS_Execution);
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kJS_Execution);
value = Object(stub_entry.Call(isolate->isolate_data()->isolate_root(),
orig_func, func, recv, params.argc, argv));
} else {
@@ -383,7 +387,7 @@ V8_WARN_UNUSED_RESULT MaybeHandle<Object> Invoke(Isolate* isolate,
JSEntryFunction stub_entry =
JSEntryFunction::FromAddress(isolate, code->InstructionStart());
- RuntimeCallTimerScope timer(isolate, RuntimeCallCounterId::kJS_Execution);
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kJS_Execution);
value = Object(stub_entry.Call(isolate->isolate_data()->isolate_root(),
params.microtask_queue));
}
@@ -552,7 +556,7 @@ void Execution::CallWasm(Isolate* isolate, Handle<Code> wrapper_code,
trap_handler::SetThreadInWasm();
{
- RuntimeCallTimerScope timer(isolate, RuntimeCallCounterId::kJS_Execution);
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kJS_Execution);
STATIC_ASSERT(compiler::CWasmEntryParameters::kCodeEntry == 0);
STATIC_ASSERT(compiler::CWasmEntryParameters::kObjectRef == 1);
STATIC_ASSERT(compiler::CWasmEntryParameters::kArgumentsBuffer == 2);
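
The first hunk in this file relaxes the script-context re-declaration check. A small standalone sketch of the resulting predicate (a hypothetical helper, assuming both scopes have already been classified as REPL-mode or not):

enum class VariableMode { kLet, kConst, kVar };

// In REPL mode, re-declaring a let as let or a const as const is allowed;
// any other lexical re-declaration is still reported as an error.
bool IsAllowedReplRedeclaration(VariableMode new_mode, VariableMode old_mode,
                                bool both_scopes_are_repl_mode) {
  bool same_lexical_kind =
      (new_mode == VariableMode::kLet && old_mode == VariableMode::kLet) ||
      (new_mode == VariableMode::kConst && old_mode == VariableMode::kConst);
  return same_lexical_kind && both_scopes_are_repl_mode;
}
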
diff --git a/chromium/v8/src/execution/external-pointer-table.h b/chromium/v8/src/execution/external-pointer-table.h
index 243682b36cf..4a80fe7d4dd 100644
--- a/chromium/v8/src/execution/external-pointer-table.h
+++ b/chromium/v8/src/execution/external-pointer-table.h
@@ -27,7 +27,7 @@ class V8_EXPORT_PRIVATE ExternalPointerTable {
buffer_[kNullExternalPointer] = kNullAddress;
}
- ~ExternalPointerTable() { ::free(buffer_); }
+ ~ExternalPointerTable() { base::Free(buffer_); }
Address get(uint32_t index) const {
CHECK_LT(index, length_);
@@ -49,13 +49,6 @@ class V8_EXPORT_PRIVATE ExternalPointerTable {
return index;
}
- void free(uint32_t index) {
- // TODO(v8:10391, saelo): implement simple free list here, i.e. set
- // buffer_[index] to freelist_head_ and set freelist_head
- // to index
- DCHECK_NE(kNullExternalPointer, index);
- }
-
// Returns true if the entry exists in the table and therefore it can be read.
bool is_valid_index(uint32_t index) const {
// TODO(v8:10391, saelo): also check here if entry is free
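
The deleted stub's TODO describes the intended index-based free list for this table. A standalone sketch of that scheme (illustrative container, not the real ExternalPointerTable):

#include <cstdint>
#include <vector>

// A freed slot stores the previous free-list head, and the head becomes the
// freed index, so both Free and the free-list Allocate path are O(1).
class IndexTable {
 public:
  static constexpr uint32_t kNoFreeSlot = UINT32_MAX;

  uint32_t Allocate(uintptr_t value) {
    uint32_t index;
    if (freelist_head_ != kNoFreeSlot) {
      index = freelist_head_;
      freelist_head_ = static_cast<uint32_t>(buffer_[index]);  // pop
    } else {
      index = static_cast<uint32_t>(buffer_.size());
      buffer_.push_back(0);
    }
    buffer_[index] = value;
    return index;
  }

  void Free(uint32_t index) {
    buffer_[index] = freelist_head_;  // chain the previous head
    freelist_head_ = index;
  }

 private:
  std::vector<uintptr_t> buffer_;
  uint32_t freelist_head_ = kNoFreeSlot;
};
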
diff --git a/chromium/v8/src/execution/frame-constants.h b/chromium/v8/src/execution/frame-constants.h
index 10cdff5c0fc..1148a942123 100644
--- a/chromium/v8/src/execution/frame-constants.h
+++ b/chromium/v8/src/execution/frame-constants.h
@@ -390,23 +390,23 @@ inline static int FrameSlotToFPOffset(int slot) {
} // namespace v8
#if V8_TARGET_ARCH_IA32
-#include "src/execution/ia32/frame-constants-ia32.h" // NOLINT
+#include "src/execution/ia32/frame-constants-ia32.h"
#elif V8_TARGET_ARCH_X64
-#include "src/execution/x64/frame-constants-x64.h" // NOLINT
+#include "src/execution/x64/frame-constants-x64.h"
#elif V8_TARGET_ARCH_ARM64
-#include "src/execution/arm64/frame-constants-arm64.h" // NOLINT
+#include "src/execution/arm64/frame-constants-arm64.h"
#elif V8_TARGET_ARCH_ARM
-#include "src/execution/arm/frame-constants-arm.h" // NOLINT
+#include "src/execution/arm/frame-constants-arm.h"
#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
-#include "src/execution/ppc/frame-constants-ppc.h" // NOLINT
+#include "src/execution/ppc/frame-constants-ppc.h"
#elif V8_TARGET_ARCH_MIPS
-#include "src/execution/mips/frame-constants-mips.h" // NOLINT
+#include "src/execution/mips/frame-constants-mips.h"
#elif V8_TARGET_ARCH_MIPS64
-#include "src/execution/mips64/frame-constants-mips64.h" // NOLINT
+#include "src/execution/mips64/frame-constants-mips64.h"
#elif V8_TARGET_ARCH_S390
-#include "src/execution/s390/frame-constants-s390.h" // NOLINT
+#include "src/execution/s390/frame-constants-s390.h"
#elif V8_TARGET_ARCH_RISCV64
-#include "src/execution/riscv64/frame-constants-riscv64.h" // NOLINT
+#include "src/execution/riscv64/frame-constants-riscv64.h"
#else
#error Unsupported target architecture.
#endif
diff --git a/chromium/v8/src/execution/frames.cc b/chromium/v8/src/execution/frames.cc
index 65d34a56519..bef92948ebd 100644
--- a/chromium/v8/src/execution/frames.cc
+++ b/chromium/v8/src/execution/frames.cc
@@ -228,8 +228,8 @@ bool IsInterpreterFramePc(Isolate* isolate, Address pc,
Builtins::Name builtin_index = InstructionStream::TryLookupCode(isolate, pc);
if (builtin_index != Builtins::kNoBuiltinId &&
(builtin_index == Builtins::kInterpreterEntryTrampoline ||
- builtin_index == Builtins::kInterpreterEnterBytecodeAdvance ||
- builtin_index == Builtins::kInterpreterEnterBytecodeDispatch ||
+ builtin_index == Builtins::kInterpreterEnterAtBytecode ||
+ builtin_index == Builtins::kInterpreterEnterAtNextBytecode ||
builtin_index == Builtins::kBaselineEnterAtBytecode ||
builtin_index == Builtins::kBaselineEnterAtNextBytecode)) {
return true;
@@ -610,7 +610,6 @@ StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
}
return BUILTIN;
case CodeKind::TURBOFAN:
- case CodeKind::NATIVE_CONTEXT_INDEPENDENT:
case CodeKind::TURBOPROP:
return OPTIMIZED;
case CodeKind::BASELINE:
@@ -1060,12 +1059,14 @@ void CommonFrame::IterateCompiledFrame(RootVisitor* v) const {
}
// Visit pointer spill slots and locals.
- uint8_t* safepoint_bits = safepoint_entry.bits();
- for (unsigned index = 0; index < stack_slots; index++) {
- int byte_index = index >> kBitsPerByteLog2;
- int bit_index = index & (kBitsPerByte - 1);
- if ((safepoint_bits[byte_index] & (1U << bit_index)) != 0) {
- FullObjectSlot spill_slot = parameters_limit + index;
+ DCHECK_GE((stack_slots + kBitsPerByte) / kBitsPerByte,
+ safepoint_entry.entry_size());
+ int slot_offset = 0;
+ for (uint8_t bits : safepoint_entry.iterate_bits()) {
+ while (bits) {
+ int bit = base::bits::CountTrailingZeros(bits);
+ bits &= ~(1 << bit);
+ FullObjectSlot spill_slot = parameters_limit + slot_offset + bit;
#ifdef V8_COMPRESS_POINTERS
// Spill slots may contain compressed values in which case the upper
// 32-bits will contain zeros. In order to simplify handling of such
@@ -1083,6 +1084,7 @@ void CommonFrame::IterateCompiledFrame(RootVisitor* v) const {
#endif
v->VisitRootPointer(Root::kStackRoots, nullptr, spill_slot);
}
+ slot_offset += kBitsPerByte;
}
// Visit tagged parameters that have been passed to the function of this
@@ -1444,10 +1446,6 @@ Handle<Object> FrameSummary::JavaScriptFrameSummary::script() const {
return handle(function_->shared().script(), isolate());
}
-Handle<String> FrameSummary::JavaScriptFrameSummary::FunctionName() const {
- return JSFunction::GetDebugName(function_);
-}
-
Handle<Context> FrameSummary::JavaScriptFrameSummary::native_context() const {
return handle(function_->context().native_context(), isolate());
}
@@ -1485,13 +1483,6 @@ Handle<Script> FrameSummary::WasmFrameSummary::script() const {
wasm_instance()->GetIsolate());
}
-Handle<String> FrameSummary::WasmFrameSummary::FunctionName() const {
- Handle<WasmModuleObject> module_object(wasm_instance()->module_object(),
- isolate());
- return WasmModuleObject::GetFunctionName(isolate(), module_object,
- function_index());
-}
-
Handle<Context> FrameSummary::WasmFrameSummary::native_context() const {
return handle(wasm_instance()->native_context(), isolate());
}
@@ -1563,7 +1554,6 @@ FRAME_SUMMARY_DISPATCH(bool, is_subject_to_debugging)
FRAME_SUMMARY_DISPATCH(Handle<Object>, script)
FRAME_SUMMARY_DISPATCH(int, SourcePosition)
FRAME_SUMMARY_DISPATCH(int, SourceStatementPosition)
-FRAME_SUMMARY_DISPATCH(Handle<String>, FunctionName)
FRAME_SUMMARY_DISPATCH(Handle<Context>, native_context)
#undef FRAME_SUMMARY_DISPATCH
@@ -1860,8 +1850,13 @@ int BuiltinFrame::ComputeParametersCount() const {
#if V8_ENABLE_WEBASSEMBLY
void WasmFrame::Print(StringStream* accumulator, PrintMode mode,
int index) const {
- wasm::WasmCodeRefScope code_ref_scope;
PrintIndex(accumulator, mode, index);
+ if (function_index() == wasm::kAnonymousFuncIndex) {
+ accumulator->Add("Anonymous wasm wrapper [pc: %p]\n",
+ reinterpret_cast<void*>(pc()));
+ return;
+ }
+ wasm::WasmCodeRefScope code_ref_scope;
accumulator->Add("WASM [");
accumulator->PrintName(script().name());
Address instruction_start = isolate()
@@ -1905,7 +1900,7 @@ WasmModuleObject WasmFrame::module_object() const {
return wasm_instance().module_object();
}
-uint32_t WasmFrame::function_index() const {
+int WasmFrame::function_index() const {
wasm::WasmCodeRefScope code_ref_scope;
return wasm_code()->index();
}
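
The spill-slot hunk above replaces the per-bit index loop with a count-trailing-zeros walk over each safepoint byte. A standalone sketch of that bit-iteration idiom (the GCC/Clang builtin stands in for base::bits::CountTrailingZeros):

#include <cstdint>
#include <vector>

// Visit the index of every set bit: each byte covers eight consecutive slots,
// and set bits are peeled off lowest-first with a count-trailing-zeros step.
void VisitSetBits(const std::vector<uint8_t>& bytes,
                  void (*visit)(int slot_index)) {
  int slot_offset = 0;
  for (uint8_t bits : bytes) {
    while (bits) {
      int bit = __builtin_ctz(bits);                      // lowest set bit
      bits = static_cast<uint8_t>(bits & ~(1u << bit));   // clear it
      visit(slot_offset + bit);
    }
    slot_offset += 8;  // kBitsPerByte
  }
}

Bytes that are all zero cost a single test, which is the point of the rewrite: frames with sparse tagged spill slots skip most of the bitmap.
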
diff --git a/chromium/v8/src/execution/frames.h b/chromium/v8/src/execution/frames.h
index 32157b4cc12..8d9dadd76d2 100644
--- a/chromium/v8/src/execution/frames.h
+++ b/chromium/v8/src/execution/frames.h
@@ -138,9 +138,7 @@ class StackFrame {
INNER_JSENTRY_FRAME = (0 << kSmiTagSize) | kSmiTag,
OUTERMOST_JSENTRY_FRAME = (1 << kSmiTagSize) | kSmiTag
};
- // NOLINTNEXTLINE(runtime/references) (false positive)
STATIC_ASSERT((INNER_JSENTRY_FRAME & kHeapObjectTagMask) != kHeapObjectTag);
- // NOLINTNEXTLINE(runtime/references) (false positive)
STATIC_ASSERT((OUTERMOST_JSENTRY_FRAME & kHeapObjectTagMask) !=
kHeapObjectTag);
@@ -389,7 +387,6 @@ class V8_EXPORT_PRIVATE FrameSummary {
int SourcePosition() const;
int SourceStatementPosition() const;
Handle<Object> script() const;
- Handle<String> FunctionName() const;
Handle<Context> native_context() const;
private:
@@ -418,7 +415,6 @@ class V8_EXPORT_PRIVATE FrameSummary {
int SourceStatementPosition() const { return SourcePosition(); }
Handle<Script> script() const;
Handle<WasmInstanceObject> wasm_instance() const { return wasm_instance_; }
- Handle<String> FunctionName() const;
Handle<Context> native_context() const;
bool at_to_number_conversion() const { return at_to_number_conversion_; }
@@ -453,7 +449,6 @@ class V8_EXPORT_PRIVATE FrameSummary {
Handle<Object> script() const;
int SourcePosition() const;
int SourceStatementPosition() const;
- Handle<String> FunctionName() const;
Handle<Context> native_context() const;
#define FRAME_SUMMARY_CAST(kind_, type, field, desc) \
@@ -964,7 +959,7 @@ class WasmFrame : public TypedFrame {
V8_EXPORT_PRIVATE WasmInstanceObject wasm_instance() const;
V8_EXPORT_PRIVATE wasm::NativeModule* native_module() const;
wasm::WasmCode* wasm_code() const;
- uint32_t function_index() const;
+ int function_index() const;
Script script() const;
// Byte position in the module, or asm.js source position.
int position() const override;
diff --git a/chromium/v8/src/execution/isolate-data.h b/chromium/v8/src/execution/isolate-data.h
index a6610c12f08..bc3e2b8d1a6 100644
--- a/chromium/v8/src/execution/isolate-data.h
+++ b/chromium/v8/src/execution/isolate-data.h
@@ -29,13 +29,19 @@ class Isolate;
// register.
class IsolateData final {
public:
- explicit IsolateData(Isolate* isolate) : stack_guard_(isolate) {}
+ IsolateData(Isolate* isolate, Address cage_base)
+ : cage_base_(cage_base), stack_guard_(isolate) {}
IsolateData(const IsolateData&) = delete;
IsolateData& operator=(const IsolateData&) = delete;
static constexpr intptr_t kIsolateRootBias = kRootRegisterBias;
+ // The value of kPointerCageBaseRegister
+ Address cage_base() const {
+ return COMPRESS_POINTERS_BOOL ? cage_base_ : kNullAddress;
+ }
+
// The value of the kRootRegister.
Address isolate_root() const {
return reinterpret_cast<Address>(this) + kIsolateRootBias;
@@ -61,7 +67,7 @@ class IsolateData final {
return kBuiltinEntryTableOffset - kIsolateRootBias;
}
static constexpr int builtin_entry_slot_offset(Builtins::Name builtin_index) {
- CONSTEXPR_DCHECK(Builtins::IsBuiltinId(builtin_index));
+ DCHECK(Builtins::IsBuiltinId(builtin_index));
return builtin_entry_table_offset() + builtin_index * kSystemPointerSize;
}
@@ -70,6 +76,13 @@ class IsolateData final {
return kBuiltinsTableOffset - kIsolateRootBias;
}
+ // Root-register-relative offset of the external pointer table.
+#ifdef V8_HEAP_SANDBOX
+ static constexpr int external_pointer_table_offset() {
+ return kExternalPointerTableOffset - kIsolateRootBias;
+ }
+#endif
+
static constexpr int fast_c_call_caller_fp_offset() {
return kFastCCallCallerFPOffset - kIsolateRootBias;
}
@@ -82,6 +95,10 @@ class IsolateData final {
return kFastApiCallTargetOffset - kIsolateRootBias;
}
+ static constexpr int cage_base_offset() {
+ return kCageBaseOffset - kIsolateRootBias;
+ }
+
// Root-register-relative offset of the given builtin table entry.
// TODO(ishell): remove in favour of typified id version.
static int builtin_slot_offset(int builtin_index) {
@@ -142,6 +159,8 @@ class IsolateData final {
V(kFastCCallCallerFPOffset, kSystemPointerSize) \
V(kFastCCallCallerPCOffset, kSystemPointerSize) \
V(kFastApiCallTargetOffset, kSystemPointerSize) \
+ V(kCageBaseOffset, kSystemPointerSize) \
+ V(kLongTaskStatsCounterOffset, kSizetSize) \
V(kStackGuardOffset, StackGuard::kSizeInBytes) \
V(kRootsTableOffset, RootsTable::kEntriesCount* kSystemPointerSize) \
V(kExternalReferenceTableOffset, ExternalReferenceTable::kSizeInBytes) \
@@ -180,6 +199,12 @@ class IsolateData final {
Address fast_c_call_caller_pc_ = kNullAddress;
Address fast_api_call_target_ = kNullAddress;
+ Address cage_base_ = kNullAddress;
+
+ // Used for implementation of LongTaskStats. Counts the number of potential
+ // long tasks.
+ size_t long_task_stats_counter_ = 0;
+
// Fields related to the system and JS stack. In particular, this contains
// the stack limit used by stack checks in generated code.
StackGuard stack_guard_;
@@ -245,6 +270,9 @@ void IsolateData::AssertPredictableLayout() {
kFastCCallCallerPCOffset);
STATIC_ASSERT(offsetof(IsolateData, fast_api_call_target_) ==
kFastApiCallTargetOffset);
+ STATIC_ASSERT(offsetof(IsolateData, cage_base_) == kCageBaseOffset);
+ STATIC_ASSERT(offsetof(IsolateData, long_task_stats_counter_) ==
+ kLongTaskStatsCounterOffset);
STATIC_ASSERT(offsetof(IsolateData, stack_guard_) == kStackGuardOffset);
#ifdef V8_HEAP_SANDBOX
STATIC_ASSERT(offsetof(IsolateData, external_pointer_table_) ==
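
The new cage_base_ and long_task_stats_counter_ fields extend the block of root-register-relative slots, and their offsets are pinned with layout asserts as above. A standalone sketch of that pinning pattern (field names and offsets here are illustrative, not the real IsolateData):

#include <cstddef>
#include <cstdint>

// Generated code addresses these fields at fixed, root-register-relative
// offsets, so the C++ layout is locked to a hand-maintained offset table.
struct DataLayout {
  uintptr_t cage_base;
  size_t long_task_stats_counter;
};

constexpr size_t kCageBaseOffset = 0;
constexpr size_t kLongTaskStatsCounterOffset =
    kCageBaseOffset + sizeof(uintptr_t);

static_assert(offsetof(DataLayout, cage_base) == kCageBaseOffset,
              "layout must match the offset table");
static_assert(offsetof(DataLayout, long_task_stats_counter) ==
                  kLongTaskStatsCounterOffset,
              "layout must match the offset table");
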
diff --git a/chromium/v8/src/execution/isolate-utils-inl.h b/chromium/v8/src/execution/isolate-utils-inl.h
index f199b525aab..1e91d494aaa 100644
--- a/chromium/v8/src/execution/isolate-utils-inl.h
+++ b/chromium/v8/src/execution/isolate-utils-inl.h
@@ -51,7 +51,7 @@ V8_INLINE Heap* GetHeapFromWritableObject(HeapObject object) {
heap_internals::MemoryChunk* chunk =
heap_internals::MemoryChunk::FromHeapObject(object);
return chunk->GetHeap();
-#endif // V8_COMPRESS_POINTERS || V8_ENABLE_THIRD_PARTY_HEAP
+#endif // V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE, V8_ENABLE_THIRD_PARTY_HEAP
}
V8_INLINE Isolate* GetIsolateFromWritableObject(HeapObject object) {
@@ -64,14 +64,14 @@ V8_INLINE Isolate* GetIsolateFromWritableObject(HeapObject object) {
return isolate;
#else
return Isolate::FromHeap(GetHeapFromWritableObject(object));
-#endif // V8_COMPRESS_POINTERS, V8_ENABLE_THIRD_PARTY_HEAP
+#endif // V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE, V8_ENABLE_THIRD_PARTY_HEAP
}
V8_INLINE bool GetIsolateFromHeapObject(HeapObject object, Isolate** isolate) {
#ifdef V8_ENABLE_THIRD_PARTY_HEAP
*isolate = Heap::GetIsolateFromWritableObject(object);
return true;
-#elif defined V8_COMPRESS_POINTERS
+#elif defined V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE
*isolate = GetIsolateFromWritableObject(object);
return true;
#else
@@ -83,7 +83,18 @@ V8_INLINE bool GetIsolateFromHeapObject(HeapObject object, Isolate** isolate) {
}
*isolate = Isolate::FromHeap(chunk->GetHeap());
return true;
-#endif // V8_COMPRESS_POINTERS, V8_ENABLE_THIRD_PARTY_HEAP
+#endif // V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE, V8_ENABLE_THIRD_PARTY_HEAP
+}
+
+// Use this function instead of Internals::GetIsolateForHeapSandbox for internal
+// code, as this function is fully inlinable.
+V8_INLINE static Isolate* GetIsolateForHeapSandbox(HeapObject object) {
+#ifdef V8_HEAP_SANDBOX
+ return GetIsolateFromWritableObject(object);
+#else
+ // Not used in non-sandbox mode.
+ return nullptr;
+#endif
}
} // namespace internal
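
GetIsolateForHeapSandbox above folds to a null result when the sandbox is compiled out, so callers can pass it along unconditionally. A standalone sketch of that compile-time gating pattern (the macro name and lookup function are placeholders):

struct Isolate;

// Hypothetical slow path, only meaningful when the sandbox is enabled.
Isolate* LookupIsolateFor(const void* object);

inline Isolate* IsolateForSandbox(const void* object) {
#ifdef SANDBOX_ENABLED  // placeholder for V8_HEAP_SANDBOX
  return LookupIsolateFor(object);
#else
  (void)object;   // not used in non-sandbox builds
  return nullptr;
#endif
}
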
diff --git a/chromium/v8/src/execution/isolate.cc b/chromium/v8/src/execution/isolate.cc
index 6f133ece117..b4070372eee 100644
--- a/chromium/v8/src/execution/isolate.cc
+++ b/chromium/v8/src/execution/isolate.cc
@@ -7,7 +7,7 @@
#include <stdlib.h>
#include <atomic>
-#include <fstream> // NOLINT(readability/streams)
+#include <fstream>
#include <memory>
#include <sstream>
#include <string>
@@ -19,9 +19,11 @@
#include "src/ast/scopes.h"
#include "src/base/hashmap.h"
#include "src/base/logging.h"
+#include "src/base/platform/mutex.h"
#include "src/base/platform/platform.h"
#include "src/base/sys-info.h"
#include "src/base/utils/random-number-generator.h"
+#include "src/bigint/bigint.h"
#include "src/builtins/builtins-promise.h"
#include "src/builtins/constants-table-builder.h"
#include "src/codegen/assembler-inl.h"
@@ -318,10 +320,6 @@ void Isolate::SetEmbeddedBlob(const uint8_t* code, uint32_t code_size,
}
}
#endif // DEBUG
-
- if (FLAG_experimental_flush_embedded_blob_icache) {
- FlushInstructionCache(const_cast<uint8_t*>(code), code_size);
- }
}
void Isolate::ClearEmbeddedBlob() {
@@ -1404,12 +1402,15 @@ Object Isolate::StackOverflow() {
Handle<JSFunction> fun = range_error_function();
Handle<Object> msg = factory()->NewStringFromAsciiChecked(
MessageFormatter::TemplateString(MessageTemplate::kStackOverflow));
+ Handle<Object> options = factory()->undefined_value();
Handle<Object> no_caller;
- Handle<Object> exception;
+ Handle<JSObject> exception;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
this, exception,
- ErrorUtils::Construct(this, fun, fun, msg, SKIP_NONE, no_caller,
+ ErrorUtils::Construct(this, fun, fun, msg, options, SKIP_NONE, no_caller,
ErrorUtils::StackTraceCollection::kSimple));
+ JSObject::AddProperty(this, exception, factory()->wasm_uncatchable_symbol(),
+ factory()->true_value(), NONE);
Throw(*exception);
@@ -1474,8 +1475,7 @@ void Isolate::RequestInterrupt(InterruptCallback callback, void* data) {
}
void Isolate::InvokeApiInterruptCallbacks() {
- RuntimeCallTimerScope runtimeTimer(
- this, RuntimeCallCounterId::kInvokeApiInterruptCallbacks);
+ RCS_SCOPE(this, RuntimeCallCounterId::kInvokeApiInterruptCallbacks);
// Note: callback below should be called outside of execution access lock.
while (true) {
InterruptEntry entry;
@@ -1675,10 +1675,36 @@ Object Isolate::ReThrow(Object exception) {
return ReadOnlyRoots(heap()).exception();
}
+namespace {
+// This scope will set the thread-in-wasm flag after the execution of all
+// destructors. The thread-in-wasm flag is only set when the scope gets enabled.
+class SetThreadInWasmFlagScope {
+ public:
+ SetThreadInWasmFlagScope() {
+ DCHECK_IMPLIES(trap_handler::IsTrapHandlerEnabled(),
+ !trap_handler::IsThreadInWasm());
+ }
+
+ ~SetThreadInWasmFlagScope() {
+ if (enabled_) trap_handler::SetThreadInWasm();
+ }
+
+ void Enable() { enabled_ = true; }
+
+ private:
+ bool enabled_ = false;
+};
+} // namespace
+
Object Isolate::UnwindAndFindHandler() {
+ // Create the {SetThreadInWasmFlagScope} first in this function so that its
+ // destructor gets called after all the other destructors. It is important
+ // that the destructor sets the thread-in-wasm flag after all other
+ // destructors. The other destructors may cause exceptions, e.g. ASan on
+ // Windows, which would invalidate the thread-in-wasm flag when the wasm trap
+ // handler handles such non-wasm exceptions.
+ SetThreadInWasmFlagScope set_thread_in_wasm_flag_scope;
Object exception = pending_exception();
- DCHECK_IMPLIES(trap_handler::IsTrapHandlerEnabled(),
- !trap_handler::IsThreadInWasm());
auto FoundHandler = [&](Context context, Address instruction_start,
intptr_t handler_offset,
@@ -1771,9 +1797,10 @@ Object Isolate::UnwindAndFindHandler() {
StandardFrameConstants::kFixedFrameSizeAboveFp -
wasm_code->stack_slots() * kSystemPointerSize;
- // This is going to be handled by Wasm, so we need to set the TLS flag.
- trap_handler::SetThreadInWasm();
-
+ // This is going to be handled by WebAssembly, so we need to set the TLS
+ // flag. The {SetThreadInWasmFlagScope} will set the flag after all
+ // destructors have been executed.
+ set_thread_in_wasm_flag_scope.Enable();
return FoundHandler(Context(), wasm_code->instruction_start(), offset,
wasm_code->constant_pool(), return_sp, frame->fp());
}
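
The SetThreadInWasmFlagScope introduced above relies on C++ destructor ordering: it is constructed first, so it is destroyed last, and the thread-in-wasm flag is set only after every other local's destructor has run. A standalone sketch of that ordering trick (placeholder flag setter, not the real trap-handler API):

// RAII scope whose destructor sets a flag, but only if explicitly enabled.
class SetFlagScope {
 public:
  ~SetFlagScope() {
    if (enabled_) SetThreadInWasm();  // runs after later-declared locals
  }
  void Enable() { enabled_ = true; }

 private:
  static void SetThreadInWasm() { /* would flip the TLS flag here */ }
  bool enabled_ = false;
};

void UnwindAndFindHandlerSketch(bool handler_is_wasm) {
  SetFlagScope set_flag;  // constructed first => destroyed last
  // ... unwind frames; locals declared after this point are destroyed first,
  // so any exception they raise cannot be misattributed to wasm ...
  if (handler_is_wasm) set_flag.Enable();
}
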
@@ -1886,7 +1913,7 @@ Object Isolate::UnwindAndFindHandler() {
static_cast<int>(offset));
Code code =
- builtins()->builtin(Builtins::kInterpreterEnterBytecodeDispatch);
+ builtins()->builtin(Builtins::kInterpreterEnterAtBytecode);
return FoundHandler(context, code.InstructionStart(), 0,
code.constant_pool(), return_sp, frame->fp());
}
@@ -2550,6 +2577,29 @@ void Isolate::SetAbortOnUncaughtExceptionCallback(
abort_on_uncaught_exception_callback_ = callback;
}
+void Isolate::InstallConditionalFeatures(Handle<Context> context) {
+ Handle<JSGlobalObject> global = handle(context->global_object(), this);
+ Handle<String> sab_name = factory()->SharedArrayBuffer_string();
+ if (IsSharedArrayBufferConstructorEnabled(context)) {
+ if (!JSObject::HasRealNamedProperty(global, sab_name).FromMaybe(true)) {
+ JSObject::AddProperty(this, global, factory()->SharedArrayBuffer_string(),
+ shared_array_buffer_fun(), DONT_ENUM);
+ }
+ }
+}
+
+bool Isolate::IsSharedArrayBufferConstructorEnabled(Handle<Context> context) {
+ if (!FLAG_harmony_sharedarraybuffer) return false;
+
+ if (!FLAG_enable_sharedarraybuffer_per_context) return true;
+
+ if (sharedarraybuffer_constructor_enabled_callback()) {
+ v8::Local<v8::Context> api_context = v8::Utils::ToLocal(context);
+ return sharedarraybuffer_constructor_enabled_callback()(api_context);
+ }
+ return false;
+}
+
bool Isolate::IsWasmSimdEnabled(Handle<Context> context) {
#if V8_ENABLE_WEBASSEMBLY
if (wasm_simd_enabled_callback()) {
@@ -2667,9 +2717,10 @@ void Isolate::UnregisterManagedPtrDestructor(ManagedPtrDestructor* destructor) {
}
#if V8_ENABLE_WEBASSEMBLY
-void Isolate::SetWasmEngine(std::shared_ptr<wasm::WasmEngine> engine) {
+void Isolate::SetWasmEngine(wasm::WasmEngine* engine) {
DCHECK_NULL(wasm_engine_); // Only call once before {Init}.
- wasm_engine_ = std::move(engine);
+ DCHECK_NOT_NULL(engine);
+ wasm_engine_ = engine;
wasm_engine_->AddIsolate(this);
}
@@ -2683,7 +2734,6 @@ void Isolate::AddSharedWasmMemory(Handle<WasmMemoryObject> memory_object) {
}
#endif // V8_ENABLE_WEBASSEMBLY
-// NOLINTNEXTLINE
Isolate::PerIsolateThreadData::~PerIsolateThreadData() {
#if defined(USE_SIMULATOR)
delete simulator_;
@@ -2854,16 +2904,28 @@ std::atomic<size_t> Isolate::non_disposed_isolates_;
#endif // DEBUG
// static
-Isolate* Isolate::New() {
+Isolate* Isolate::New() { return Isolate::Allocate(false); }
+
+// static
+Isolate* Isolate::NewShared(const v8::Isolate::CreateParams& params) {
+ Isolate* isolate = Isolate::Allocate(true);
+ v8::Isolate::Initialize(reinterpret_cast<v8::Isolate*>(isolate), params);
+ return isolate;
+}
+
+// static
+Isolate* Isolate::Allocate(bool is_shared) {
// IsolateAllocator allocates the memory for the Isolate object according to
// the given allocation mode.
std::unique_ptr<IsolateAllocator> isolate_allocator =
std::make_unique<IsolateAllocator>();
// Construct Isolate object in the allocated memory.
void* isolate_ptr = isolate_allocator->isolate_memory();
- Isolate* isolate = new (isolate_ptr) Isolate(std::move(isolate_allocator));
+ Isolate* isolate =
+ new (isolate_ptr) Isolate(std::move(isolate_allocator), is_shared);
#ifdef V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE
DCHECK(IsAligned(isolate->isolate_root(), kPtrComprCageBaseAlignment));
+ DCHECK_EQ(isolate->isolate_root(), isolate->cage_base());
#endif
#ifdef DEBUG
@@ -2919,12 +2981,13 @@ void Isolate::SetUpFromReadOnlyArtifacts(
heap_.SetUpFromReadOnlyHeap(read_only_heap_);
}
-v8::PageAllocator* Isolate::page_allocator() {
+v8::PageAllocator* Isolate::page_allocator() const {
return isolate_allocator_->page_allocator();
}
-Isolate::Isolate(std::unique_ptr<i::IsolateAllocator> isolate_allocator)
- : isolate_data_(this),
+Isolate::Isolate(std::unique_ptr<i::IsolateAllocator> isolate_allocator,
+ bool is_shared)
+ : isolate_data_(this, isolate_allocator->GetPtrComprCageBase()),
isolate_allocator_(std::move(isolate_allocator)),
id_(isolate_counter.fetch_add(1, std::memory_order_relaxed)),
allocator_(new TracingAccountingAllocator(this)),
@@ -2941,7 +3004,8 @@ Isolate::Isolate(std::unique_ptr<i::IsolateAllocator> isolate_allocator)
#endif
next_module_async_evaluating_ordinal_(
SourceTextModule::kFirstAsyncEvaluatingOrdinal),
- cancelable_task_manager_(new CancelableTaskManager()) {
+ cancelable_task_manager_(new CancelableTaskManager()),
+ is_shared_(is_shared) {
TRACE_ISOLATE(constructor);
CheckIsolateLayout();
@@ -2951,6 +3015,18 @@ Isolate::Isolate(std::unique_ptr<i::IsolateAllocator> isolate_allocator)
handle_scope_data_.Initialize();
+ // When pointer compression is on with a per-Isolate cage, allocation in the
+ // shared Isolate can point into the per-Isolate RO heap as the offsets are
+ // constant across Isolates.
+ //
+ // When pointer compression is on with a shared cage or when pointer
+ // compression is off, a shared RO heap is required. Otherwise a shared
+ // allocation requested by a client Isolate could point into the client
+ // Isolate's RO space (e.g. an RO map) whose pages gets unmapped when it is
+ // disposed.
+ CHECK_IMPLIES(is_shared_, COMPRESS_POINTERS_IN_ISOLATE_CAGE_BOOL ||
+ V8_SHARED_RO_HEAP_BOOL);
+
#define ISOLATE_INIT_EXECUTE(type, name, initial_value) \
name##_ = (initial_value);
ISOLATE_INIT_LIST(ISOLATE_INIT_EXECUTE)
@@ -2979,6 +3055,11 @@ void Isolate::CheckIsolateLayout() {
CHECK_EQ(static_cast<int>(
OFFSET_OF(Isolate, isolate_data_.fast_c_call_caller_pc_)),
Internals::kIsolateFastCCallCallerPcOffset);
+ CHECK_EQ(static_cast<int>(OFFSET_OF(Isolate, isolate_data_.cage_base_)),
+ Internals::kIsolateCageBaseOffset);
+ CHECK_EQ(static_cast<int>(
+ OFFSET_OF(Isolate, isolate_data_.long_task_stats_counter_)),
+ Internals::kIsolateLongTaskStatsCounterOffset);
CHECK_EQ(static_cast<int>(OFFSET_OF(Isolate, isolate_data_.stack_guard_)),
Internals::kIsolateStackGuardOffset);
CHECK_EQ(static_cast<int>(OFFSET_OF(Isolate, isolate_data_.roots_)),
@@ -3020,10 +3101,10 @@ void Isolate::Deinit() {
#if defined(V8_OS_WIN64)
if (win64_unwindinfo::CanRegisterUnwindInfoForNonABICompliantCodeRange() &&
- heap()->memory_allocator() && RequiresCodeRange()) {
- const base::AddressRegion& code_range =
- heap()->memory_allocator()->code_range();
- void* start = reinterpret_cast<void*>(code_range.begin());
+ heap()->memory_allocator() && RequiresCodeRange() &&
+ heap()->code_range()->AtomicDecrementUnwindInfoUseCount() == 1) {
+ const base::AddressRegion& code_region = heap()->code_region();
+ void* start = reinterpret_cast<void*>(code_region.begin());
win64_unwindinfo::UnregisterNonABICompliantCodeRange(start);
}
#endif // V8_OS_WIN64
@@ -3044,6 +3125,9 @@ void Isolate::Deinit() {
optimizing_compile_dispatcher_ = nullptr;
}
+ // All client isolates should already be detached.
+ DCHECK_NULL(client_isolate_head_);
+
// Help sweeper threads complete sweeping to stop faster.
heap_.mark_compact_collector()->DrainSweepingWorklists();
heap_.mark_compact_collector()->sweeper()->EnsureIterabilityCompleted();
@@ -3090,6 +3174,10 @@ void Isolate::Deinit() {
main_thread_local_isolate_->heap()->FreeLinearAllocationArea();
+ if (shared_isolate_) {
+ DetachFromSharedIsolate();
+ }
+
heap_.TearDown();
main_thread_local_isolate_.reset();
@@ -3098,10 +3186,7 @@ void Isolate::Deinit() {
if (logfile != nullptr) base::Fclose(logfile);
#if V8_ENABLE_WEBASSEMBLY
- if (wasm_engine_) {
- wasm_engine_->RemoveIsolate(this);
- wasm_engine_.reset();
- }
+ wasm_engine_->RemoveIsolate(this);
#endif // V8_ENABLE_WEBASSEMBLY
TearDownEmbeddedBlob();
@@ -3182,6 +3267,8 @@ Isolate::~Isolate() {
delete thread_manager_;
thread_manager_ = nullptr;
+ bigint_processor_->Destroy();
+
delete global_handles_;
global_handles_ = nullptr;
delete eternal_handles_;
@@ -3389,8 +3476,9 @@ void Isolate::MaybeRemapEmbeddedBuiltinsIntoCodeRange() {
CHECK_NOT_NULL(embedded_blob_code_);
CHECK_NE(embedded_blob_code_size_, 0);
- embedded_blob_code_ = heap_.RemapEmbeddedBuiltinsIntoCodeRange(
- embedded_blob_code_, embedded_blob_code_size_);
+ DCHECK_NOT_NULL(heap_.code_range_);
+ embedded_blob_code_ = heap_.code_range_->RemapEmbeddedBuiltins(
+ this, embedded_blob_code_, embedded_blob_code_size_);
CHECK_NOT_NULL(embedded_blob_code_);
// The un-embedded code blob is already a part of the registered code range
// so it's not necessary to register it again.
@@ -3474,6 +3562,21 @@ using MapOfLoadsAndStoresPerFunction =
std::map<std::string /* function_name */,
std::pair<uint64_t /* loads */, uint64_t /* stores */>>;
MapOfLoadsAndStoresPerFunction* stack_access_count_map = nullptr;
+
+class BigIntPlatform : public bigint::Platform {
+ public:
+ explicit BigIntPlatform(Isolate* isolate) : isolate_(isolate) {}
+ ~BigIntPlatform() override = default;
+
+ bool InterruptRequested() override {
+ StackLimitCheck interrupt_check(isolate_);
+ return (interrupt_check.InterruptRequested() &&
+ isolate_->stack_guard()->HasTerminationRequest());
+ }
+
+ private:
+ Isolate* isolate_;
+};
} // namespace
bool Isolate::Init(SnapshotData* startup_snapshot_data,
@@ -3522,6 +3625,7 @@ bool Isolate::Init(SnapshotData* startup_snapshot_data,
heap_profiler_ = new HeapProfiler(heap());
interpreter_ = new interpreter::Interpreter(this);
string_table_.reset(new StringTable(this));
+ bigint_processor_ = bigint::Processor::New(new BigIntPlatform(this));
compiler_dispatcher_ =
new CompilerDispatcher(this, V8::GetCurrentPlatform(), FLAG_stack_size);
@@ -3531,7 +3635,7 @@ bool Isolate::Init(SnapshotData* startup_snapshot_data,
metrics_recorder_ = std::make_shared<metrics::Recorder>();
- { // NOLINT
+ {
// Ensure that the thread has a valid stack guard. The v8::Locker object
// will ensure this too, but we don't have to use lockers if we are only
// using one thread.
@@ -3546,24 +3650,32 @@ bool Isolate::Init(SnapshotData* startup_snapshot_data,
heap_.SetUpSpaces();
if (V8_SHORT_BUILTIN_CALLS_BOOL && FLAG_short_builtin_calls) {
- // Check if the system has more than 4GB of physical memory by comaring
- // the old space size with respective threshod value.
- is_short_builtin_calls_enabled_ =
- heap_.MaxOldGenerationSize() >= kShortBuiltinCallsOldSpaceSizeThreshold;
+ // Check if the system has more than 4GB of physical memory by comparing the
+ // old space size with the respective threshold value.
+ //
+ // Additionally, enable if there is already a process-wide CodeRange that
+ // has re-embedded builtins.
+ is_short_builtin_calls_enabled_ = (heap_.MaxOldGenerationSize() >=
+ kShortBuiltinCallsOldSpaceSizeThreshold);
+ if (COMPRESS_POINTERS_IN_SHARED_CAGE_BOOL) {
+ std::shared_ptr<CodeRange> code_range =
+ CodeRange::GetProcessWideCodeRange();
+ if (code_range && code_range->embedded_blob_code_copy() != nullptr) {
+ is_short_builtin_calls_enabled_ = true;
+ }
+ }
}
// Create LocalIsolate/LocalHeap for the main thread and set state to Running.
main_thread_local_isolate_.reset(new LocalIsolate(this, ThreadKind::kMain));
main_thread_local_heap()->Unpark();
+ heap_.InitializeMainThreadLocalHeap(main_thread_local_heap());
+
isolate_data_.external_reference_table()->Init(this);
#if V8_ENABLE_WEBASSEMBLY
- // Setup the wasm engine.
- if (wasm_engine_ == nullptr) {
- SetWasmEngine(wasm::WasmEngine::GetWasmEngine());
- }
- DCHECK_NOT_NULL(wasm_engine_);
+ SetWasmEngine(wasm::WasmEngine::GetWasmEngine());
#endif // V8_ENABLE_WEBASSEMBLY
if (setup_delegate_ == nullptr) {
@@ -3721,11 +3833,11 @@ bool Isolate::Init(SnapshotData* startup_snapshot_data,
}
#if defined(V8_OS_WIN64)
- if (win64_unwindinfo::CanRegisterUnwindInfoForNonABICompliantCodeRange()) {
- const base::AddressRegion& code_range =
- heap()->memory_allocator()->code_range();
- void* start = reinterpret_cast<void*>(code_range.begin());
- size_t size_in_bytes = code_range.size();
+ if (win64_unwindinfo::CanRegisterUnwindInfoForNonABICompliantCodeRange() &&
+ heap()->code_range()->AtomicIncrementUnwindInfoUseCount() == 0) {
+ const base::AddressRegion& code_region = heap()->code_region();
+ void* start = reinterpret_cast<void*>(code_region.begin());
+ size_t size_in_bytes = code_region.size();
win64_unwindinfo::RegisterNonABICompliantCodeRange(start, size_in_bytes);
}
#endif // V8_OS_WIN64
@@ -3845,6 +3957,7 @@ void Isolate::DumpAndResetStats() {
wasm_engine()->DumpAndResetTurboStatistics();
}
#endif // V8_ENABLE_WEBASSEMBLY
+#if V8_RUNTIME_CALL_STATS
if (V8_UNLIKELY(TracingFlags::runtime_stats.load(std::memory_order_relaxed) ==
v8::tracing::TracingCategoryObserver::ENABLED_BY_NATIVE)) {
counters()->worker_thread_runtime_call_stats()->AddToMainTable(
@@ -3852,6 +3965,7 @@ void Isolate::DumpAndResetStats() {
counters()->runtime_call_stats()->Print();
counters()->runtime_call_stats()->Reset();
}
+#endif // V8_RUNTIME_CALL_STATS
if (BasicBlockProfiler::Get()->HasData(this)) {
StdoutStream out;
BasicBlockProfiler::Get()->Print(out, this);
@@ -4121,19 +4235,23 @@ void Isolate::FireCallCompletedCallback(MicrotaskQueue* microtask_queue) {
}
}
-void Isolate::PromiseHookStateUpdated() {
- bool promise_hook_or_async_event_delegate =
- promise_hook_ || async_event_delegate_;
- bool promise_hook_or_debug_is_active_or_async_event_delegate =
- promise_hook_or_async_event_delegate || debug()->is_active();
- if (promise_hook_or_debug_is_active_or_async_event_delegate &&
- Protectors::IsPromiseHookIntact(this)) {
+void Isolate::UpdatePromiseHookProtector() {
+ if (Protectors::IsPromiseHookIntact(this)) {
HandleScope scope(this);
Protectors::InvalidatePromiseHook(this);
}
- promise_hook_or_async_event_delegate_ = promise_hook_or_async_event_delegate;
- promise_hook_or_debug_is_active_or_async_event_delegate_ =
- promise_hook_or_debug_is_active_or_async_event_delegate;
+}
+
+void Isolate::PromiseHookStateUpdated() {
+ promise_hook_flags_ =
+ (promise_hook_flags_ & PromiseHookFields::HasContextPromiseHook::kMask) |
+ PromiseHookFields::HasIsolatePromiseHook::encode(promise_hook_) |
+ PromiseHookFields::HasAsyncEventDelegate::encode(async_event_delegate_) |
+ PromiseHookFields::IsDebugActive::encode(debug()->is_active());
+
+ if (promise_hook_flags_ != 0) {
+ UpdatePromiseHookProtector();
+ }
}
namespace {
@@ -4260,11 +4378,15 @@ MaybeHandle<FixedArray> Isolate::GetImportAssertionsFromArgument(
Handle<JSReceiver> import_assertions_object_receiver =
Handle<JSReceiver>::cast(import_assertions_object);
- Handle<FixedArray> assertion_keys =
- KeyAccumulator::GetKeys(import_assertions_object_receiver,
- KeyCollectionMode::kOwnOnly, ENUMERABLE_STRINGS,
- GetKeysConversion::kConvertToString)
- .ToHandleChecked();
+ Handle<FixedArray> assertion_keys;
+ if (!KeyAccumulator::GetKeys(import_assertions_object_receiver,
+ KeyCollectionMode::kOwnOnly, ENUMERABLE_STRINGS,
+ GetKeysConversion::kConvertToString)
+ .ToHandle(&assertion_keys)) {
+ // This happens if the assertions object is a Proxy whose ownKeys() or
+ // getOwnPropertyDescriptor() trap throws.
+ return MaybeHandle<FixedArray>();
+ }
// The assertions will be passed to the host in the form: [key1,
// value1, key2, value2, ...].
@@ -4311,7 +4433,7 @@ void Isolate::SetHostImportModuleDynamicallyCallback(
MaybeHandle<JSObject> Isolate::RunHostInitializeImportMetaObjectCallback(
Handle<SourceTextModule> module) {
- CHECK(module->import_meta().IsTheHole(this));
+ CHECK(module->import_meta(kAcquireLoad).IsTheHole(this));
Handle<JSObject> import_meta = factory()->NewJSObjectWithNullProto();
if (host_initialize_import_meta_object_callback_ != nullptr) {
v8::Local<v8::Context> api_context =
@@ -4433,17 +4555,30 @@ void Isolate::SetPromiseHook(PromiseHook hook) {
PromiseHookStateUpdated();
}
+void Isolate::RunAllPromiseHooks(PromiseHookType type,
+ Handle<JSPromise> promise,
+ Handle<Object> parent) {
+ if (HasContextPromiseHooks()) {
+ native_context()->RunPromiseHook(type, promise, parent);
+ }
+ if (HasIsolatePromiseHooks() || HasAsyncEventDelegate()) {
+ RunPromiseHook(type, promise, parent);
+ }
+}
+
void Isolate::RunPromiseHook(PromiseHookType type, Handle<JSPromise> promise,
Handle<Object> parent) {
RunPromiseHookForAsyncEventDelegate(type, promise);
- if (promise_hook_ == nullptr) return;
+ if (!HasIsolatePromiseHooks()) return;
+ DCHECK(promise_hook_ != nullptr);
promise_hook_(type, v8::Utils::PromiseToLocal(promise),
v8::Utils::ToLocal(parent));
}
void Isolate::RunPromiseHookForAsyncEventDelegate(PromiseHookType type,
Handle<JSPromise> promise) {
- if (!async_event_delegate_) return;
+ if (!HasAsyncEventDelegate()) return;
+ DCHECK(async_event_delegate_ != nullptr);
switch (type) {
case PromiseHookType::kResolve:
return;
@@ -4877,6 +5012,18 @@ MaybeLocal<v8::Context> Isolate::GetContextFromRecorderContextId(
return result->second.Get(reinterpret_cast<v8::Isolate*>(this));
}
+void Isolate::UpdateLongTaskStats() {
+ if (last_long_task_stats_counter_ != isolate_data_.long_task_stats_counter_) {
+ last_long_task_stats_counter_ = isolate_data_.long_task_stats_counter_;
+ long_task_stats_ = v8::metrics::LongTaskStats{};
+ }
+}
+
+v8::metrics::LongTaskStats* Isolate::GetCurrentLongTaskStats() {
+ UpdateLongTaskStats();
+ return &long_task_stats_;
+}
+
void Isolate::RemoveContextIdCallback(const v8::WeakCallbackInfo<void>& data) {
Isolate* isolate = reinterpret_cast<Isolate*>(data.GetIsolate());
uintptr_t context_id = reinterpret_cast<uintptr_t>(data.GetParameter());
@@ -4953,5 +5100,54 @@ Address Isolate::store_to_stack_count_address(const char* function_name) {
return reinterpret_cast<Address>(&map[name].second);
}
+void Isolate::AttachToSharedIsolate(Isolate* shared) {
+ DCHECK(shared->is_shared());
+ DCHECK_NULL(shared_isolate_);
+ shared->AppendAsClientIsolate(this);
+ shared_isolate_ = shared;
+ heap()->InitSharedSpaces();
+}
+
+void Isolate::DetachFromSharedIsolate() {
+ DCHECK_NOT_NULL(shared_isolate_);
+ shared_isolate_->RemoveAsClientIsolate(this);
+ shared_isolate_ = nullptr;
+ heap()->DeinitSharedSpaces();
+}
+
+void Isolate::AppendAsClientIsolate(Isolate* client) {
+ base::MutexGuard guard(&client_isolate_mutex_);
+
+ DCHECK_NULL(client->prev_client_isolate_);
+ DCHECK_NULL(client->next_client_isolate_);
+ DCHECK_NE(client_isolate_head_, client);
+
+ if (client_isolate_head_) {
+ client_isolate_head_->prev_client_isolate_ = client;
+ }
+
+ client->prev_client_isolate_ = nullptr;
+ client->next_client_isolate_ = client_isolate_head_;
+
+ client_isolate_head_ = client;
+}
+
+void Isolate::RemoveAsClientIsolate(Isolate* client) {
+ base::MutexGuard guard(&client_isolate_mutex_);
+
+ if (client->next_client_isolate_) {
+ client->next_client_isolate_->prev_client_isolate_ =
+ client->prev_client_isolate_;
+ }
+
+ if (client->prev_client_isolate_) {
+ client->prev_client_isolate_->next_client_isolate_ =
+ client->next_client_isolate_;
+ } else {
+ DCHECK_EQ(client_isolate_head_, client);
+ client_isolate_head_ = client->next_client_isolate_;
+ }
+}
+
} // namespace internal
} // namespace v8
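
The shared-isolate bookkeeping added at the end of this file keeps client isolates on an intrusive, mutex-protected doubly linked list headed in the shared isolate. A standalone sketch of the same insert/remove logic (plain std::mutex and a placeholder node type):

#include <mutex>

struct Client {
  Client* prev = nullptr;
  Client* next = nullptr;
};

class SharedOwner {
 public:
  void Append(Client* c) {
    std::lock_guard<std::mutex> guard(mutex_);
    c->prev = nullptr;
    c->next = head_;
    if (head_) head_->prev = c;
    head_ = c;
  }

  void Remove(Client* c) {
    std::lock_guard<std::mutex> guard(mutex_);
    if (c->next) c->next->prev = c->prev;
    if (c->prev) {
      c->prev->next = c->next;
    } else {
      head_ = c->next;  // c was the head
    }
    c->prev = c->next = nullptr;
  }

 private:
  std::mutex mutex_;
  Client* head_ = nullptr;
};

Keeping the links inside the client avoids any allocation while attaching or detaching, which matters because detachment happens during isolate teardown.
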
diff --git a/chromium/v8/src/execution/isolate.h b/chromium/v8/src/execution/isolate.h
index 61934d56190..f55af20acdf 100644
--- a/chromium/v8/src/execution/isolate.h
+++ b/chromium/v8/src/execution/isolate.h
@@ -13,7 +13,6 @@
#include <unordered_map>
#include <vector>
-#include "include/v8-inspector.h"
#include "include/v8-internal.h"
#include "include/v8-metrics.h"
#include "include/v8.h"
@@ -48,12 +47,20 @@ class UMemory;
} // namespace U_ICU_NAMESPACE
#endif // V8_INTL_SUPPORT
+namespace v8_inspector {
+class V8Inspector;
+} // namespace v8_inspector
+
namespace v8 {
namespace base {
class RandomNumberGenerator;
} // namespace base
+namespace bigint {
+class Processor;
+}
+
namespace debug {
class ConsoleDelegate;
class AsyncEventDelegate;
@@ -430,6 +437,8 @@ using DebugObjectCache = std::vector<Handle<HeapObject>>;
V(AllowWasmCodeGenerationCallback, allow_wasm_code_gen_callback, nullptr) \
V(ExtensionCallback, wasm_module_callback, &NoExtension) \
V(ExtensionCallback, wasm_instance_callback, &NoExtension) \
+ V(SharedArrayBufferConstructorEnabledCallback, \
+ sharedarraybuffer_constructor_enabled_callback, nullptr) \
V(WasmStreamingCallback, wasm_streaming_callback, nullptr) \
V(WasmLoadSourceMapCallback, wasm_load_source_map_callback, nullptr) \
V(WasmSimdEnabledCallback, wasm_simd_enabled_callback, nullptr) \
@@ -548,6 +557,9 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
// new operator.
static Isolate* New();
+ // Creates a new shared Isolate object.
+ static Isolate* NewShared(const v8::Isolate::CreateParams& params);
+
// Deletes Isolate object. Must be used instead of delete operator.
// Destroys the non-default isolates.
// Sets default isolate into "has_been_disposed" state rather then destroying,
@@ -559,7 +571,7 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
void set_read_only_heap(ReadOnlyHeap* ro_heap) { read_only_heap_ = ro_heap; }
// Page allocator that must be used for allocating V8 heap pages.
- v8::PageAllocator* page_allocator();
+ v8::PageAllocator* page_allocator() const;
// Returns the PerIsolateThreadData for the current thread (or nullptr if one
// is not currently set).
@@ -659,12 +671,12 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
base::SharedMutex* map_updater_access() { return &map_updater_access_; }
// The isolate's string table.
- StringTable* string_table() { return string_table_.get(); }
+ StringTable* string_table() const { return string_table_.get(); }
Address get_address_from_id(IsolateAddressId id);
// Access to top context (where the current function object was created).
- Context context() { return thread_local_top()->context_; }
+ Context context() const { return thread_local_top()->context_; }
inline void set_context(Context context);
Context* context_address() { return &thread_local_top()->context_; }
@@ -681,6 +693,10 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
inline void set_pending_exception(Object exception_obj);
inline void clear_pending_exception();
+ void InstallConditionalFeatures(Handle<Context> context);
+
+ bool IsSharedArrayBufferConstructorEnabled(Handle<Context> context);
+
bool IsWasmSimdEnabled(Handle<Context> context);
bool AreWasmExceptionsEnabled(Handle<Context> context);
@@ -1021,6 +1037,25 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
const IsolateData* isolate_data() const { return &isolate_data_; }
IsolateData* isolate_data() { return &isolate_data_; }
+ // When pointer compression is on, this is the base address of the pointer
+ // compression cage, and the kPtrComprCageBaseRegister is set to this
+ // value. When pointer compression is off, this is always kNullAddress.
+ Address cage_base() const {
+ DCHECK_IMPLIES(!COMPRESS_POINTERS_IN_ISOLATE_CAGE_BOOL &&
+ !COMPRESS_POINTERS_IN_SHARED_CAGE_BOOL,
+ isolate_data()->cage_base() == kNullAddress);
+ return isolate_data()->cage_base();
+ }
+
+ // When pointer compression is on, the PtrComprCage used by this
+ // Isolate. Otherwise nullptr.
+ VirtualMemoryCage* GetPtrComprCage() {
+ return isolate_allocator_->GetPtrComprCage();
+ }
+ const VirtualMemoryCage* GetPtrComprCage() const {
+ return isolate_allocator_->GetPtrComprCage();
+ }
+
// Generated code can embed this address to get access to the isolate-specific
// data (for example, roots, external references, builtins, etc.).
// The kRootRegister is set to this value.
@@ -1047,7 +1082,7 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
sizeof(IsolateData));
}
- Object root(RootIndex index) { return Object(roots_table()[index]); }
+ Object root(RootIndex index) const { return Object(roots_table()[index]); }
Handle<Object> root_handle(RootIndex index) {
return Handle<Object>(&roots_table()[index]);
@@ -1063,8 +1098,8 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
bool IsBuiltinsTableHandleLocation(Address* handle_location);
- StubCache* load_stub_cache() { return load_stub_cache_; }
- StubCache* store_stub_cache() { return store_stub_cache_; }
+ StubCache* load_stub_cache() const { return load_stub_cache_; }
+ StubCache* store_stub_cache() const { return store_stub_cache_; }
Deoptimizer* GetAndClearCurrentDeoptimizer() {
Deoptimizer* result = current_deoptimizer_;
CHECK_NOT_NULL(result);
@@ -1101,32 +1136,34 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
THREAD_LOCAL_TOP_ADDRESS(Address, thread_in_wasm_flag_address)
- MaterializedObjectStore* materialized_object_store() {
+ MaterializedObjectStore* materialized_object_store() const {
return materialized_object_store_;
}
- DescriptorLookupCache* descriptor_lookup_cache() {
+ DescriptorLookupCache* descriptor_lookup_cache() const {
return descriptor_lookup_cache_;
}
HandleScopeData* handle_scope_data() { return &handle_scope_data_; }
- HandleScopeImplementer* handle_scope_implementer() {
+ HandleScopeImplementer* handle_scope_implementer() const {
DCHECK(handle_scope_implementer_);
return handle_scope_implementer_;
}
- UnicodeCache* unicode_cache() { return unicode_cache_; }
+ UnicodeCache* unicode_cache() const { return unicode_cache_; }
InnerPointerToCodeCache* inner_pointer_to_code_cache() {
return inner_pointer_to_code_cache_;
}
- GlobalHandles* global_handles() { return global_handles_; }
+ GlobalHandles* global_handles() const { return global_handles_; }
- EternalHandles* eternal_handles() { return eternal_handles_; }
+ EternalHandles* eternal_handles() const { return eternal_handles_; }
- ThreadManager* thread_manager() { return thread_manager_; }
+ ThreadManager* thread_manager() const { return thread_manager_; }
+
+ bigint::Processor* bigint_processor() { return bigint_processor_; }
#ifndef V8_INTL_SUPPORT
unibrow::Mapping<unibrow::Ecma262UnCanonicalize>* jsregexp_uncanonicalize() {
@@ -1147,14 +1184,16 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
Builtins* builtins() { return &builtins_; }
- RegExpStack* regexp_stack() { return regexp_stack_; }
+ RegExpStack* regexp_stack() const { return regexp_stack_; }
- size_t total_regexp_code_generated() { return total_regexp_code_generated_; }
+ size_t total_regexp_code_generated() const {
+ return total_regexp_code_generated_;
+ }
void IncreaseTotalRegexpCodeGenerated(Handle<HeapObject> code);
std::vector<int>* regexp_indices() { return &regexp_indices_; }
- Debug* debug() { return debug_; }
+ Debug* debug() const { return debug_; }
void* is_profiling_address() { return &is_profiling_; }
@@ -1178,8 +1217,7 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
v8::internal::Factory* factory() {
// Upcast to the privately inherited base-class using c-style casts to avoid
// undefined behavior (as static_cast cannot cast across private bases).
- // NOLINTNEXTLINE (google-readability-casting)
- return (v8::internal::Factory*)this; // NOLINT(readability/casting)
+ return (v8::internal::Factory*)this;
}
static const int kJSRegexpStaticOffsetsVectorSize = 128;
@@ -1192,7 +1230,7 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
DCHECK_LT(slot, Internals::kNumIsolateDataSlots);
isolate_data_.embedder_data_[slot] = data;
}
- void* GetData(uint32_t slot) {
+ void* GetData(uint32_t slot) const {
DCHECK_LT(slot, Internals::kNumIsolateDataSlots);
return isolate_data_.embedder_data_[slot];
}
@@ -1205,7 +1243,7 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
return snapshot_blob_ != nullptr && snapshot_blob_->raw_size != 0;
}
- bool IsDead() { return has_fatal_error_; }
+ bool IsDead() const { return has_fatal_error_; }
void SignalFatalError() { has_fatal_error_ = true; }
bool use_optimizer();
@@ -1261,11 +1299,11 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
void MaybeInitializeVectorListFromHeap();
- double time_millis_since_init() {
+ double time_millis_since_init() const {
return heap_.MonotonicallyIncreasingTimeInMs() - time_millis_at_init_;
}
- DateCache* date_cache() { return date_cache_; }
+ DateCache* date_cache() const { return date_cache_; }
void set_date_cache(DateCache* date_cache);
@@ -1319,7 +1357,7 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
std::unique_ptr<PersistentHandles> NewPersistentHandles();
- PersistentHandlesList* persistent_handles_list() {
+ PersistentHandlesList* persistent_handles_list() const {
return persistent_handles_list_.get();
}
@@ -1437,21 +1475,27 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
}
#endif
- Address promise_hook_address() {
- return reinterpret_cast<Address>(&promise_hook_);
+ void SetHasContextPromiseHooks(bool context_promise_hook) {
+ promise_hook_flags_ = PromiseHookFields::HasContextPromiseHook::update(
+ promise_hook_flags_, context_promise_hook);
+ PromiseHookStateUpdated();
}
- Address async_event_delegate_address() {
- return reinterpret_cast<Address>(&async_event_delegate_);
+ bool HasContextPromiseHooks() const {
+ return PromiseHookFields::HasContextPromiseHook::decode(
+ promise_hook_flags_);
}
- Address promise_hook_or_async_event_delegate_address() {
- return reinterpret_cast<Address>(&promise_hook_or_async_event_delegate_);
+ Address promise_hook_flags_address() {
+ return reinterpret_cast<Address>(&promise_hook_flags_);
+ }
+
+ Address promise_hook_address() {
+ return reinterpret_cast<Address>(&promise_hook_);
}
- Address promise_hook_or_debug_is_active_or_async_event_delegate_address() {
- return reinterpret_cast<Address>(
- &promise_hook_or_debug_is_active_or_async_event_delegate_);
+ Address async_event_delegate_address() {
+ return reinterpret_cast<Address>(&async_event_delegate_);
}
Address handle_scope_implementer_address() {
@@ -1469,6 +1513,9 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
void SetPromiseHook(PromiseHook hook);
void RunPromiseHook(PromiseHookType type, Handle<JSPromise> promise,
Handle<Object> parent);
+ void RunAllPromiseHooks(PromiseHookType type, Handle<JSPromise> promise,
+ Handle<Object> parent);
+ void UpdatePromiseHookProtector();
void PromiseHookStateUpdated();
void AddDetachedContext(Handle<Context> context);
@@ -1657,8 +1704,9 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
}
#if V8_ENABLE_WEBASSEMBLY
- wasm::WasmEngine* wasm_engine() const { return wasm_engine_.get(); }
- void SetWasmEngine(std::shared_ptr<wasm::WasmEngine> engine);
+ // TODO(wasm): Replace all uses by {WasmEngine::GetWasmEngine}?
+ wasm::WasmEngine* wasm_engine() const { return wasm_engine_; }
+ void SetWasmEngine(wasm::WasmEngine* engine);
void AddSharedWasmMemory(Handle<WasmMemoryObject> memory_object);
#endif // V8_ENABLE_WEBASSEMBLY
@@ -1692,6 +1740,9 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
MaybeLocal<v8::Context> GetContextFromRecorderContextId(
v8::metrics::Recorder::ContextId id);
+ void UpdateLongTaskStats();
+ v8::metrics::LongTaskStats* GetCurrentLongTaskStats();
+
LocalIsolate* main_thread_local_isolate() {
return main_thread_local_isolate_.get();
}
@@ -1715,8 +1766,24 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
}
#endif
+ struct PromiseHookFields {
+ using HasContextPromiseHook = base::BitField<bool, 0, 1>;
+ using HasIsolatePromiseHook = HasContextPromiseHook::Next<bool, 1>;
+ using HasAsyncEventDelegate = HasIsolatePromiseHook::Next<bool, 1>;
+ using IsDebugActive = HasAsyncEventDelegate::Next<bool, 1>;
+ };
+
+ bool is_shared() { return is_shared_; }
+ Isolate* shared_isolate() { return shared_isolate_; }
+
+ void AttachToSharedIsolate(Isolate* shared);
+ void DetachFromSharedIsolate();
+
+ bool HasClientIsolates() const { return client_isolate_head_; }
+
private:
- explicit Isolate(std::unique_ptr<IsolateAllocator> isolate_allocator);
+ explicit Isolate(std::unique_ptr<IsolateAllocator> isolate_allocator,
+ bool is_shared);
~Isolate();
bool Init(SnapshotData* startup_snapshot_data,
@@ -1727,6 +1794,10 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
void InitializeCodeRanges();
void AddCodeMemoryRange(MemoryRange range);
+ // Common method to create an Isolate used by Isolate::New() and
+ // Isolate::NewShared().
+ static Isolate* Allocate(bool is_shared);
+
static void RemoveContextIdCallback(const v8::WeakCallbackInfo<void>& data);
class ThreadDataTable {
@@ -1798,6 +1869,16 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
void RunPromiseHookForAsyncEventDelegate(PromiseHookType type,
Handle<JSPromise> promise);
+ bool HasIsolatePromiseHooks() const {
+ return PromiseHookFields::HasIsolatePromiseHook::decode(
+ promise_hook_flags_);
+ }
+
+ bool HasAsyncEventDelegate() const {
+ return PromiseHookFields::HasAsyncEventDelegate::decode(
+ promise_hook_flags_);
+ }
+
const char* RAILModeName(RAILMode rail_mode) const {
switch (rail_mode) {
case PERFORMANCE_RESPONSE:
@@ -1817,6 +1898,10 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
// Returns the Exception sentinel.
Object ThrowInternal(Object exception, MessageLocation* location);
+ // Methods for appending and removing to/from client isolates list.
+ void AppendAsClientIsolate(Isolate* client);
+ void RemoveAsClientIsolate(Isolate* client);
+
// This class contains a collection of data accessible from both C++ runtime
// and compiled code (including assembly stubs, builtins, interpreter bytecode
// handlers and optimized code).
@@ -1862,6 +1947,7 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
GlobalHandles* global_handles_ = nullptr;
EternalHandles* eternal_handles_ = nullptr;
ThreadManager* thread_manager_ = nullptr;
+ bigint::Processor* bigint_processor_ = nullptr;
RuntimeState runtime_state_;
Builtins builtins_;
SetupIsolateDelegate* setup_delegate_ = nullptr;
@@ -2021,6 +2107,9 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
Persistent<v8::Context, v8::CopyablePersistentTraits<v8::Context>>>
recorder_context_id_map_;
+ size_t last_long_task_stats_counter_ = 0;
+ v8::metrics::LongTaskStats long_task_stats_;
+
std::vector<Object> startup_object_cache_;
// Used during builtins compilation to build the builtins constants table,
@@ -2050,8 +2139,7 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
debug::ConsoleDelegate* console_delegate_ = nullptr;
debug::AsyncEventDelegate* async_event_delegate_ = nullptr;
- bool promise_hook_or_async_event_delegate_ = false;
- bool promise_hook_or_debug_is_active_or_async_event_delegate_ = false;
+ uint32_t promise_hook_flags_ = 0;
int async_task_count_ = 0;
std::unique_ptr<LocalIsolate> main_thread_local_isolate_;
@@ -2069,7 +2157,7 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
size_t elements_deletion_counter_ = 0;
#if V8_ENABLE_WEBASSEMBLY
- std::shared_ptr<wasm::WasmEngine> wasm_engine_;
+ wasm::WasmEngine* wasm_engine_ = nullptr;
#endif // V8_ENABLE_WEBASSEMBLY
std::unique_ptr<TracingCpuProfilerImpl> tracing_cpu_profiler_;
@@ -2088,6 +2176,23 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
base::Mutex thread_data_table_mutex_;
ThreadDataTable thread_data_table_;
+ // Set to true if this isolate is used as a shared heap.
+ const bool is_shared_;
+
+ // Stores the shared isolate for this client isolate. nullptr for shared
+ // isolates or when no shared isolate is used.
+ Isolate* shared_isolate_ = nullptr;
+
+ // A shared isolate will use these two fields to track all its client
+ // isolates.
+ base::Mutex client_isolate_mutex_;
+ Isolate* client_isolate_head_ = nullptr;
+
+ // Used to form a linked list of all client isolates. Protected by
+ // client_isolate_mutex_.
+ Isolate* prev_client_isolate_ = nullptr;
+ Isolate* next_client_isolate_ = nullptr;
+
// A signal-safe vector of heap pages containing code. Used with the
// v8::Unwinder API.
std::atomic<std::vector<MemoryRange>*> code_pages_{nullptr};
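The PromiseHookFields struct introduced in this file packs the four promise-hook related booleans into the single promise_hook_flags_ word via base::BitField, replacing the two separate bool members removed above. A simplified standalone illustration of the encode/decode pattern (not V8's actual base::BitField, which also supports multi-bit fields):

#include <cstdint>

// Simplified stand-in for base::BitField<bool, kShift, 1>.
template <int kShift>
struct BoolField {
  static constexpr uint32_t kMask = 1u << kShift;
  static uint32_t update(uint32_t flags, bool value) {
    return value ? (flags | kMask) : (flags & ~kMask);
  }
  static bool decode(uint32_t flags) { return (flags & kMask) != 0; }
};

using HasContextPromiseHook = BoolField<0>;
using HasIsolatePromiseHook = BoolField<1>;
using HasAsyncEventDelegate = BoolField<2>;
using IsDebugActive = BoolField<3>;

// Usage: flags = HasContextPromiseHook::update(flags, true);
//        bool debug = IsDebugActive::decode(flags);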
diff --git a/chromium/v8/src/execution/local-isolate-inl.h b/chromium/v8/src/execution/local-isolate-inl.h
index 318cc10fa4a..59a7b1f5e42 100644
--- a/chromium/v8/src/execution/local-isolate-inl.h
+++ b/chromium/v8/src/execution/local-isolate-inl.h
@@ -12,7 +12,7 @@
namespace v8 {
namespace internal {
-Address LocalIsolate::isolate_root() const { return isolate_->isolate_root(); }
+Address LocalIsolate::cage_base() const { return isolate_->cage_base(); }
ReadOnlyHeap* LocalIsolate::read_only_heap() const {
return isolate_->read_only_heap();
}
diff --git a/chromium/v8/src/execution/local-isolate.h b/chromium/v8/src/execution/local-isolate.h
index 91192d5f0f1..5392534d667 100644
--- a/chromium/v8/src/execution/local-isolate.h
+++ b/chromium/v8/src/execution/local-isolate.h
@@ -50,7 +50,7 @@ class V8_EXPORT_PRIVATE LocalIsolate final : private HiddenLocalFactory {
LocalHeap* heap() { return &heap_; }
- inline Address isolate_root() const;
+ inline Address cage_base() const;
inline ReadOnlyHeap* read_only_heap() const;
inline Object root(RootIndex index) const;
@@ -62,8 +62,7 @@ class V8_EXPORT_PRIVATE LocalIsolate final : private HiddenLocalFactory {
v8::internal::LocalFactory* factory() {
// Upcast to the privately inherited base-class using c-style casts to avoid
// undefined behavior (as static_cast cannot cast across private bases).
- // NOLINTNEXTLINE (google-readability-casting)
- return (v8::internal::LocalFactory*)this; // NOLINT(readability/casting)
+ return (v8::internal::LocalFactory*)this;
}
bool has_pending_exception() const { return false; }
diff --git a/chromium/v8/src/execution/messages.cc b/chromium/v8/src/execution/messages.cc
index a8b7ad23ca4..f72ff6b02c3 100644
--- a/chromium/v8/src/execution/messages.cc
+++ b/chromium/v8/src/execution/messages.cc
@@ -186,8 +186,7 @@ void MessageHandler::ReportMessageNoExceptions(
FUNCTION_CAST<v8::MessageCallback>(callback_obj.foreign_address());
Handle<Object> callback_data(listener.get(1), isolate);
{
- RuntimeCallTimerScope timer(
- isolate, RuntimeCallCounterId::kMessageListenerCallback);
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kMessageListenerCallback);
// Do not allow exceptions to propagate.
v8::TryCatch try_catch(reinterpret_cast<v8::Isolate*>(isolate));
callback(api_message_obj, callback_data->IsUndefined(isolate)
@@ -483,7 +482,8 @@ MaybeHandle<String> MessageFormatter::Format(Isolate* isolate,
MaybeHandle<JSObject> ErrorUtils::Construct(Isolate* isolate,
Handle<JSFunction> target,
Handle<Object> new_target,
- Handle<Object> message) {
+ Handle<Object> message,
+ Handle<Object> options) {
FrameSkipMode mode = SKIP_FIRST;
Handle<Object> caller;
@@ -495,15 +495,15 @@ MaybeHandle<JSObject> ErrorUtils::Construct(Isolate* isolate,
caller = new_target;
}
- return ErrorUtils::Construct(isolate, target, new_target, message, mode,
- caller,
+ return ErrorUtils::Construct(isolate, target, new_target, message, options,
+ mode, caller,
ErrorUtils::StackTraceCollection::kDetailed);
}
MaybeHandle<JSObject> ErrorUtils::Construct(
Isolate* isolate, Handle<JSFunction> target, Handle<Object> new_target,
- Handle<Object> message, FrameSkipMode mode, Handle<Object> caller,
- StackTraceCollection stack_trace_collection) {
+ Handle<Object> message, Handle<Object> options, FrameSkipMode mode,
+ Handle<Object> caller, StackTraceCollection stack_trace_collection) {
if (FLAG_correctness_fuzzer_suppressions) {
// Abort range errors in correctness fuzzing, as their causes differ
// across correctness-fuzzing scenarios.
@@ -536,7 +536,6 @@ MaybeHandle<JSObject> ErrorUtils::Construct(
// true, [[Enumerable]]: false, [[Configurable]]: true}.
// c. Perform ! DefinePropertyOrThrow(O, "message", msgDesc).
// 4. Return O.
-
if (!message->IsUndefined(isolate)) {
Handle<String> msg_string;
ASSIGN_RETURN_ON_EXCEPTION(isolate, msg_string,
@@ -548,6 +547,31 @@ MaybeHandle<JSObject> ErrorUtils::Construct(
JSObject);
}
+ if (FLAG_harmony_error_cause && !options->IsUndefined(isolate)) {
+ // If Type(options) is Object and ? HasProperty(options, "cause") then
+ // a. Let cause be ? Get(options, "cause").
+ // b. Perform ! CreateNonEnumerableDataPropertyOrThrow(O, "cause", cause).
+ Handle<Name> cause_string = isolate->factory()->cause_string();
+ if (options->IsJSReceiver()) {
+ Handle<JSReceiver> js_options = Handle<JSReceiver>::cast(options);
+ Maybe<bool> has_cause = JSObject::HasProperty(js_options, cause_string);
+ if (has_cause.IsNothing()) {
+ DCHECK((isolate)->has_pending_exception());
+ return MaybeHandle<JSObject>();
+ }
+ if (has_cause.ToChecked()) {
+ Handle<Object> cause;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, cause,
+ JSObject::GetProperty(isolate, js_options, cause_string), JSObject);
+ RETURN_ON_EXCEPTION(isolate,
+ JSObject::SetOwnPropertyIgnoreAttributes(
+ err, cause_string, cause, DONT_ENUM),
+ JSObject);
+ }
+ }
+ }
+
switch (stack_trace_collection) {
case StackTraceCollection::kDetailed:
RETURN_ON_EXCEPTION(
@@ -676,14 +700,15 @@ Handle<JSObject> ErrorUtils::MakeGenericError(
isolate->clear_pending_exception();
}
Handle<String> msg = DoFormatMessage(isolate, index, arg0, arg1, arg2);
+ Handle<Object> options = isolate->factory()->undefined_value();
DCHECK(mode != SKIP_UNTIL_SEEN);
Handle<Object> no_caller;
// The call below can't fail because constructor is a builtin.
DCHECK(constructor->shared().HasBuiltinId());
- return ErrorUtils::Construct(isolate, constructor, constructor, msg, mode,
- no_caller, StackTraceCollection::kDetailed)
+ return ErrorUtils::Construct(isolate, constructor, constructor, msg, options,
+ mode, no_caller, StackTraceCollection::kDetailed)
.ToHandleChecked();
}
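The options parameter threaded through ErrorUtils::Construct in the hunks above implements the flag-guarded error-cause proposal: when options is an object with a "cause" property, its value is installed on the new error as a non-enumerable own property. A hedged embedder-side sketch of the same steps through the public API (illustrative only; the patch performs this inside the runtime, not through these calls):

#include "include/v8.h"

// Copies options.cause onto |error| as a non-enumerable property,
// mirroring the runtime-side logic added in messages.cc.
void AttachCause(v8::Isolate* isolate, v8::Local<v8::Context> context,
                 v8::Local<v8::Object> error, v8::Local<v8::Value> options) {
  if (!options->IsObject()) return;
  v8::Local<v8::Object> opts = options.As<v8::Object>();
  v8::Local<v8::String> key =
      v8::String::NewFromUtf8Literal(isolate, "cause");
  bool has_cause = false;
  if (!opts->Has(context, key).To(&has_cause) || !has_cause) return;
  v8::Local<v8::Value> cause;
  if (!opts->Get(context, key).ToLocal(&cause)) return;
  error->DefineOwnProperty(context, key, cause, v8::DontEnum).Check();
}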
diff --git a/chromium/v8/src/execution/messages.h b/chromium/v8/src/execution/messages.h
index 43f99d17972..ad2d76f59e2 100644
--- a/chromium/v8/src/execution/messages.h
+++ b/chromium/v8/src/execution/messages.h
@@ -77,11 +77,12 @@ class ErrorUtils : public AllStatic {
static MaybeHandle<JSObject> Construct(Isolate* isolate,
Handle<JSFunction> target,
Handle<Object> new_target,
- Handle<Object> message);
+ Handle<Object> message,
+ Handle<Object> options);
static MaybeHandle<JSObject> Construct(
Isolate* isolate, Handle<JSFunction> target, Handle<Object> new_target,
- Handle<Object> message, FrameSkipMode mode, Handle<Object> caller,
- StackTraceCollection stack_trace_collection);
+ Handle<Object> message, Handle<Object> options, FrameSkipMode mode,
+ Handle<Object> caller, StackTraceCollection stack_trace_collection);
static MaybeHandle<String> ToString(Isolate* isolate, Handle<Object> recv);
diff --git a/chromium/v8/src/execution/microtask-queue.cc b/chromium/v8/src/execution/microtask-queue.cc
index 496737b03a4..cae642e2c97 100644
--- a/chromium/v8/src/execution/microtask-queue.cc
+++ b/chromium/v8/src/execution/microtask-queue.cc
@@ -113,6 +113,16 @@ void MicrotaskQueue::EnqueueMicrotask(Microtask microtask) {
void MicrotaskQueue::PerformCheckpoint(v8::Isolate* v8_isolate) {
if (!IsRunningMicrotasks() && !GetMicrotasksScopeDepth() &&
!HasMicrotasksSuppressions()) {
+ std::unique_ptr<MicrotasksScope> microtasks_scope;
+ if (microtasks_policy_ == v8::MicrotasksPolicy::kScoped) {
+ // If we're using microtask scopes to schedule microtask execution, V8
+ // API calls will check that there's always a microtask scope on the
+ // stack. As the microtasks we're about to execute could invoke embedder
+ // callbacks which then call back into V8, we create an artificial
+ // microtask scope here to avoid running into the CallDepthScope check.
+ microtasks_scope.reset(new v8::MicrotasksScope(
+ v8_isolate, this, v8::MicrotasksScope::kDoNotRunMicrotasks));
+ }
Isolate* isolate = reinterpret_cast<Isolate*>(v8_isolate);
RunMicrotasks(isolate);
isolate->ClearKeptObjects();
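The artificial MicrotasksScope above exists because, under MicrotasksPolicy::kScoped, V8 asserts (via the CallDepthScope check mentioned in the comment) that a scope is on the stack whenever execution re-enters V8, and the checkpoint can re-enter through embedder callbacks. For comparison, a hedged sketch of the usual embedder pattern for scoped microtasks (error handling omitted):

#include "include/v8.h"

void RunWithScopedMicrotasks(v8::Isolate* isolate,
                             v8::Local<v8::Context> context) {
  isolate->SetMicrotasksPolicy(v8::MicrotasksPolicy::kScoped);
  {
    // Microtasks enqueued while this scope is active run when the
    // outermost kRunMicrotasks scope is destroyed.
    v8::MicrotasksScope scope(isolate, v8::MicrotasksScope::kRunMicrotasks);
    v8::Context::Scope context_scope(context);
    // ... run script / call into JS here ...
  }  // microtask checkpoint happens here
}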
diff --git a/chromium/v8/src/execution/mips/simulator-mips.cc b/chromium/v8/src/execution/mips/simulator-mips.cc
index ca402dca040..3c9c2523102 100644
--- a/chromium/v8/src/execution/mips/simulator-mips.cc
+++ b/chromium/v8/src/execution/mips/simulator-mips.cc
@@ -47,7 +47,7 @@ uint32_t get_fcsr_condition_bit(uint32_t cc) {
// SScanF not being implemented in a platform independent way through
// ::v8::internal::OS in the same way as SNPrintF is that the Windows C Run-Time
// Library does not provide vsscanf.
-#define SScanF sscanf // NOLINT
+#define SScanF sscanf
// The MipsDebugger class is used by the simulator while debugging simulated
// code.
diff --git a/chromium/v8/src/execution/mips64/simulator-mips64.cc b/chromium/v8/src/execution/mips64/simulator-mips64.cc
index 198022b3a4e..2ab945f88de 100644
--- a/chromium/v8/src/execution/mips64/simulator-mips64.cc
+++ b/chromium/v8/src/execution/mips64/simulator-mips64.cc
@@ -64,7 +64,7 @@ static int64_t MultiplyHighSigned(int64_t u, int64_t v) {
// SScanF not being implemented in a platform independent way through
// ::v8::internal::OS in the same way as SNPrintF is that the Windows C Run-Time
// Library does not provide vsscanf.
-#define SScanF sscanf // NOLINT
+#define SScanF sscanf
// The MipsDebugger class is used by the simulator while debugging simulated
// code.
diff --git a/chromium/v8/src/execution/ppc/simulator-ppc.cc b/chromium/v8/src/execution/ppc/simulator-ppc.cc
index c812af360ea..f9b9552433e 100644
--- a/chromium/v8/src/execution/ppc/simulator-ppc.cc
+++ b/chromium/v8/src/execution/ppc/simulator-ppc.cc
@@ -39,7 +39,7 @@ DEFINE_LAZY_LEAKY_OBJECT_GETTER(Simulator::GlobalMonitor,
// SScanF not being implemented in a platform independent way through
// ::v8::internal::OS in the same way as SNPrintF is that the
// Windows C Run-Time Library does not provide vsscanf.
-#define SScanF sscanf // NOLINT
+#define SScanF sscanf
// The PPCDebugger class is used by the simulator while debugging simulated
// PowerPC code.
@@ -1138,7 +1138,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
SimulatorRuntimeDirectGetterCall target =
reinterpret_cast<SimulatorRuntimeDirectGetterCall>(external);
if (!ABI_PASSES_HANDLES_IN_REGS) {
- arg[0] = *(reinterpret_cast<intptr_t*>(arg[0]));
+ arg[0] = bit_cast<intptr_t>(arg[0]);
}
target(arg[0], arg[1]);
} else if (redirection->type() ==
@@ -1157,7 +1157,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
SimulatorRuntimeProfilingGetterCall target =
reinterpret_cast<SimulatorRuntimeProfilingGetterCall>(external);
if (!ABI_PASSES_HANDLES_IN_REGS) {
- arg[0] = *(reinterpret_cast<intptr_t*>(arg[0]));
+ arg[0] = bit_cast<intptr_t>(arg[0]);
}
target(arg[0], arg[1], Redirection::ReverseRedirection(arg[2]));
} else {
@@ -2464,6 +2464,10 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
DCHECK_EQ(instr->Bit(0), 1);
set_simd_register_by_lane<int64_t>(frt, 0,
static_cast<int64_t>(ra_val));
+ // The low 64 bits of the result are undefined; this is simulated
+ // here by filling them with an arbitrary bit pattern.
+ set_simd_register_by_lane<int64_t>(
+ frt, 1, static_cast<int64_t>(0x123456789ABCD));
}
break;
}
@@ -3913,8 +3917,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
case STVX: {
DECODE_VX_INSTRUCTION(vrs, ra, rb, S)
GET_ADDRESS(ra, rb, ra_val, rb_val)
- __int128 vrs_val =
- *(reinterpret_cast<__int128*>(get_simd_register(vrs).int8));
+ __int128 vrs_val = bit_cast<__int128>(get_simd_register(vrs).int8);
WriteQW((ra_val + rb_val) & 0xFFFFFFFFFFFFFFF0, vrs_val);
break;
}
@@ -3926,6 +3929,14 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
xt, 1, ReadDW(ra_val + rb_val + kSystemPointerSize));
break;
}
+ case LXVX: {
+ DECODE_VX_INSTRUCTION(vrt, ra, rb, T)
+ GET_ADDRESS(ra, rb, ra_val, rb_val)
+ intptr_t addr = ra_val + rb_val;
+ simdr_t* ptr = reinterpret_cast<simdr_t*>(addr);
+ set_simd_register(vrt, *ptr);
+ break;
+ }
case STXVD: {
DECODE_VX_INSTRUCTION(xs, ra, rb, S)
GET_ADDRESS(ra, rb, ra_val, rb_val)
@@ -3934,6 +3945,14 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
get_simd_register_by_lane<int64_t>(xs, 1));
break;
}
+ case STXVX: {
+ DECODE_VX_INSTRUCTION(vrs, ra, rb, S)
+ GET_ADDRESS(ra, rb, ra_val, rb_val)
+ intptr_t addr = ra_val + rb_val;
+ __int128 vrs_val = bit_cast<__int128>(get_simd_register(vrs).int8);
+ WriteQW(addr, vrs_val);
+ break;
+ }
case LXSIBZX: {
DECODE_VX_INSTRUCTION(xt, ra, rb, T)
GET_ADDRESS(ra, rb, ra_val, rb_val)
@@ -3982,6 +4001,15 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
WriteDW(ra_val + rb_val, get_simd_register_by_lane<int64_t>(xs, 0));
break;
}
+ case XXBRQ: {
+ int t = instr->RTValue();
+ int b = instr->RBValue();
+ __int128 xb_val = bit_cast<__int128>(get_simd_register(b).int8);
+ __int128 xb_val_reversed = __builtin_bswap128(xb_val);
+ simdr_t simdr_xb = bit_cast<simdr_t>(xb_val_reversed);
+ set_simd_register(t, simdr_xb);
+ break;
+ }
#define VSPLT(type) \
uint32_t uim = instr->Bits(20, 16); \
int vrt = instr->RTValue(); \
@@ -4749,7 +4777,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
DECODE_VX_INSTRUCTION(t, a, b, T)
uint16_t result_bits = 0;
unsigned __int128 src_bits =
- *(reinterpret_cast<__int128*>(get_simd_register(a).int8));
+ bit_cast<__int128>(get_simd_register(a).int8);
for (int i = 0; i < kSimd128Size; i++) {
result_bits <<= 1;
uint8_t selected_bit_index = get_simd_register_by_lane<uint8_t>(b, i);
@@ -4871,6 +4899,14 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
VECTOR_UNARY_OP(float, base::RecipSqrt)
break;
}
+ case VNEGW: {
+ VECTOR_UNARY_OP(int32_t, -)
+ break;
+ }
+ case VNEGD: {
+ VECTOR_UNARY_OP(int64_t, -)
+ break;
+ }
#undef VECTOR_UNARY_OP
#define VECTOR_ROUNDING_AVERAGE(intermediate_type, result_type) \
DECODE_VX_INSTRUCTION(t, a, b, T) \
@@ -4911,7 +4947,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
break;
}
}
-} // NOLINT
+}
void Simulator::Trace(Instruction* instr) {
disasm::NameConverter converter;
@@ -4981,7 +5017,7 @@ void Simulator::CallInternal(Address entry) {
// Prepare to execute the code at entry
if (ABI_USES_FUNCTION_DESCRIPTORS) {
// entry is the function descriptor
- set_pc(*(reinterpret_cast<intptr_t*>(entry)));
+ set_pc(*(bit_cast<intptr_t*>(entry)));
} else {
// entry is the instruction address
set_pc(static_cast<intptr_t>(entry));
diff --git a/chromium/v8/src/execution/ppc/simulator-ppc.h b/chromium/v8/src/execution/ppc/simulator-ppc.h
index bacd844be05..2dea5c00ca2 100644
--- a/chromium/v8/src/execution/ppc/simulator-ppc.h
+++ b/chromium/v8/src/execution/ppc/simulator-ppc.h
@@ -321,6 +321,18 @@ class Simulator : public SimulatorBase {
}
}
+ // Byte Reverse.
+ static inline __uint128_t __builtin_bswap128(__uint128_t v) {
+ union {
+ uint64_t u64[2];
+ __uint128_t u128;
+ } res, val;
+ val.u128 = v;
+ res.u64[0] = __builtin_bswap64(val.u64[1]);
+ res.u64[1] = __builtin_bswap64(val.u64[0]);
+ return res.u128;
+ }
+
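This helper supplies __builtin_bswap128 for toolchains that lack it: reversing a 128-bit value is the same as byte-swapping each 64-bit half and exchanging the two halves. A standalone sketch of the same idea without __uint128_t (names illustrative):

#include <cstdint>

// 128-bit value stored as two 64-bit halves.
struct U128 {
  uint64_t lo;
  uint64_t hi;
};

inline U128 bswap128(U128 v) {
  // Byte-swapping each half and swapping the halves reverses all 16 bytes.
  return U128{__builtin_bswap64(v.hi), __builtin_bswap64(v.lo)};
}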
#define RW_VAR_LIST(V) \
V(QWU, unsigned __int128) \
V(QW, __int128) \
diff --git a/chromium/v8/src/execution/protectors.h b/chromium/v8/src/execution/protectors.h
index c4ca49d948a..aa89275c110 100644
--- a/chromium/v8/src/execution/protectors.h
+++ b/chromium/v8/src/execution/protectors.h
@@ -26,6 +26,8 @@ class Protectors : public AllStatic {
is_concat_spreadable_protector) \
V(NoElements, NoElementsProtector, no_elements_protector) \
\
+ V(MegaDOM, MegaDOMProtector, mega_dom_protector) \
+ \
/* The MapIterator protector protects the original iteration behaviors */ \
/* of Map.prototype.keys(), Map.prototype.values(), and */ \
/* Set.prototype.entries(). It does not protect the original iteration */ \
diff --git a/chromium/v8/src/execution/riscv64/simulator-riscv64.cc b/chromium/v8/src/execution/riscv64/simulator-riscv64.cc
index 1d38d8c0ca0..e8f16155bef 100644
--- a/chromium/v8/src/execution/riscv64/simulator-riscv64.cc
+++ b/chromium/v8/src/execution/riscv64/simulator-riscv64.cc
@@ -93,7 +93,7 @@ static inline int32_t get_ebreak_code(Instruction* instr) {
// SScanF not being implemented in a platform independent way through
// ::v8::internal::OS in the same way as SNPrintF is that the Windows C Run-Time
// Library does not provide vsscanf.
-#define SScanF sscanf // NOLINT
+#define SScanF sscanf
// The RiscvDebugger class is used by the simulator while debugging simulated
// code.
@@ -261,6 +261,10 @@ void RiscvDebugger::Debug() {
disasm::Disassembler dasm(converter);
// Use a reasonably large buffer.
v8::internal::EmbeddedVector<char, 256> buffer;
+ const char* name = sim_->builtins_.Lookup((Address)sim_->get_pc());
+ if (name != nullptr) {
+ PrintF("Call builtin: %s\n", name);
+ }
dasm.InstructionDecode(buffer, reinterpret_cast<byte*>(sim_->get_pc()));
PrintF(" 0x%016" PRIx64 " %s\n", sim_->get_pc(), buffer.begin());
last_pc = sim_->get_pc();
@@ -785,7 +789,7 @@ void Simulator::CheckICache(base::CustomMatcherHashMap* i_cache,
}
}
-Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
+Simulator::Simulator(Isolate* isolate) : isolate_(isolate), builtins_(isolate) {
// Set up simulator support first. Some of this information is needed to
// setup the architecture state.
stack_size_ = FLAG_sim_stack_size * KB;
@@ -1062,7 +1066,7 @@ T Simulator::FMaxMinHelper(T a, T b, MaxMinKind kind) {
T result = 0;
if (std::isnan(a) && std::isnan(b)) {
- result = a;
+ result = std::numeric_limits<float>::quiet_NaN();
} else if (std::isnan(a)) {
result = b;
} else if (std::isnan(b)) {
@@ -1101,7 +1105,7 @@ int64_t Simulator::get_pc() const { return registers_[pc]; }
// TODO(plind): refactor this messy debug code when we do unaligned access.
void Simulator::DieOrDebug() {
- if ((1)) { // Flag for this was removed.
+ if (FLAG_riscv_trap_to_simulator_debugger) {
RiscvDebugger dbg(this);
dbg.Debug();
} else {
@@ -2819,6 +2823,33 @@ void Simulator::DecodeRVIType() {
// Note: No need to shift 2 for JALR's imm12, but set lowest bit to 0.
int64_t next_pc = (rs1() + imm12()) & ~reg_t(1);
set_pc(next_pc);
+ if (::v8::internal::FLAG_trace_sim) {
+ if ((rs1_reg() != ra || imm12() != 0)) {
+ const char* name = builtins_.Lookup((Address)next_pc);
+ if (name != nullptr) {
+ int64_t arg0 = get_register(a0);
+ int64_t arg1 = get_register(a1);
+ int64_t arg2 = get_register(a2);
+ int64_t arg3 = get_register(a3);
+ int64_t arg4 = get_register(a4);
+ int64_t arg5 = get_register(a5);
+ int64_t arg6 = get_register(a6);
+ int64_t arg7 = get_register(a7);
+ int64_t* stack_pointer =
+ reinterpret_cast<int64_t*>(get_register(sp));
+ int64_t arg8 = stack_pointer[0];
+ int64_t arg9 = stack_pointer[1];
+ PrintF(
+ "Call to Builtin at %s "
+ "a0 %08" PRIx64 " ,a1 %08" PRIx64 " ,a2 %08" PRIx64
+ " ,a3 %08" PRIx64 " ,a4 %08" PRIx64 " ,a5 %08" PRIx64
+ " ,a6 %08" PRIx64 " ,a7 %08" PRIx64 " ,0(sp) %08" PRIx64
+ " ,8(sp) %08" PRIx64 " ,sp %08" PRIx64 ",fp %08" PRIx64 " \n",
+ name, arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8,
+ arg9, get_register(sp), get_register(fp));
+ }
+ }
+ }
break;
}
case RO_LB: {
diff --git a/chromium/v8/src/execution/riscv64/simulator-riscv64.h b/chromium/v8/src/execution/riscv64/simulator-riscv64.h
index e51ec6472c2..e5647b2c43f 100644
--- a/chromium/v8/src/execution/riscv64/simulator-riscv64.h
+++ b/chromium/v8/src/execution/riscv64/simulator-riscv64.h
@@ -706,6 +706,7 @@ class Simulator : public SimulatorBase {
char* last_debugger_input_;
v8::internal::Isolate* isolate_;
+ v8::internal::Builtins builtins_;
// Stop is disabled if bit 31 is set.
static const uint32_t kStopDisabledBit = 1 << 31;
diff --git a/chromium/v8/src/execution/runtime-profiler.cc b/chromium/v8/src/execution/runtime-profiler.cc
index 013d7c0e9f7..05e828da50d 100644
--- a/chromium/v8/src/execution/runtime-profiler.cc
+++ b/chromium/v8/src/execution/runtime-profiler.cc
@@ -107,20 +107,8 @@ void TraceHeuristicOptimizationDisallowed(JSFunction function) {
}
}
-// TODO(jgruber): Remove this once we include this tracing with --trace-opt.
-void TraceNCIRecompile(JSFunction function, OptimizationReason reason) {
- if (FLAG_trace_turbo_nci) {
- StdoutStream os;
- os << "NCI tierup mark: " << Brief(function) << ", "
- << OptimizationReasonToString(reason) << std::endl;
- }
-}
-
void TraceRecompile(JSFunction function, OptimizationReason reason,
CodeKind code_kind, Isolate* isolate) {
- if (code_kind == CodeKind::NATIVE_CONTEXT_INDEPENDENT) {
- TraceNCIRecompile(function, reason);
- }
if (FLAG_trace_opt) {
CodeTracer::Scope scope(isolate->GetCodeTracer());
PrintF(scope.file(), "[marking ");
@@ -187,8 +175,7 @@ void RuntimeProfiler::MaybeOptimizeFrame(JSFunction function,
if (function.shared().optimization_disabled()) return;
- // Note: We currently do not trigger OSR compilation from NCI or TP code.
- // TODO(jgruber,v8:8888): But we should.
+ // Note: We currently do not trigger OSR compilation from TP code.
if (frame->is_unoptimized()) {
if (FLAG_always_osr) {
AttemptOnStackReplacement(UnoptimizedFrame::cast(frame),
diff --git a/chromium/v8/src/execution/s390/simulator-s390.cc b/chromium/v8/src/execution/s390/simulator-s390.cc
index 435082a3b9d..93250b45f47 100644
--- a/chromium/v8/src/execution/s390/simulator-s390.cc
+++ b/chromium/v8/src/execution/s390/simulator-s390.cc
@@ -34,7 +34,7 @@ namespace internal {
// SScanF not being implemented in a platform independent way through
// ::v8::internal::OS in the same way as SNPrintF is that the
// Windows C Run-Time Library does not provide vsscanf.
-#define SScanF sscanf // NOLINT
+#define SScanF sscanf
const Simulator::fpr_t Simulator::fp_zero;
@@ -772,6 +772,8 @@ void Simulator::EvalTableInit() {
V(vsum, VSUM, 0xE764) /* type = VRR_C VECTOR SUM ACROSS WORD */ \
V(vsumg, VSUMG, 0xE765) /* type = VRR_C VECTOR SUM ACROSS DOUBLEWORD */ \
V(vpk, VPK, 0xE794) /* type = VRR_C VECTOR PACK */ \
+ V(vmrl, VMRL, 0xE760) /* type = VRR_C VECTOR MERGE LOW */ \
+ V(vmrh, VMRH, 0xE761) /* type = VRR_C VECTOR MERGE HIGH */ \
V(vpks, VPKS, 0xE797) /* type = VRR_B VECTOR PACK SATURATE */ \
V(vpkls, VPKLS, 0xE795) /* type = VRR_B VECTOR PACK LOGICAL SATURATE */ \
V(vupll, VUPLL, 0xE7D4) /* type = VRR_A VECTOR UNPACK LOGICAL LOW */ \
@@ -1558,7 +1560,7 @@ void Simulator::EvalTableInit() {
EvalTable[CZXT] = &Simulator::Evaluate_CZXT;
EvalTable[CDZT] = &Simulator::Evaluate_CDZT;
EvalTable[CXZT] = &Simulator::Evaluate_CXZT;
-} // NOLINT
+}
Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
static base::OnceType once = V8_ONCE_INIT;
@@ -2000,8 +2002,8 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
(redirection->type() == ExternalReference::BUILTIN_FP_INT_CALL);
// Place the return address on the stack, making the call GC safe.
- *reinterpret_cast<intptr_t*>(get_register(sp) +
- kStackFrameRASlot * kSystemPointerSize) =
+ *bit_cast<intptr_t*>(get_register(sp) +
+ kStackFrameRASlot * kSystemPointerSize) =
get_register(r14);
intptr_t external =
@@ -2141,7 +2143,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
SimulatorRuntimeDirectGetterCall target =
reinterpret_cast<SimulatorRuntimeDirectGetterCall>(external);
if (!ABI_PASSES_HANDLES_IN_REGS) {
- arg[0] = *(reinterpret_cast<intptr_t*>(arg[0]));
+ arg[0] = bit_cast<intptr_t>(arg[0]);
}
target(arg[0], arg[1]);
} else if (redirection->type() ==
@@ -2160,7 +2162,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
SimulatorRuntimeProfilingGetterCall target =
reinterpret_cast<SimulatorRuntimeProfilingGetterCall>(external);
if (!ABI_PASSES_HANDLES_IN_REGS) {
- arg[0] = *(reinterpret_cast<intptr_t*>(arg[0]));
+ arg[0] = bit_cast<intptr_t>(arg[0]);
}
target(arg[0], arg[1], Redirection::ReverseRedirection(arg[2]));
} else {
@@ -2268,7 +2270,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
// }
// #endif
}
- int64_t saved_lr = *reinterpret_cast<intptr_t*>(
+ int64_t saved_lr = *bit_cast<intptr_t*>(
get_register(sp) + kStackFrameRASlot * kSystemPointerSize);
#if (!V8_TARGET_ARCH_S390X && V8_HOST_ARCH_S390)
// On zLinux-31, the saved_lr might be tagged with a high bit of 1.
@@ -2485,7 +2487,7 @@ void Simulator::CallInternal(Address entry, int reg_arg_count) {
// Prepare to execute the code at entry
if (ABI_USES_FUNCTION_DESCRIPTORS) {
// entry is the function descriptor
- set_pc(*(reinterpret_cast<intptr_t*>(entry)));
+ set_pc(*(bit_cast<intptr_t*>(entry)));
} else {
// entry is the instruction address
set_pc(static_cast<intptr_t>(entry));
@@ -2607,7 +2609,7 @@ intptr_t Simulator::CallImpl(Address entry, int argument_count,
// Prepare to execute the code at entry
#if ABI_USES_FUNCTION_DESCRIPTORS
// entry is the function descriptor
- set_pc(*(reinterpret_cast<intptr_t*>(entry)));
+ set_pc(*(bit_cast<intptr_t*>(entry)));
#else
// entry is the instruction address
set_pc(static_cast<intptr_t>(entry));
@@ -3134,12 +3136,12 @@ EVALUATE(VLREP) {
DCHECK_OPCODE(VLREP);
DECODE_VRX_INSTRUCTION(r1, x2, b2, d2, m3);
intptr_t addr = GET_ADDRESS(x2, b2, d2);
-#define CASE(i, type) \
- case i: { \
- FOR_EACH_LANE(j, type) { \
- set_simd_register_by_lane<type>(r1, j, *reinterpret_cast<type*>(addr)); \
- } \
- break; \
+#define CASE(i, type) \
+ case i: { \
+ FOR_EACH_LANE(j, type) { \
+ set_simd_register_by_lane<type>(r1, j, *bit_cast<type*>(addr)); \
+ } \
+ break; \
}
switch (m3) {
CASE(0, uint8_t);
@@ -3273,8 +3275,10 @@ EVALUATE(VML) {
j = lane_size; \
} \
for (; j < kSimd128Size; i += 2, j += lane_size * 2, k++) { \
- input_type src0 = get_simd_register_by_lane<input_type>(r2, i); \
- input_type src1 = get_simd_register_by_lane<input_type>(r3, i); \
+ result_type src0 = static_cast<result_type>( \
+ get_simd_register_by_lane<input_type>(r2, i)); \
+ result_type src1 = static_cast<result_type>( \
+ get_simd_register_by_lane<input_type>(r3, i)); \
set_simd_register_by_lane<result_type>(r1, k, src0 * src1); \
}
#define VECTOR_MULTIPLY_EVEN_ODD(r1, r2, r3, is_odd, sign) \
@@ -3395,6 +3399,53 @@ EVALUATE(VSUMG) {
}
#undef CASE
+#define VECTOR_MERGE(type, is_low_side) \
+ constexpr size_t index_limit = (kSimd128Size / sizeof(type)) / 2; \
+ for (size_t i = 0, source_index = is_low_side ? i + index_limit : i; \
+ i < index_limit; i++, source_index++) { \
+ set_simd_register_by_lane<type>( \
+ r1, 2 * i, get_simd_register_by_lane<type>(r2, source_index)); \
+ set_simd_register_by_lane<type>( \
+ r1, (2 * i) + 1, get_simd_register_by_lane<type>(r3, source_index)); \
+ }
+#define CASE(i, type, is_low_side) \
+ case i: { \
+ VECTOR_MERGE(type, is_low_side) \
+ } break;
+EVALUATE(VMRL) {
+ DCHECK_OPCODE(VMRL);
+ DECODE_VRR_C_INSTRUCTION(r1, r2, r3, m6, m5, m4);
+ USE(m6);
+ USE(m5);
+ switch (m4) {
+ CASE(0, int8_t, true);
+ CASE(1, int16_t, true);
+ CASE(2, int32_t, true);
+ CASE(3, int64_t, true);
+ default:
+ UNREACHABLE();
+ }
+ return length;
+}
+
+EVALUATE(VMRH) {
+ DCHECK_OPCODE(VMRH);
+ DECODE_VRR_C_INSTRUCTION(r1, r2, r3, m6, m5, m4);
+ USE(m6);
+ USE(m5);
+ switch (m4) {
+ CASE(0, int8_t, false);
+ CASE(1, int16_t, false);
+ CASE(2, int32_t, false);
+ CASE(3, int64_t, false);
+ default:
+ UNREACHABLE();
+ }
+ return length;
+}
+#undef CASE
+#undef VECTOR_MERGE
+
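The VMRL and VMRH handlers added above implement the s390 vector merge low/high instructions: lanes taken from one half of each source register are interleaved into the destination, alternating between the two sources. A rough standalone sketch for four 32-bit lanes (illustrative only):

#include <array>
#include <cstdint>

using Vec128 = std::array<int32_t, 4>;  // four 32-bit lanes

// is_low_side=true reads lanes 2..3 of each source (the "low" half in the
// macro's element numbering), false reads lanes 0..1; results interleave
// source a and source b, matching VECTOR_MERGE above.
Vec128 Merge(const Vec128& a, const Vec128& b, bool is_low_side) {
  Vec128 result{};
  const size_t limit = a.size() / 2;
  for (size_t i = 0, src = is_low_side ? limit : 0; i < limit; ++i, ++src) {
    result[2 * i] = a[src];
    result[2 * i + 1] = b[src];
  }
  return result;
}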
template <class S, class D>
void VectorPack(Simulator* sim, int dst, int src1, int src2, bool saturate,
const D& max = 0, const D& min = 0) {
@@ -3901,8 +3952,7 @@ EVALUATE(VBPERM) {
USE(m5);
USE(m6);
uint16_t result_bits = 0;
- unsigned __int128 src_bits =
- *(reinterpret_cast<__int128*>(get_simd_register(r2).int8));
+ unsigned __int128 src_bits = bit_cast<__int128>(get_simd_register(r2).int8);
for (int i = 0; i < kSimd128Size; i++) {
result_bits <<= 1;
uint8_t selected_bit_index = get_simd_register_by_lane<uint8_t>(r3, i);
@@ -5443,7 +5493,7 @@ EVALUATE(LD) {
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
intptr_t addr = b2_val + x2_val + d2_val;
- int64_t dbl_val = *reinterpret_cast<int64_t*>(addr);
+ int64_t dbl_val = *bit_cast<int64_t*>(addr);
set_fpr(r1, dbl_val);
return length;
}
@@ -5482,7 +5532,7 @@ EVALUATE(LE) {
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
intptr_t addr = b2_val + x2_val + d2_val;
- float float_val = *reinterpret_cast<float*>(addr);
+ float float_val = *bit_cast<float*>(addr);
set_fpr(r1, float_val);
return length;
}
@@ -11157,7 +11207,7 @@ EVALUATE(LEY) {
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
intptr_t addr = x2_val + b2_val + d2;
- float float_val = *reinterpret_cast<float*>(addr);
+ float float_val = *bit_cast<float*>(addr);
set_fpr(r1, float_val);
return length;
}
@@ -11169,7 +11219,7 @@ EVALUATE(LDY) {
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
intptr_t addr = x2_val + b2_val + d2;
- uint64_t dbl_val = *reinterpret_cast<uint64_t*>(addr);
+ uint64_t dbl_val = *bit_cast<uint64_t*>(addr);
set_fpr(r1, dbl_val);
return length;
}
diff --git a/chromium/v8/src/execution/shared-mutex-guard-if-off-thread.h b/chromium/v8/src/execution/shared-mutex-guard-if-off-thread.h
index 972f6c0c89a..8dfcd09f396 100644
--- a/chromium/v8/src/execution/shared-mutex-guard-if-off-thread.h
+++ b/chromium/v8/src/execution/shared-mutex-guard-if-off-thread.h
@@ -10,7 +10,7 @@
namespace v8 {
namespace internal {
-template <typename LocalIsolate, base::MutexSharedType kIsShared>
+template <typename IsolateT, base::MutexSharedType kIsShared>
class SharedMutexGuardIfOffThread;
} // namespace internal
diff --git a/chromium/v8/src/execution/stack-guard.cc b/chromium/v8/src/execution/stack-guard.cc
index dd32f58b98c..0bbdf1444ac 100644
--- a/chromium/v8/src/execution/stack-guard.cc
+++ b/chromium/v8/src/execution/stack-guard.cc
@@ -12,6 +12,7 @@
#include "src/logging/counters.h"
#include "src/objects/backing-store.h"
#include "src/roots/roots-inl.h"
+#include "src/tracing/trace-event.h"
#include "src/utils/memcopy.h"
#if V8_ENABLE_WEBASSEMBLY
diff --git a/chromium/v8/src/extensions/cputracemark-extension.cc b/chromium/v8/src/extensions/cputracemark-extension.cc
index 5fde3608de8..029ad0f3cb7 100644
--- a/chromium/v8/src/extensions/cputracemark-extension.cc
+++ b/chromium/v8/src/extensions/cputracemark-extension.cc
@@ -16,9 +16,8 @@ CpuTraceMarkExtension::GetNativeFunctionTemplate(v8::Isolate* isolate,
void CpuTraceMarkExtension::Mark(
const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() < 1 || !args[0]->IsUint32()) {
- args.GetIsolate()->ThrowException(v8::String::NewFromUtf8Literal(
- args.GetIsolate(),
- "First parameter to cputracemark() must be a unsigned int32."));
+ args.GetIsolate()->ThrowError(
+ "First parameter to cputracemark() must be a unsigned int32.");
return;
}
diff --git a/chromium/v8/src/extensions/externalize-string-extension.cc b/chromium/v8/src/extensions/externalize-string-extension.cc
index b153ebd075b..3e42b214de6 100644
--- a/chromium/v8/src/extensions/externalize-string-extension.cc
+++ b/chromium/v8/src/extensions/externalize-string-extension.cc
@@ -59,9 +59,8 @@ ExternalizeStringExtension::GetNativeFunctionTemplate(
void ExternalizeStringExtension::Externalize(
const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() < 1 || !args[0]->IsString()) {
- args.GetIsolate()->ThrowException(v8::String::NewFromUtf8Literal(
- args.GetIsolate(),
- "First parameter to externalizeString() must be a string."));
+ args.GetIsolate()->ThrowError(
+ "First parameter to externalizeString() must be a string.");
return;
}
bool force_two_byte = false;
@@ -69,17 +68,15 @@ void ExternalizeStringExtension::Externalize(
if (args[1]->IsBoolean()) {
force_two_byte = args[1]->BooleanValue(args.GetIsolate());
} else {
- args.GetIsolate()->ThrowException(v8::String::NewFromUtf8Literal(
- args.GetIsolate(),
- "Second parameter to externalizeString() must be a boolean."));
+ args.GetIsolate()->ThrowError(
+ "Second parameter to externalizeString() must be a boolean.");
return;
}
}
bool result = false;
Handle<String> string = Utils::OpenHandle(*args[0].As<v8::String>());
if (!string->SupportsExternalization()) {
- args.GetIsolate()->ThrowException(v8::String::NewFromUtf8Literal(
- args.GetIsolate(), "string does not support externalization."));
+ args.GetIsolate()->ThrowError("string does not support externalization.");
return;
}
if (string->IsOneByteRepresentation() && !force_two_byte) {
@@ -98,8 +95,7 @@ void ExternalizeStringExtension::Externalize(
if (!result) delete resource;
}
if (!result) {
- args.GetIsolate()->ThrowException(v8::String::NewFromUtf8Literal(
- args.GetIsolate(), "externalizeString() failed."));
+ args.GetIsolate()->ThrowError("externalizeString() failed.");
return;
}
}
@@ -108,9 +104,8 @@ void ExternalizeStringExtension::Externalize(
void ExternalizeStringExtension::IsOneByte(
const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 1 || !args[0]->IsString()) {
- args.GetIsolate()->ThrowException(v8::String::NewFromUtf8Literal(
- args.GetIsolate(),
- "isOneByteString() requires a single string argument."));
+ args.GetIsolate()->ThrowError(
+ "isOneByteString() requires a single string argument.");
return;
}
bool is_one_byte =
diff --git a/chromium/v8/src/extensions/vtunedomain-support-extension.cc b/chromium/v8/src/extensions/vtunedomain-support-extension.cc
index 793ffec7167..9a7715bb237 100644
--- a/chromium/v8/src/extensions/vtunedomain-support-extension.cc
+++ b/chromium/v8/src/extensions/vtunedomain-support-extension.cc
@@ -109,10 +109,9 @@ void VTuneDomainSupportExtension::Mark(
const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 3 || !args[0]->IsString() || !args[1]->IsString() ||
!args[2]->IsString()) {
- args.GetIsolate()->ThrowException(v8::String::NewFromUtf8Literal(
- args.GetIsolate(),
+ args.GetIsolate()->ThrowError(
"Parameter number should be exactly three, first domain name"
- "second task name, third start/end"));
+ "second task name, third start/end");
return;
}
@@ -130,7 +129,7 @@ void VTuneDomainSupportExtension::Mark(
int r = 0;
if ((r = libvtune::invoke(params.str().c_str())) != 0) {
- args.GetIsolate()->ThrowException(
+ args.GetIsolate()->ThrowError(
v8::String::NewFromUtf8(args.GetIsolate(), std::to_string(r).c_str())
.ToLocalChecked());
}
diff --git a/chromium/v8/src/flags/flag-definitions.h b/chromium/v8/src/flags/flag-definitions.h
index fa59c204613..e1f493ae7b1 100644
--- a/chromium/v8/src/flags/flag-definitions.h
+++ b/chromium/v8/src/flags/flag-definitions.h
@@ -151,6 +151,12 @@ struct MaybeBoolFlag {
#define COMPRESS_POINTERS_BOOL false
#endif
+#ifdef V8_MAP_PACKING
+#define V8_MAP_PACKING_BOOL true
+#else
+#define V8_MAP_PACKING_BOOL false
+#endif
+
#ifdef V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE
#define COMPRESS_POINTERS_IN_ISOLATE_CAGE_BOOL true
#else
@@ -176,7 +182,7 @@ struct MaybeBoolFlag {
#endif
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
- V8_TARGET_ARCH_ARM
+ V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_RISCV64
#define ENABLE_SPARKPLUG true
#else
// TODO(v8:11421): Enable Sparkplug for other architectures
@@ -259,8 +265,6 @@ DEFINE_BOOL(use_strict, false, "enforce strict mode")
DEFINE_BOOL(harmony, false, "enable all completed harmony features")
DEFINE_BOOL(harmony_shipping, true, "enable all shipped harmony features")
-// Enabling FinalizationRegistry#cleanupSome also enables weak refs
-DEFINE_IMPLICATION(harmony_weak_refs_with_cleanup_some, harmony_weak_refs)
// Update bootstrapper.cc whenever adding a new feature flag.
@@ -269,12 +273,13 @@ DEFINE_IMPLICATION(harmony_weak_refs_with_cleanup_some, harmony_weak_refs)
V(harmony_regexp_sequence, "RegExp Unicode sequence properties") \
V(harmony_weak_refs_with_cleanup_some, \
"harmony weak references with FinalizationRegistry.prototype.cleanupSome") \
- V(harmony_import_assertions, "harmony import assertions")
+ V(harmony_import_assertions, "harmony import assertions") \
+ V(harmony_rab_gsab, \
+ "harmony ResizableArrayBuffer / GrowableSharedArrayBuffer")
#ifdef V8_INTL_SUPPORT
-#define HARMONY_INPROGRESS(V) \
- HARMONY_INPROGRESS_BASE(V) \
- V(harmony_intl_best_fit_matcher, "Intl BestFitMatcher") \
+#define HARMONY_INPROGRESS(V) \
+ HARMONY_INPROGRESS_BASE(V) \
V(harmony_intl_displaynames_date_types, "Intl.DisplayNames date types")
#else
#define HARMONY_INPROGRESS(V) HARMONY_INPROGRESS_BASE(V)
@@ -282,14 +287,14 @@ DEFINE_IMPLICATION(harmony_weak_refs_with_cleanup_some, harmony_weak_refs)
// Features that are complete (but still behind --harmony/es-staging flag).
#define HARMONY_STAGED_BASE(V) \
- V(harmony_relative_indexing_methods, "harmony relative indexing methods") \
- V(harmony_class_static_blocks, "harmony static initializer blocks")
+ V(harmony_class_static_blocks, "harmony static initializer blocks") \
+ V(harmony_error_cause, "harmony error cause property")
#ifdef V8_INTL_SUPPORT
-#define HARMONY_STAGED(V) \
- HARMONY_STAGED_BASE(V) \
- V(harmony_intl_dateformat_day_period, \
- "Add dayPeriod option to DateTimeFormat")
+#define HARMONY_STAGED(V) \
+ HARMONY_STAGED_BASE(V) \
+ V(harmony_intl_best_fit_matcher, "Intl BestFitMatcher") \
+ V(harmony_intl_locale_info, "Intl locale info")
#else
#define HARMONY_STAGED(V) HARMONY_STAGED_BASE(V)
#endif
@@ -298,13 +303,16 @@ DEFINE_IMPLICATION(harmony_weak_refs_with_cleanup_some, harmony_weak_refs)
#define HARMONY_SHIPPING_BASE(V) \
V(harmony_sharedarraybuffer, "harmony sharedarraybuffer") \
V(harmony_atomics, "harmony atomics") \
- V(harmony_weak_refs, "harmony weak references") \
V(harmony_regexp_match_indices, "harmony regexp match indices") \
V(harmony_private_brand_checks, "harmony private brand checks") \
- V(harmony_top_level_await, "harmony top level await")
+ V(harmony_top_level_await, "harmony top level await") \
+ V(harmony_relative_indexing_methods, "harmony relative indexing methods")
#ifdef V8_INTL_SUPPORT
-#define HARMONY_SHIPPING(V) HARMONY_SHIPPING_BASE(V)
+#define HARMONY_SHIPPING(V) \
+ HARMONY_SHIPPING_BASE(V) \
+ V(harmony_intl_dateformat_day_period, \
+ "Add dayPeriod option to DateTimeFormat")
#else
#define HARMONY_SHIPPING(V) HARMONY_SHIPPING_BASE(V)
#endif
@@ -334,6 +342,13 @@ HARMONY_SHIPPING(FLAG_SHIPPING_FEATURES)
DEFINE_BOOL(builtin_subclassing, true,
"subclassing support in built-in methods")
+// If the following flag is set to `true`, the SharedArrayBuffer constructor is
+// enabled per context depending on the callback set via
+// `SetSharedArrayBufferConstructorEnabledCallback`. If no callback is set, the
+// SharedArrayBuffer constructor is disabled.
+DEFINE_BOOL(enable_sharedarraybuffer_per_context, false,
+ "enable the SharedArrayBuffer constructor per context")
+
#ifdef V8_INTL_SUPPORT
DEFINE_BOOL(icu_timezone_data, true, "get information about timezones from ICU")
#endif
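The new --enable-sharedarraybuffer-per-context flag above defers the decision to a per-context embedder callback. A hedged sketch of the embedder side, assuming the public Isolate::SetSharedArrayBufferConstructorEnabledCallback API; the cross-origin-isolation predicate is an assumed embedder helper, not a V8 API:

#include "include/v8.h"

// Assumed embedder-side predicate (declaration only, not part of V8).
bool IsCrossOriginIsolated(v8::Local<v8::Context> context);

// Expose SharedArrayBuffer only in contexts the embedder considers
// cross-origin isolated.
bool SharedArrayBufferEnabled(v8::Local<v8::Context> context) {
  return IsCrossOriginIsolated(context);
}

void ConfigureIsolate(v8::Isolate* isolate) {
  // Pairs with --enable-sharedarraybuffer-per-context.
  isolate->SetSharedArrayBufferConstructorEnabledCallback(
      SharedArrayBufferEnabled);
}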
@@ -362,6 +377,17 @@ DEFINE_BOOL(icu_timezone_data, true, "get information about timezones from ICU")
#define V8_SHARED_RO_HEAP_BOOL false
#endif
+DEFINE_BOOL(stress_snapshot, false,
+ "disables sharing of the read-only heap for testing")
+// Incremental marking is incompatible with the stress_snapshot mode;
+// specifically, serialization may clear bytecode arrays from shared function
+// infos which the MarkCompactCollector (running concurrently) may still need.
+// See also https://crbug.com/v8/10882.
+//
+// Note: This is not an issue in production because we don't clear SFI's
+// there (that only happens in mksnapshot and in --stress-snapshot mode).
+DEFINE_NEG_IMPLICATION(stress_snapshot, incremental_marking)
+
DEFINE_BOOL(lite_mode, V8_LITE_BOOL,
"enables trade-off of performance for memory savings")
@@ -379,6 +405,13 @@ DEFINE_IMPLICATION(lite_mode, optimize_for_size)
DEFINE_NEG_IMPLICATION(enable_third_party_heap, inline_new)
DEFINE_NEG_IMPLICATION(enable_third_party_heap, allocation_site_pretenuring)
DEFINE_NEG_IMPLICATION(enable_third_party_heap, turbo_allocation_folding)
+DEFINE_NEG_IMPLICATION(enable_third_party_heap, concurrent_recompilation)
+DEFINE_NEG_IMPLICATION(enable_third_party_heap, concurrent_inlining)
+DEFINE_NEG_IMPLICATION(enable_third_party_heap,
+ finalize_streaming_on_background)
+DEFINE_NEG_IMPLICATION(enable_third_party_heap, use_marking_progress_bar)
+DEFINE_NEG_IMPLICATION(enable_third_party_heap, move_object_start)
+DEFINE_NEG_IMPLICATION(enable_third_party_heap, concurrent_marking)
DEFINE_BOOL_READONLY(enable_third_party_heap, V8_ENABLE_THIRD_PARTY_HEAP_BOOL,
"Use third-party heap")
@@ -407,19 +440,15 @@ DEFINE_BOOL_READONLY(enable_unconditional_write_barriers,
"always use full write barriers")
#ifdef V8_ENABLE_SINGLE_GENERATION
-#define V8_GENERATION_BOOL true
+#define V8_SINGLE_GENERATION_BOOL true
#else
-#define V8_GENERATION_BOOL false
+#define V8_SINGLE_GENERATION_BOOL false
#endif
DEFINE_BOOL_READONLY(
- single_generation, V8_GENERATION_BOOL,
+ single_generation, V8_SINGLE_GENERATION_BOOL,
"allocate all objects from young generation to old generation")
-// Prevent inline allocation into new space
-DEFINE_NEG_IMPLICATION(single_generation, inline_new)
-DEFINE_NEG_IMPLICATION(single_generation, turbo_allocation_folding)
-
#ifdef V8_ENABLE_CONSERVATIVE_STACK_SCANNING
#define V8_ENABLE_CONSERVATIVE_STACK_SCANNING_BOOL true
#else
@@ -589,7 +618,6 @@ DEFINE_BOOL(trace_generalization, false, "trace map generalization")
// Flags for TurboProp.
DEFINE_BOOL(turboprop, false, "enable experimental turboprop mid-tier compiler")
-DEFINE_IMPLICATION(turboprop, turbo_direct_heap_access)
DEFINE_BOOL(turboprop_mid_tier_reg_alloc, true,
"enable mid-tier register allocator for turboprop")
DEFINE_BOOL(
@@ -619,11 +647,10 @@ DEFINE_BOOL(always_sparkplug, false, "directly tier up to Sparkplug code")
DEFINE_IMPLICATION(always_sparkplug, sparkplug)
#endif
DEFINE_STRING(sparkplug_filter, "*", "filter for Sparkplug baseline compiler")
+DEFINE_BOOL(sparkplug_needs_short_builtins, false,
+ "only enable Sparkplug baseline compiler when "
+ "--short-builtin-calls are also enabled")
DEFINE_BOOL(trace_baseline, false, "trace baseline compilation")
-#if !defined(V8_OS_MACOSX) || !defined(V8_HOST_ARCH_ARM64)
-// Don't disable --write-protect-code-memory on Apple Silicon.
-DEFINE_WEAK_VALUE_IMPLICATION(sparkplug, write_protect_code_memory, false)
-#endif
#undef FLAG
#define FLAG FLAG_FULL
@@ -652,13 +679,16 @@ DEFINE_BOOL(concurrent_inlining, false,
"run optimizing compiler's inlining phase on a separate thread")
DEFINE_BOOL(stress_concurrent_inlining, false,
"makes concurrent inlining more likely to trigger in tests")
-DEFINE_BOOL(turbo_direct_heap_access, false,
- "access kNeverSerialized objects directly from the heap")
DEFINE_IMPLICATION(stress_concurrent_inlining, concurrent_inlining)
DEFINE_NEG_IMPLICATION(stress_concurrent_inlining, lazy_feedback_allocation)
DEFINE_WEAK_VALUE_IMPLICATION(stress_concurrent_inlining, interrupt_budget,
15 * KB)
-DEFINE_IMPLICATION(concurrent_inlining, turbo_direct_heap_access)
+DEFINE_BOOL(
+ turbo_concurrent_get_property_access_info, false,
+ "concurrently call GetPropertyAccessInfo (only with --concurrent-inlining)")
+DEFINE_BOOL(turbo_concurrent_inlining_check_ispendingallocation, false,
+ "when --concurrent-inlining is enabled, check IsPendingAllocation "
+ "in Ref construction")
DEFINE_INT(max_serializer_nesting, 25,
"maximum levels for nesting child serializers")
DEFINE_WEAK_IMPLICATION(future, concurrent_inlining)
@@ -811,28 +841,20 @@ DEFINE_INT(reuse_opt_code_count, 0,
"don't discard optimized code for the specified number of deopts.")
DEFINE_BOOL(turbo_dynamic_map_checks, true,
"use dynamic map checks when generating code for property accesses "
- "if all handlers in an IC are the same for turboprop and NCI")
+ "if all handlers in an IC are the same for turboprop")
DEFINE_BOOL(turbo_compress_translation_arrays, false,
"compress translation arrays (experimental)")
DEFINE_BOOL(turbo_inline_js_wasm_calls, false, "inline JS->Wasm calls")
-
-// Native context independent (NCI) code.
-DEFINE_BOOL(turbo_nci, false,
- "enable experimental native context independent code.")
-// TODO(v8:8888): Temporary until NCI caching is implemented or
-// feedback collection is made unconditional.
-DEFINE_IMPLICATION(turbo_nci, turbo_collect_feedback_in_generic_lowering)
-DEFINE_BOOL(print_nci_code, false, "print native context independent code.")
-DEFINE_BOOL(trace_turbo_nci, false, "trace native context independent code.")
DEFINE_BOOL(turbo_collect_feedback_in_generic_lowering, true,
"enable experimental feedback collection in generic lowering.")
-// TODO(jgruber,v8:8888): Remove this flag once we've settled on an ageing
-// strategy.
DEFINE_BOOL(isolate_script_cache_ageing, true,
"enable ageing of the isolate script cache.")
-DEFINE_INT(script_run_delay, 0, "sleep [ms] on every Script::Run")
-DEFINE_INT(script_run_delay_once, 0, "sleep [ms] on the first Script::Run")
+DEFINE_FLOAT(script_delay, 0, "busy wait [ms] on every Script::Run")
+DEFINE_FLOAT(script_delay_once, 0, "busy wait [ms] on the first Script::Run")
+DEFINE_FLOAT(script_delay_fraction, 0.0,
+ "busy wait after each Script::Run by the given fraction of the "
+ "run's duration")
// Favor memory over execution speed.
DEFINE_BOOL(optimize_for_size, false,
@@ -862,7 +884,11 @@ DEFINE_VALUE_IMPLICATION(single_threaded, wasm_num_compilation_tasks, 0)
DEFINE_DEBUG_BOOL(trace_wasm_native_heap, false,
"trace wasm native heap events")
DEFINE_BOOL(wasm_write_protect_code_memory, false,
- "write protect code memory on the wasm native heap")
+ "write protect code memory on the wasm native heap with mprotect")
+DEFINE_BOOL(wasm_memory_protection_keys, false,
+ "protect wasm code memory with PKU if available, no protection "
+ "without support; fallback to mprotect by adding "
+ "--wasm-write-protect-code-memory")
DEFINE_DEBUG_BOOL(trace_wasm_serialization, false,
"trace serialization/deserialization")
DEFINE_BOOL(wasm_async_compilation, true,
@@ -1172,7 +1198,6 @@ DEFINE_GENERIC_IMPLICATION(
TracingFlags::gc_stats.store(
v8::tracing::TracingCategoryObserver::ENABLED_BY_NATIVE))
DEFINE_NEG_IMPLICATION(trace_gc_object_stats, incremental_marking)
-DEFINE_NEG_IMPLICATION(track_retaining_path, incremental_marking)
DEFINE_NEG_IMPLICATION(track_retaining_path, parallel_marking)
DEFINE_NEG_IMPLICATION(track_retaining_path, concurrent_marking)
DEFINE_BOOL(track_detached_contexts, true,
@@ -1261,11 +1286,19 @@ DEFINE_BOOL_READONLY(
"object space")
// assembler-ia32.cc / assembler-arm.cc / assembler-arm64.cc / assembler-x64.cc
+#ifdef V8_ENABLE_DEBUG_CODE
DEFINE_BOOL(debug_code, DEBUG_BOOL,
"generate extra code (assertions) for debugging")
+#else
+DEFINE_BOOL_READONLY(debug_code, false, "")
+#endif
+#ifdef V8_CODE_COMMENTS
DEFINE_BOOL(code_comments, false,
"emit comments in code disassembly; for more readable source "
"positions you should add --no-concurrent_recompilation")
+#else
+DEFINE_BOOL_READONLY(code_comments, false, "")
+#endif
DEFINE_BOOL(enable_sse3, true, "enable use of SSE3 instructions if available")
DEFINE_BOOL(enable_ssse3, true, "enable use of SSSE3 instructions if available")
DEFINE_BOOL(enable_sse4_1, true,
@@ -1293,10 +1326,18 @@ DEFINE_BOOL(partial_constant_pool, true,
DEFINE_STRING(sim_arm64_optional_features, "none",
"enable optional features on the simulator for testing: none or "
"all")
-DEFINE_BOOL(debug_riscv, false, "enable debug prints")
-// TODO(RISCV): https://github.com/v8-riscv/v8/issues/330
-DEFINE_BOOL(disable_riscv_constant_pool, true,
- "disable constant pool (RISCV only)")
+
+#if defined(V8_TARGET_ARCH_RISCV64)
+DEFINE_BOOL(riscv_trap_to_simulator_debugger, false,
+ "enable simulator trap to debugger")
+DEFINE_BOOL(riscv_debug, false, "enable debug prints")
+
+DEFINE_BOOL(riscv_constant_pool, true,
+ "enable constant pool (RISCV only)")
+
+DEFINE_BOOL(riscv_c_extension, false,
+ "enable compressed extension isa variant (RISCV only)")
+#endif
// Controlling source positions for Torque/CSA code.
DEFINE_BOOL(enable_source_at_csa_bind, false,
@@ -1472,6 +1513,8 @@ DEFINE_BOOL(native_code_counters, DEBUG_BOOL,
DEFINE_BOOL(super_ic, true, "use an IC for super property loads")
+DEFINE_BOOL(enable_mega_dom_ic, false, "use MegaDOM IC state for API objects")
+
// objects.cc
DEFINE_BOOL(thin_strings, true, "Enable ThinString support")
DEFINE_BOOL(trace_prototype_users, false,
@@ -1554,7 +1597,7 @@ DEFINE_BOOL(
"includes arguments for each function call in the error stack frames array")
DEFINE_BOOL(adjust_os_scheduling_parameters, true,
"adjust OS specific scheduling params for the isolate")
-DEFINE_BOOL(experimental_flush_embedded_blob_icache, false,
+DEFINE_BOOL(experimental_flush_embedded_blob_icache, true,
"Used in an experiment to evaluate icache flushing on certain CPUs")
// Flags for short builtin calls feature
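The script delay flags added above (--script-delay, --script-delay-once, --script-delay-fraction) describe a busy wait rather than a sleep. A minimal sketch of what such a busy wait could look like, assuming a monotonic clock; the helper name and the plumbing into Script::Run are illustrative only and not part of this patch:

#include <chrono>

// Hypothetical helper: spin until delay_ms milliseconds have elapsed. A sleep
// would yield the CPU, which is not what a "busy wait" flag asks for.
void BusyWaitMilliseconds(double delay_ms) {
  const auto start = std::chrono::steady_clock::now();
  const auto target = std::chrono::duration<double, std::milli>(delay_ms);
  while (std::chrono::steady_clock::now() - start < target) {
    // Intentionally spin.
  }
}

// For --script-delay-fraction, the wait would scale with the run's duration,
// e.g. BusyWaitMilliseconds(run_duration_ms * fraction).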
diff --git a/chromium/v8/src/flags/flags.cc b/chromium/v8/src/flags/flags.cc
index 9825b5b1eaf..2cd9174148a 100644
--- a/chromium/v8/src/flags/flags.cc
+++ b/chromium/v8/src/flags/flags.cc
@@ -16,6 +16,8 @@
#include "src/base/platform/platform.h"
#include "src/codegen/cpu-features.h"
#include "src/logging/counters.h"
+#include "src/logging/tracing-flags.h"
+#include "src/tracing/tracing-category-observer.h"
#include "src/utils/allocation.h"
#include "src/utils/memcopy.h"
#include "src/utils/ostreams.h"
diff --git a/chromium/v8/src/handles/global-handles.cc b/chromium/v8/src/handles/global-handles.cc
index 6bc290eac71..9e4aad27fb3 100644
--- a/chromium/v8/src/handles/global-handles.cc
+++ b/chromium/v8/src/handles/global-handles.cc
@@ -8,8 +8,10 @@
#include <cstdint>
#include <map>
+#include "include/v8.h"
#include "src/api/api-inl.h"
#include "src/base/compiler-specific.h"
+#include "src/base/sanitizer/asan.h"
#include "src/execution/vm-state-inl.h"
#include "src/heap/embedder-tracing.h"
#include "src/heap/heap-write-barrier-inl.h"
@@ -18,7 +20,6 @@
#include "src/objects/objects-inl.h"
#include "src/objects/slots.h"
#include "src/objects/visitors.h"
-#include "src/sanitizer/asan.h"
#include "src/tasks/cancelable-task.h"
#include "src/tasks/task-utils.h"
#include "src/utils/utils.h"
@@ -382,11 +383,11 @@ namespace {
void ExtractInternalFields(JSObject jsobject, void** embedder_fields, int len) {
int field_count = jsobject.GetEmbedderFieldCount();
- PtrComprCageBase cage_base = GetPtrComprCageBase(jsobject);
+ Isolate* isolate = GetIsolateForHeapSandbox(jsobject);
for (int i = 0; i < len; ++i) {
if (field_count == i) break;
void* pointer;
- if (EmbedderDataSlot(jsobject, i).ToAlignedPointer(cage_base, &pointer)) {
+ if (EmbedderDataSlot(jsobject, i).ToAlignedPointer(isolate, &pointer)) {
embedder_fields[i] = pointer;
}
}
@@ -1252,18 +1253,21 @@ void GlobalHandles::IdentifyWeakUnmodifiedObjects(
WeakSlotCallback is_unmodified) {
if (!FLAG_reclaim_unmodified_wrappers) return;
- LocalEmbedderHeapTracer* const tracer =
- isolate()->heap()->local_embedder_heap_tracer();
+ // Treat all objects as roots during incremental marking to avoid corrupting
+ // marking worklists.
+ if (isolate()->heap()->incremental_marking()->IsMarking()) return;
+
+ auto* const handler = isolate()->heap()->GetEmbedderRootsHandler();
for (TracedNode* node : traced_young_nodes_) {
if (node->IsInUse()) {
DCHECK(node->is_root());
if (is_unmodified(node->location())) {
v8::Value* value = ToApi<v8::Value>(node->handle());
if (node->has_destructor()) {
- node->set_root(tracer->IsRootForNonTracingGC(
+ node->set_root(handler->IsRoot(
*reinterpret_cast<v8::TracedGlobal<v8::Value>*>(&value)));
} else {
- node->set_root(tracer->IsRootForNonTracingGC(
+ node->set_root(handler->IsRoot(
*reinterpret_cast<v8::TracedReference<v8::Value>*>(&value)));
}
}
@@ -1337,8 +1341,7 @@ void GlobalHandles::IterateYoungWeakObjectsForPhantomHandles(
if (!FLAG_reclaim_unmodified_wrappers) return;
- LocalEmbedderHeapTracer* const tracer =
- isolate()->heap()->local_embedder_heap_tracer();
+ auto* const handler = isolate()->heap()->GetEmbedderRootsHandler();
for (TracedNode* node : traced_young_nodes_) {
if (!node->IsInUse()) continue;
@@ -1353,7 +1356,7 @@ void GlobalHandles::IterateYoungWeakObjectsForPhantomHandles(
node->ResetPhantomHandle(HandleHolder::kLive);
} else {
v8::Value* value = ToApi<v8::Value>(node->handle());
- tracer->ResetHandleInNonTracingGC(
+ handler->ResetRoot(
*reinterpret_cast<v8::TracedReference<v8::Value>*>(&value));
DCHECK(!node->IsInUse());
}
diff --git a/chromium/v8/src/handles/handles-inl.h b/chromium/v8/src/handles/handles-inl.h
index 360da25cf63..4c1817e80d2 100644
--- a/chromium/v8/src/handles/handles-inl.h
+++ b/chromium/v8/src/handles/handles-inl.h
@@ -5,12 +5,12 @@
#ifndef V8_HANDLES_HANDLES_INL_H_
#define V8_HANDLES_HANDLES_INL_H_
+#include "src/base/sanitizer/msan.h"
#include "src/execution/isolate.h"
#include "src/execution/local-isolate.h"
#include "src/handles/handles.h"
#include "src/handles/local-handles-inl.h"
#include "src/objects/objects.h"
-#include "src/sanitizer/msan.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/handles/local-handles-inl.h b/chromium/v8/src/handles/local-handles-inl.h
index c9f1588410b..b8621559492 100644
--- a/chromium/v8/src/handles/local-handles-inl.h
+++ b/chromium/v8/src/handles/local-handles-inl.h
@@ -5,10 +5,10 @@
#ifndef V8_HANDLES_LOCAL_HANDLES_INL_H_
#define V8_HANDLES_LOCAL_HANDLES_INL_H_
+#include "src/base/sanitizer/msan.h"
#include "src/execution/isolate.h"
#include "src/execution/local-isolate.h"
#include "src/handles/local-handles.h"
-#include "src/sanitizer/msan.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/heap/array-buffer-sweeper.cc b/chromium/v8/src/heap/array-buffer-sweeper.cc
index 8af2a60e9c1..108a3497776 100644
--- a/chromium/v8/src/heap/array-buffer-sweeper.cc
+++ b/chromium/v8/src/heap/array-buffer-sweeper.cc
@@ -101,30 +101,25 @@ void ArrayBufferSweeper::EnsureFinished() {
UNREACHABLE();
}
- DecrementExternalMemoryCounters();
+ UpdateCountersForConcurrentlySweptExtensions();
sweeping_in_progress_ = false;
}
-void ArrayBufferSweeper::AdjustCountersAndMergeIfPossible() {
+void ArrayBufferSweeper::MergeBackExtensionsWhenSwept() {
if (sweeping_in_progress_) {
DCHECK(job_.has_value());
if (job_->state_ == SweepingState::kDone) {
Merge();
sweeping_in_progress_ = false;
} else {
- DecrementExternalMemoryCounters();
+ UpdateCountersForConcurrentlySweptExtensions();
}
}
}
-void ArrayBufferSweeper::DecrementExternalMemoryCounters() {
+void ArrayBufferSweeper::UpdateCountersForConcurrentlySweptExtensions() {
size_t freed_bytes = freed_bytes_.exchange(0, std::memory_order_relaxed);
-
- if (freed_bytes > 0) {
- heap_->DecrementExternalBackingStoreBytes(
- ExternalBackingStoreType::kArrayBuffer, freed_bytes);
- heap_->update_external_memory(-static_cast<int64_t>(freed_bytes));
- }
+ DecrementExternalMemoryCounters(freed_bytes);
}
void ArrayBufferSweeper::RequestSweepYoung() {
@@ -166,7 +161,7 @@ void ArrayBufferSweeper::RequestSweep(SweepingScope scope) {
Prepare(scope);
job_->Sweep();
Merge();
- DecrementExternalMemoryCounters();
+ UpdateCountersForConcurrentlySweptExtensions();
}
}
@@ -228,18 +223,52 @@ void ArrayBufferSweeper::Append(JSArrayBuffer object,
old_bytes_ += bytes;
}
- AdjustCountersAndMergeIfPossible();
- DecrementExternalMemoryCounters();
+ MergeBackExtensionsWhenSwept();
IncrementExternalMemoryCounters(bytes);
}
+void ArrayBufferSweeper::Detach(JSArrayBuffer object,
+ ArrayBufferExtension* extension) {
+ size_t bytes = extension->ClearAccountingLength();
+
+ // We cannot free the extension eagerly here, since extensions are tracked in
+ // a singly linked list. The next GC will remove it automatically.
+
+ if (!sweeping_in_progress_) {
+ // If concurrent sweeping isn't running at the moment, we can also adjust
+ // young_bytes_ or old_bytes_ right away.
+ if (Heap::InYoungGeneration(object)) {
+ DCHECK_GE(young_bytes_, bytes);
+ young_bytes_ -= bytes;
+ young_.bytes_ -= bytes;
+ } else {
+ DCHECK_GE(old_bytes_, bytes);
+ old_bytes_ -= bytes;
+ old_.bytes_ -= bytes;
+ }
+ }
+
+ MergeBackExtensionsWhenSwept();
+ DecrementExternalMemoryCounters(bytes);
+}
+
void ArrayBufferSweeper::IncrementExternalMemoryCounters(size_t bytes) {
+ if (bytes == 0) return;
heap_->IncrementExternalBackingStoreBytes(
ExternalBackingStoreType::kArrayBuffer, bytes);
reinterpret_cast<v8::Isolate*>(heap_->isolate())
->AdjustAmountOfExternalAllocatedMemory(static_cast<int64_t>(bytes));
}
+void ArrayBufferSweeper::DecrementExternalMemoryCounters(size_t bytes) {
+ if (bytes == 0) return;
+ heap_->DecrementExternalBackingStoreBytes(
+ ExternalBackingStoreType::kArrayBuffer, bytes);
+ // Unlike IncrementExternalMemoryCounters we don't use
+ // AdjustAmountOfExternalAllocatedMemory such that we never start a GC here.
+ heap_->update_external_memory(-static_cast<int64_t>(bytes));
+}
+
void ArrayBufferSweeper::IncrementFreedBytes(size_t bytes) {
if (bytes == 0) return;
freed_bytes_.fetch_add(bytes, std::memory_order_relaxed);
diff --git a/chromium/v8/src/heap/array-buffer-sweeper.h b/chromium/v8/src/heap/array-buffer-sweeper.h
index 0c15428b754..6dd7ed97f6c 100644
--- a/chromium/v8/src/heap/array-buffer-sweeper.h
+++ b/chromium/v8/src/heap/array-buffer-sweeper.h
@@ -59,8 +59,12 @@ class ArrayBufferSweeper {
void RequestSweepYoung();
void RequestSweepFull();
+ // Track the given ArrayBufferExtension for the given JSArrayBuffer.
void Append(JSArrayBuffer object, ArrayBufferExtension* extension);
+ // Detaches an ArrayBufferExtension from a JSArrayBuffer.
+ void Detach(JSArrayBuffer object, ArrayBufferExtension* extension);
+
ArrayBufferList young() { return young_; }
ArrayBufferList old() { return old_; }
@@ -98,10 +102,11 @@ class ArrayBufferSweeper {
base::Optional<SweepingJob> job_;
void Merge();
- void AdjustCountersAndMergeIfPossible();
+ void MergeBackExtensionsWhenSwept();
- void DecrementExternalMemoryCounters();
+ void UpdateCountersForConcurrentlySweptExtensions();
void IncrementExternalMemoryCounters(size_t bytes);
+ void DecrementExternalMemoryCounters(size_t bytes);
void IncrementFreedBytes(size_t bytes);
void RequestSweep(SweepingScope sweeping_task);
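The sweeper above drains freed_bytes_ with an atomic exchange so that bytes recorded by concurrent sweeping are neither lost nor double counted. A minimal standalone sketch of that accumulate-and-drain pattern, with hypothetical names:

#include <atomic>
#include <cstddef>

// Producers add freed bytes with relaxed ordering; the consumer drains the
// accumulated total with exchange(0), mirroring the freed_bytes_ usage above.
class FreedBytesCounter {
 public:
  void Add(size_t bytes) {
    if (bytes == 0) return;
    bytes_.fetch_add(bytes, std::memory_order_relaxed);
  }
  // Returns everything accumulated so far and resets the counter in one atomic
  // step, so concurrent additions are reported on the next drain instead of
  // being dropped.
  size_t Drain() { return bytes_.exchange(0, std::memory_order_relaxed); }

 private:
  std::atomic<size_t> bytes_{0};
};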
diff --git a/chromium/v8/src/heap/base/asm/riscv64/push_registers_asm.cc b/chromium/v8/src/heap/base/asm/riscv64/push_registers_asm.cc
index 1de4055a283..2d90aab1829 100644
--- a/chromium/v8/src/heap/base/asm/riscv64/push_registers_asm.cc
+++ b/chromium/v8/src/heap/base/asm/riscv64/push_registers_asm.cc
@@ -5,21 +5,26 @@
// Push all callee-saved registers to get them on the stack for conservative
// stack scanning.
//
-// See asm/x64/push_registers_clang.cc for why the function is not generated
+// See asm/x64/push_registers_asm.cc for why the function is not generated
// using clang.
//
-// Do not depend on V8_TARGET_OS_* defines as some embedders may override the
-// GN toolchain (e.g. ChromeOS) and not provide them.
+// Calling convention source:
+// https://riscv.org/wp-content/uploads/2015/01/riscv-calling.pdf Table 18.2
asm(".global PushAllRegistersAndIterateStack \n"
".type PushAllRegistersAndIterateStack, %function \n"
".hidden PushAllRegistersAndIterateStack \n"
"PushAllRegistersAndIterateStack: \n"
// Push all callee-saved registers and save return address.
- " addi sp, sp, -96 \n"
- " sd ra, 88(sp) \n"
- " sd s8, 80(sp) \n"
- " sd sp, 72(sp) \n"
- " sd gp, 64(sp) \n"
+ " addi sp, sp, -112 \n"
+ // Save return address.
+ " sd ra, 104(sp) \n"
+ // sp is callee-saved.
+ " sd sp, 96(sp) \n"
+ // s0-s11 are callee-saved.
+ " sd s11, 88(sp) \n"
+ " sd s10, 80(sp) \n"
+ " sd s9, 72(sp) \n"
+ " sd s8, 64(sp) \n"
" sd s7, 56(sp) \n"
" sd s6, 48(sp) \n"
" sd s5, 40(sp) \n"
@@ -28,18 +33,19 @@ asm(".global PushAllRegistersAndIterateStack \n"
" sd s2, 16(sp) \n"
" sd s1, 8(sp) \n"
" sd s0, 0(sp) \n"
- // Maintain frame pointer.
- " mv s8, sp \n"
+ // Maintain frame pointer (fp is s0).
+ " mv s0, sp \n"
// Pass 1st parameter (a0) unchanged (Stack*).
// Pass 2nd parameter (a1) unchanged (StackVisitor*).
- // Save 3rd parameter (a2; IterateStackCallback).
+ // Save 3rd parameter (a2; IterateStackCallback) to a3.
" mv a3, a2 \n"
+ // Pass 3rd parameter as sp (stack pointer).
" mv a2, sp \n"
// Call the callback.
" jalr a3 \n"
// Load return address.
- " ld ra, 88(sp) \n"
+ " ld ra, 104(sp) \n"
// Restore frame pointer.
- " ld s8, 80(sp) \n"
- " addi sp, sp, 96 \n"
+ " ld s0, 0(sp) \n"
+ " addi sp, sp, 112 \n"
" jr ra \n");
diff --git a/chromium/v8/src/heap/base/asm/x64/push_registers_asm.cc b/chromium/v8/src/heap/base/asm/x64/push_registers_asm.cc
index 68f7918c93c..9780b877b8c 100644
--- a/chromium/v8/src/heap/base/asm/x64/push_registers_asm.cc
+++ b/chromium/v8/src/heap/base/asm/x64/push_registers_asm.cc
@@ -19,7 +19,7 @@
#ifdef _WIN64
// We maintain 16-byte alignment at calls. There is an 8-byte return address
-// on the stack and we push 72 bytes which maintains 16-byte stack alignment
+// on the stack and we push 232 bytes which maintains 16-byte stack alignment
// at the call.
// Source: https://docs.microsoft.com/en-us/cpp/build/x64-calling-convention
asm(".globl PushAllRegistersAndIterateStack \n"
@@ -36,6 +36,18 @@ asm(".globl PushAllRegistersAndIterateStack \n"
" push %r13 \n"
" push %r14 \n"
" push %r15 \n"
+ " sub $160, %rsp \n"
+ // Use aligned instrs as we are certain that the stack is properly aligned.
+ " movdqa %xmm6, 144(%rsp) \n"
+ " movdqa %xmm7, 128(%rsp) \n"
+ " movdqa %xmm8, 112(%rsp) \n"
+ " movdqa %xmm9, 96(%rsp) \n"
+ " movdqa %xmm10, 80(%rsp) \n"
+ " movdqa %xmm11, 64(%rsp) \n"
+ " movdqa %xmm12, 48(%rsp) \n"
+ " movdqa %xmm13, 32(%rsp) \n"
+ " movdqa %xmm14, 16(%rsp) \n"
+ " movdqa %xmm15, (%rsp) \n"
// Pass 1st parameter (rcx) unchanged (Stack*).
// Pass 2nd parameter (rdx) unchanged (StackVisitor*).
// Save 3rd parameter (r8; IterateStackCallback)
@@ -45,7 +57,7 @@ asm(".globl PushAllRegistersAndIterateStack \n"
// Call the callback.
" call *%r9 \n"
// Pop the callee-saved registers.
- " add $64, %rsp \n"
+ " add $224, %rsp \n"
// Restore rbp as it was used as frame pointer.
" pop %rbp \n"
" ret \n");
diff --git a/chromium/v8/src/heap/base/asm/x64/push_registers_masm.S b/chromium/v8/src/heap/base/asm/x64/push_registers_masm.S
index 627843830fa..a32e193c2f1 100644
--- a/chromium/v8/src/heap/base/asm/x64/push_registers_masm.S
+++ b/chromium/v8/src/heap/base/asm/x64/push_registers_masm.S
@@ -13,8 +13,8 @@ PushAllRegistersAndIterateStack:
;; stack scanning.
;;
;; We maintain 16-byte alignment at calls. There is an 8-byte return address
- ;; on the stack and we push 72 bytes which maintains 16-byte stack alignment
- ;; at the call.
+ ;; on the stack and we push 232 bytes which maintains 16-byte stack
+ ;; alignment at the call.
;; Source: https://docs.microsoft.com/en-us/cpp/build/x64-calling-convention
;;
;; rbp is callee-saved. Maintain proper frame pointer for debugging.
@@ -28,6 +28,18 @@ PushAllRegistersAndIterateStack:
push r13
push r14
push r15
+ sub rsp, 160
+ ;; Use aligned instrs as we are certain that the stack is properly aligned.
+ movdqa xmmword ptr [rsp + 144], xmm6
+ movdqa xmmword ptr [rsp + 128], xmm7
+ movdqa xmmword ptr [rsp + 112], xmm8
+ movdqa xmmword ptr [rsp + 96], xmm9
+ movdqa xmmword ptr [rsp + 80], xmm10
+ movdqa xmmword ptr [rsp + 64], xmm11
+ movdqa xmmword ptr [rsp + 48], xmm12
+ movdqa xmmword ptr [rsp + 32], xmm13
+ movdqa xmmword ptr [rsp + 16], xmm14
+ movdqa xmmword ptr [rsp], xmm15
;; Pass 1st parameter (rcx) unchanged (Stack*).
;; Pass 2nd parameter (rdx) unchanged (StackVisitor*).
;; Save 3rd parameter (r8; IterateStackCallback)
@@ -37,7 +49,7 @@ PushAllRegistersAndIterateStack:
;; Call the callback.
call r9
;; Pop the callee-saved registers.
- add rsp, 64
+ add rsp, 224
;; Restore rbp as it was used as frame pointer.
pop rbp
ret
diff --git a/chromium/v8/src/heap/base/stack.cc b/chromium/v8/src/heap/base/stack.cc
index 939487ca77d..f6d522f1931 100644
--- a/chromium/v8/src/heap/base/stack.cc
+++ b/chromium/v8/src/heap/base/stack.cc
@@ -6,9 +6,11 @@
#include <limits>
+#include "src/base/macros.h"
#include "src/base/platform/platform.h"
+#include "src/base/sanitizer/asan.h"
+#include "src/base/sanitizer/msan.h"
#include "src/heap/cppgc/globals.h"
-#include "src/heap/cppgc/sanitizers.h"
namespace heap {
namespace base {
@@ -41,7 +43,7 @@ namespace {
// No ASAN support as accessing fake frames otherwise results in
// "stack-use-after-scope" warnings.
-NO_SANITIZE_ADDRESS
+DISABLE_ASAN
void IterateAsanFakeFrameIfNecessary(StackVisitor* visitor,
void* asan_fake_stack,
const void* stack_start,
@@ -77,7 +79,7 @@ void IterateSafeStackIfNecessary(StackVisitor* visitor) {
#if defined(__has_feature)
#if __has_feature(safe_stack)
// Source:
- // https://github.com/llvm/llvm-project/blob/master/compiler-rt/lib/safestack/safestack.cpp
+ // https://github.com/llvm/llvm-project/blob/main/compiler-rt/lib/safestack/safestack.cpp
constexpr size_t kSafeStackAlignmentBytes = 16;
void* stack_end = __builtin___get_unsafe_stack_ptr();
void* stack_start = __builtin___get_unsafe_stack_top();
@@ -101,7 +103,7 @@ void IterateSafeStackIfNecessary(StackVisitor* visitor) {
// any data that needs to be scanned.
V8_NOINLINE
// No ASAN support as method accesses redzones while walking the stack.
-NO_SANITIZE_ADDRESS
+DISABLE_ASAN
void IteratePointersImpl(const Stack* stack, StackVisitor* visitor,
intptr_t* stack_end) {
#ifdef V8_USE_ADDRESS_SANITIZER
@@ -116,7 +118,7 @@ void IteratePointersImpl(const Stack* stack, StackVisitor* visitor,
// MSAN: Instead of unpoisoning the whole stack, the slot's value is copied
// into a local which is unpoisoned.
void* address = *current;
- MSAN_UNPOISON(&address, sizeof(address));
+ MSAN_MEMORY_IS_INITIALIZED(&address, sizeof(address));
if (address == nullptr) continue;
visitor->VisitPointer(address);
#ifdef V8_USE_ADDRESS_SANITIZER
@@ -135,5 +137,10 @@ void Stack::IteratePointers(StackVisitor* visitor) const {
IterateSafeStackIfNecessary(visitor);
}
+void Stack::IteratePointersUnsafe(StackVisitor* visitor,
+ uintptr_t stack_end) const {
+ IteratePointersImpl(this, visitor, reinterpret_cast<intptr_t*>(stack_end));
+}
+
} // namespace base
} // namespace heap
diff --git a/chromium/v8/src/heap/base/stack.h b/chromium/v8/src/heap/base/stack.h
index a46e6e660ed..d7267deee7e 100644
--- a/chromium/v8/src/heap/base/stack.h
+++ b/chromium/v8/src/heap/base/stack.h
@@ -26,10 +26,20 @@ class V8_EXPORT_PRIVATE Stack final {
// Returns true if |slot| is part of the stack and false otherwise.
bool IsOnStack(void* slot) const;
- // Word-aligned iteration of the stack. Slot values are passed on to
- // |visitor|.
+ // Word-aligned iteration of the stack. Callee-saved registers are pushed to
+ // the stack before iterating pointers. Slot values are passed on to
+ // `visitor`.
void IteratePointers(StackVisitor* visitor) const;
+ // Word-aligned iteration of the stack, starting at `stack_end`. Slot values
+ // are passed on to `visitor`. This is intended to be used with verifiers that
+ // only visit a subset of the stack visited by IteratePointers().
+ //
+ // **Ignores:**
+ // - Callee-saved registers.
+ // - SafeStack.
+ void IteratePointersUnsafe(StackVisitor* visitor, uintptr_t stack_end) const;
+
// Returns the start of the stack.
const void* stack_start() const { return stack_start_; }
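IteratePointersImpl above walks word-aligned stack slots between the current stack end and the recorded stack start and reports every non-null value. A minimal standalone sketch of that conservative scan with a hypothetical visitor interface (the real heap::base::StackVisitor, the ASAN fake-frame handling, and the MSAN unpoisoning are omitted):

#include <cstdint>

// Hypothetical visitor interface; stands in for heap::base::StackVisitor.
struct PointerVisitor {
  virtual ~PointerVisitor() = default;
  virtual void VisitPointer(const void* address) = 0;
};

// Scan every word-aligned slot in [stack_end, stack_start) and report non-null
// values. On a downward-growing stack, stack_end is the lower address, which
// matches the iteration direction in IteratePointersImpl.
void ScanStackConservatively(PointerVisitor* visitor, intptr_t* stack_end,
                             intptr_t* stack_start) {
  for (intptr_t* current = stack_end; current < stack_start; ++current) {
    const void* address = reinterpret_cast<const void*>(*current);
    if (address == nullptr) continue;
    visitor->VisitPointer(address);
  }
}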
diff --git a/chromium/v8/src/heap/basic-memory-chunk.h b/chromium/v8/src/heap/basic-memory-chunk.h
index 1b2d7cb5dad..993291dc0e0 100644
--- a/chromium/v8/src/heap/basic-memory-chunk.h
+++ b/chromium/v8/src/heap/basic-memory-chunk.h
@@ -106,6 +106,9 @@ class BasicMemoryChunk {
// because there exists a potential pointer to somewhere in the chunk which
// can't be updated.
PINNED = 1u << 22,
+
+ // This page belongs to a shared heap.
+ IN_SHARED_HEAP = 1u << 23,
};
static const intptr_t kAlignment =
@@ -255,6 +258,8 @@ class BasicMemoryChunk {
bool InOldSpace() const;
V8_EXPORT_PRIVATE bool InLargeObjectSpace() const;
+ bool InSharedHeap() const { return IsFlagSet(IN_SHARED_HEAP); }
+
bool IsWritable() const {
// If this is a read-only space chunk but heap_ is non-null, it has not yet
// been sealed and can be written to.
@@ -294,11 +299,13 @@ class BasicMemoryChunk {
// Only works if the pointer is in the first kPageSize of the MemoryChunk.
static BasicMemoryChunk* FromAddress(Address a) {
+ DCHECK(!V8_ENABLE_THIRD_PARTY_HEAP_BOOL);
return reinterpret_cast<BasicMemoryChunk*>(BaseAddress(a));
}
// Only works if the object is in the first kPageSize of the MemoryChunk.
static BasicMemoryChunk* FromHeapObject(HeapObject o) {
+ DCHECK(!V8_ENABLE_THIRD_PARTY_HEAP_BOOL);
return reinterpret_cast<BasicMemoryChunk*>(BaseAddress(o.ptr()));
}
diff --git a/chromium/v8/src/heap/code-range.cc b/chromium/v8/src/heap/code-range.cc
new file mode 100644
index 00000000000..738c12710c9
--- /dev/null
+++ b/chromium/v8/src/heap/code-range.cc
@@ -0,0 +1,172 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/code-range.h"
+
+#include "src/base/lazy-instance.h"
+#include "src/common/globals.h"
+#include "src/flags/flags.h"
+#include "src/heap/heap-inl.h"
+
+namespace v8 {
+namespace internal {
+
+namespace {
+
+DEFINE_LAZY_LEAKY_OBJECT_GETTER(std::shared_ptr<CodeRange>,
+ GetProcessWideCodeRangeCage)
+
+DEFINE_LAZY_LEAKY_OBJECT_GETTER(CodeRangeAddressHint, GetCodeRangeAddressHint)
+
+void FunctionInStaticBinaryForAddressHint() {}
+} // anonymous namespace
+
+Address CodeRangeAddressHint::GetAddressHint(size_t code_range_size) {
+ base::MutexGuard guard(&mutex_);
+ auto it = recently_freed_.find(code_range_size);
+ if (it == recently_freed_.end() || it->second.empty()) {
+ return FUNCTION_ADDR(&FunctionInStaticBinaryForAddressHint);
+ }
+ Address result = it->second.back();
+ it->second.pop_back();
+ return result;
+}
+
+void CodeRangeAddressHint::NotifyFreedCodeRange(Address code_range_start,
+ size_t code_range_size) {
+ base::MutexGuard guard(&mutex_);
+ recently_freed_[code_range_size].push_back(code_range_start);
+}
+
+CodeRange::~CodeRange() { Free(); }
+
+bool CodeRange::InitReservation(v8::PageAllocator* page_allocator,
+ size_t requested) {
+ DCHECK_NE(requested, 0);
+
+ if (requested <= kMinimumCodeRangeSize) {
+ requested = kMinimumCodeRangeSize;
+ }
+ const size_t reserved_area =
+ kReservedCodeRangePages * MemoryAllocator::GetCommitPageSize();
+ if (requested < (kMaximalCodeRangeSize - reserved_area)) {
+ requested += RoundUp(reserved_area, MemoryChunk::kPageSize);
+ // Fulfilling both the reserved pages requirement and huge code area
+ // alignment is not supported (requires re-implementation).
+ DCHECK_LE(kMinExpectedOSPageSize, page_allocator->AllocatePageSize());
+ }
+ DCHECK_IMPLIES(kPlatformRequiresCodeRange,
+ requested <= kMaximalCodeRangeSize);
+
+ VirtualMemoryCage::ReservationParams params;
+ params.page_allocator = page_allocator;
+ params.reservation_size = requested;
+ params.base_alignment =
+ VirtualMemoryCage::ReservationParams::kAnyBaseAlignment;
+ params.base_bias_size = reserved_area;
+ params.page_size = MemoryChunk::kPageSize;
+ params.requested_start_hint =
+ GetCodeRangeAddressHint()->GetAddressHint(requested);
+
+ if (!VirtualMemoryCage::InitReservation(params)) return false;
+
+ // On some platforms, specifically Win64, we need to reserve some pages at
+ // the beginning of an executable space. See
+ // https://cs.chromium.org/chromium/src/components/crash/content/
+ // app/crashpad_win.cc?rcl=fd680447881449fba2edcf0589320e7253719212&l=204
+ // for details.
+ if (reserved_area > 0) {
+ if (!reservation()->SetPermissions(reservation()->address(), reserved_area,
+ PageAllocator::kReadWrite)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+void CodeRange::Free() {
+ if (IsReserved()) {
+ GetCodeRangeAddressHint()->NotifyFreedCodeRange(
+ reservation()->region().begin(), reservation()->region().size());
+ VirtualMemoryCage::Free();
+ }
+}
+
+uint8_t* CodeRange::RemapEmbeddedBuiltins(Isolate* isolate,
+ const uint8_t* embedded_blob_code,
+ size_t embedded_blob_code_size) {
+ base::MutexGuard guard(&remap_embedded_builtins_mutex_);
+
+ const base::AddressRegion& code_region = reservation()->region();
+ CHECK_NE(code_region.begin(), kNullAddress);
+ CHECK(!code_region.is_empty());
+
+ uint8_t* embedded_blob_code_copy =
+ embedded_blob_code_copy_.load(std::memory_order_acquire);
+ if (embedded_blob_code_copy) {
+ DCHECK(
+ code_region.contains(reinterpret_cast<Address>(embedded_blob_code_copy),
+ embedded_blob_code_size));
+ SLOW_DCHECK(memcmp(embedded_blob_code, embedded_blob_code_copy,
+ embedded_blob_code_size) == 0);
+ return embedded_blob_code_copy;
+ }
+
+ const size_t kAllocatePageSize = page_allocator()->AllocatePageSize();
+ size_t allocate_code_size =
+ RoundUp(embedded_blob_code_size, kAllocatePageSize);
+
+ // Allocate the re-embedded code blob in the end.
+ void* hint = reinterpret_cast<void*>(code_region.end() - allocate_code_size);
+
+ embedded_blob_code_copy =
+ reinterpret_cast<uint8_t*>(page_allocator()->AllocatePages(
+ hint, allocate_code_size, kAllocatePageSize,
+ PageAllocator::kNoAccess));
+
+ if (!embedded_blob_code_copy) {
+ V8::FatalProcessOutOfMemory(
+ isolate, "Can't allocate space for re-embedded builtins");
+ }
+
+ size_t code_size =
+ RoundUp(embedded_blob_code_size, page_allocator()->CommitPageSize());
+
+ if (!page_allocator()->SetPermissions(embedded_blob_code_copy, code_size,
+ PageAllocator::kReadWrite)) {
+ V8::FatalProcessOutOfMemory(isolate,
+ "Re-embedded builtins: set permissions");
+ }
+ memcpy(embedded_blob_code_copy, embedded_blob_code, embedded_blob_code_size);
+
+ if (!page_allocator()->SetPermissions(embedded_blob_code_copy, code_size,
+ PageAllocator::kReadExecute)) {
+ V8::FatalProcessOutOfMemory(isolate,
+ "Re-embedded builtins: set permissions");
+ }
+
+ embedded_blob_code_copy_.store(embedded_blob_code_copy,
+ std::memory_order_release);
+ return embedded_blob_code_copy;
+}
+
+// static
+void CodeRange::InitializeProcessWideCodeRangeOnce(
+ v8::PageAllocator* page_allocator, size_t requested_size) {
+ *GetProcessWideCodeRangeCage() = std::make_shared<CodeRange>();
+ if (!GetProcessWideCodeRange()->InitReservation(page_allocator,
+ requested_size)) {
+ V8::FatalProcessOutOfMemory(
+ nullptr, "Failed to reserve virtual memory for CodeRange");
+ }
+}
+
+// static
+std::shared_ptr<CodeRange> CodeRange::GetProcessWideCodeRange() {
+ return *GetProcessWideCodeRangeCage();
+}
+
+} // namespace internal
+} // namespace v8
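RemapEmbeddedBuiltins above publishes the copied blob with a release store, and later readers pick it up with an acquire load, so only the first caller pays for the copy. A minimal sketch of that publish-once pattern using hypothetical names, with a plain heap allocation standing in for the page allocator and permission changes:

#include <atomic>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <mutex>

class BlobCopy {
 public:
  const uint8_t* GetOrCreate(const uint8_t* blob, size_t size) {
    // Fast path: the copy was already published by another caller.
    if (const uint8_t* existing = copy_.load(std::memory_order_acquire)) {
      return existing;
    }
    std::lock_guard<std::mutex> guard(mutex_);
    // Re-check under the lock: another thread may have won the race.
    if (const uint8_t* existing = copy_.load(std::memory_order_relaxed)) {
      return existing;
    }
    uint8_t* fresh = new uint8_t[size];
    std::memcpy(fresh, blob, size);
    // Release store pairs with the acquire load on the fast path.
    copy_.store(fresh, std::memory_order_release);
    return fresh;
  }

 private:
  std::mutex mutex_;
  std::atomic<const uint8_t*> copy_{nullptr};
};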
diff --git a/chromium/v8/src/heap/code-range.h b/chromium/v8/src/heap/code-range.h
new file mode 100644
index 00000000000..b1bc6020b58
--- /dev/null
+++ b/chromium/v8/src/heap/code-range.h
@@ -0,0 +1,147 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CODE_RANGE_H_
+#define V8_HEAP_CODE_RANGE_H_
+
+#include <unordered_map>
+#include <vector>
+
+#include "src/base/platform/mutex.h"
+#include "src/common/globals.h"
+#include "src/utils/allocation.h"
+
+namespace v8 {
+namespace internal {
+
+// The process-wide singleton that keeps track of code range regions with the
+// intention to reuse free code range regions as a workaround for CFG memory
+// leaks (see crbug.com/870054).
+class CodeRangeAddressHint {
+ public:
+ // Returns the most recently freed code range start address for the given
+ // size. If there is no such entry, then a random address is returned.
+ V8_EXPORT_PRIVATE Address GetAddressHint(size_t code_range_size);
+
+ V8_EXPORT_PRIVATE void NotifyFreedCodeRange(Address code_range_start,
+ size_t code_range_size);
+
+ private:
+ base::Mutex mutex_;
+ // A map from code range size to an array of recently freed code range
+ // addresses. There should be O(1) different code range sizes.
+ // The length of each array is limited by the peak number of code ranges,
+ // which should also be O(1).
+ std::unordered_map<size_t, std::vector<Address>> recently_freed_;
+};
+
+// A code range is a virtual memory cage that may contain executable code. It
+// has the following layout.
+//
+// +------------+-----+---------------- ~~~ -+
+// | RW | ... | ... |
+// +------------+-----+----------------- ~~~ -+
+// ^ ^ ^
+// start base allocatable base
+//
+// <------------> <------------------------>
+// reserved allocatable region
+// <------------------------------------------->
+// code region
+//
+// The start of the reservation may include a reserved page with read-write
+// access as required by some platforms (Win64). The cage's page allocator does
+// not control the optional reserved page at the beginning of the code region.
+//
+// The following conditions hold:
+// 1) |reservation()->region()| >= |optional RW pages| +
+// |reservation()->page_allocator()|
+// 2) |reservation()| is AllocatePageSize()-aligned
+// 3) |reservation()->page_allocator()| (i.e. allocatable base) is
+// MemoryChunk::kAlignment-aligned
+// 4) |base()| is CommitPageSize()-aligned
+class CodeRange final : public VirtualMemoryCage {
+ public:
+ V8_EXPORT_PRIVATE ~CodeRange();
+
+ uint8_t* embedded_blob_code_copy() const {
+ // remap_embedded_builtins_mutex_ is designed to protect against write
+ // contention on embedded_blob_code_copy_. It is safe to read without taking
+ // the mutex. It is read to check if short builtins ought to be enabled because
+ // a shared CodeRange has already remapped builtins and to find where the
+ // instruction stream for a builtin is.
+ //
+ // For the first, this racing with an Isolate calling RemapEmbeddedBuiltins
+ // may result in disabling short builtins, which is not a correctness issue.
+ //
+ // For the second, this racing with an Isolate calling RemapEmbeddedBuiltins
+ // may cause an already running Isolate that did not have short builtins
+ // enabled (due to max old generation size) to switch over to using remapped
+ // builtins, which is also not a correctness issue as the remapped builtins
+ // are byte-equivalent.
+ //
+ // Both these scenarios should be rare. The initial Isolate is usually
+ // created by itself, i.e. without contention. Additionally, the first
+ // Isolate usually remaps builtins on machines with enough memory, not
+ // subsequent Isolates in the same process.
+ return embedded_blob_code_copy_.load(std::memory_order_acquire);
+ }
+
+#ifdef V8_OS_WIN64
+ // 64-bit Windows needs to track how many Isolates are using the CodeRange for
+ // registering and unregistering unwind info. Note that even though
+ // CodeRanges are used with std::shared_ptr, std::shared_ptr::use_count should
+ // not be used for synchronization as it's usually implemented with a relaxed
+ // read.
+ uint32_t AtomicIncrementUnwindInfoUseCount() {
+ return unwindinfo_use_count_.fetch_add(1, std::memory_order_acq_rel);
+ }
+
+ uint32_t AtomicDecrementUnwindInfoUseCount() {
+ return unwindinfo_use_count_.fetch_sub(1, std::memory_order_acq_rel);
+ }
+#endif // V8_OS_WIN64
+
+ bool InitReservation(v8::PageAllocator* page_allocator, size_t requested);
+
+ void Free();
+
+ // Remap and copy the embedded builtins into this CodeRange. This method is
+ // idempotent and only performs the copy once, so it can be used uniformly
+ // regardless of having a per-Isolate or a shared pointer cage. Returns the
+ // address of the copy.
+ //
+ // The builtins code region will be freed with the code range at tear down.
+ //
+ // When ENABLE_SLOW_DCHECKS is on, the contents of the embedded_blob_code are
+ // compared against the already copied version.
+ uint8_t* RemapEmbeddedBuiltins(Isolate* isolate,
+ const uint8_t* embedded_blob_code,
+ size_t embedded_blob_code_size);
+
+ static void InitializeProcessWideCodeRangeOnce(
+ v8::PageAllocator* page_allocator, size_t requested_size);
+
+ // If InitializeProcessWideCodeRangeOnce has been called, returns the
+ // initialized CodeRange. Otherwise returns an empty std::shared_ptr.
+ V8_EXPORT_PRIVATE static std::shared_ptr<CodeRange> GetProcessWideCodeRange();
+
+ private:
+ // Used when short builtin calls are enabled, where embedded builtins are
+ // copied into the CodeRange so calls can be nearer.
+ std::atomic<uint8_t*> embedded_blob_code_copy_{nullptr};
+
+ // When sharing a CodeRange among Isolates, calls to RemapEmbeddedBuiltins may
+ // race during Isolate::Init.
+ base::Mutex remap_embedded_builtins_mutex_;
+
+#ifdef V8_OS_WIN64
+ std::atomic<uint32_t> unwindinfo_use_count_{0};
+#endif
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_CODE_RANGE_H_
diff --git a/chromium/v8/src/heap/collection-barrier.cc b/chromium/v8/src/heap/collection-barrier.cc
index 92007690aa7..feeb23d0008 100644
--- a/chromium/v8/src/heap/collection-barrier.cc
+++ b/chromium/v8/src/heap/collection-barrier.cc
@@ -22,8 +22,7 @@ bool CollectionBarrier::CollectionRequested() {
}
LocalHeap::ThreadState CollectionBarrier::main_thread_state_relaxed() {
- LocalHeap* main_thread_local_heap =
- heap_->isolate()->main_thread_local_heap();
+ LocalHeap* main_thread_local_heap = heap_->main_thread_local_heap();
return main_thread_local_heap->state_relaxed();
}
diff --git a/chromium/v8/src/heap/combined-heap.h b/chromium/v8/src/heap/combined-heap.h
index 55664114d39..9c9ed9039fc 100644
--- a/chromium/v8/src/heap/combined-heap.h
+++ b/chromium/v8/src/heap/combined-heap.h
@@ -36,7 +36,8 @@ V8_WARN_UNUSED_RESULT inline bool IsValidHeapObject(Heap* heap,
if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL)
return third_party_heap::Heap::IsValidHeapObject(object);
else
- return ReadOnlyHeap::Contains(object) || heap->Contains(object);
+ return ReadOnlyHeap::Contains(object) || heap->Contains(object) ||
+ heap->SharedHeapContains(object);
}
} // namespace internal
diff --git a/chromium/v8/src/heap/concurrent-allocator-inl.h b/chromium/v8/src/heap/concurrent-allocator-inl.h
index c92b91ca476..07d669b17cb 100644
--- a/chromium/v8/src/heap/concurrent-allocator-inl.h
+++ b/chromium/v8/src/heap/concurrent-allocator-inl.h
@@ -20,6 +20,7 @@ namespace internal {
AllocationResult ConcurrentAllocator::AllocateRaw(int object_size,
AllocationAlignment alignment,
AllocationOrigin origin) {
+ DCHECK(!FLAG_enable_third_party_heap);
// TODO(dinfuehr): Add support for allocation observers
#ifdef DEBUG
local_heap_->VerifyCurrent();
diff --git a/chromium/v8/src/heap/concurrent-marking.cc b/chromium/v8/src/heap/concurrent-marking.cc
index eb1511f71d9..6f0aa89ebdd 100644
--- a/chromium/v8/src/heap/concurrent-marking.cc
+++ b/chromium/v8/src/heap/concurrent-marking.cc
@@ -8,6 +8,7 @@
#include <unordered_map>
#include "include/v8config.h"
+#include "src/common/globals.h"
#include "src/execution/isolate.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
@@ -136,13 +137,13 @@ class ConcurrentMarkingVisitor final
int VisitSeqOneByteString(Map map, SeqOneByteString object) {
if (!ShouldVisit(object)) return 0;
VisitMapPointer(object);
- return SeqOneByteString::SizeFor(object.synchronized_length());
+ return SeqOneByteString::SizeFor(object.length(kAcquireLoad));
}
int VisitSeqTwoByteString(Map map, SeqTwoByteString object) {
if (!ShouldVisit(object)) return 0;
VisitMapPointer(object);
- return SeqTwoByteString::SizeFor(object.synchronized_length());
+ return SeqTwoByteString::SizeFor(object.length(kAcquireLoad));
}
// Implements ephemeron semantics: Marks value if key is already reachable.
@@ -232,9 +233,9 @@ class ConcurrentMarkingVisitor final
template <typename T>
int VisitLeftTrimmableArray(Map map, T object) {
- // The synchronized_length() function checks that the length is a Smi.
+ // The length() function checks that the length is a Smi.
// This is not necessarily the case if the array is being left-trimmed.
- Object length = object.unchecked_synchronized_length();
+ Object length = object.unchecked_length(kAcquireLoad);
if (!ShouldVisit(object)) return 0;
// The cached length must be the actual length as the array is not black.
// Left trimming marks the array black before over-writing the length.
@@ -460,16 +461,28 @@ void ConcurrentMarking::Run(JobDelegate* delegate,
break;
}
objects_processed++;
- // The order of the two loads is important.
- Address new_space_top = heap_->new_space()->original_top_acquire();
- Address new_space_limit = heap_->new_space()->original_limit_relaxed();
- Address new_large_object = heap_->new_lo_space()->pending_object();
+
+ Address new_space_top = kNullAddress;
+ Address new_space_limit = kNullAddress;
+ Address new_large_object = kNullAddress;
+
+ if (heap_->new_space()) {
+ // The order of the two loads is important.
+ new_space_top = heap_->new_space()->original_top_acquire();
+ new_space_limit = heap_->new_space()->original_limit_relaxed();
+ }
+
+ if (heap_->new_lo_space()) {
+ new_large_object = heap_->new_lo_space()->pending_object();
+ }
+
Address addr = object.address();
+
if ((new_space_top <= addr && addr < new_space_limit) ||
addr == new_large_object) {
local_marking_worklists.PushOnHold(object);
} else {
- Map map = object.synchronized_map(isolate);
+ Map map = object.map(isolate, kAcquireLoad);
if (is_per_context_mode) {
Address context;
if (native_context_inferrer.Infer(isolate, map, object, &context)) {
diff --git a/chromium/v8/src/heap/cppgc-js/cpp-heap.cc b/chromium/v8/src/heap/cppgc-js/cpp-heap.cc
index 636f666521b..006c35808f3 100644
--- a/chromium/v8/src/heap/cppgc-js/cpp-heap.cc
+++ b/chromium/v8/src/heap/cppgc-js/cpp-heap.cc
@@ -5,6 +5,8 @@
#include "src/heap/cppgc-js/cpp-heap.h"
#include <cstdint>
+#include <memory>
+#include <numeric>
#include "include/cppgc/heap-consistency.h"
#include "include/cppgc/platform.h"
@@ -29,6 +31,7 @@
#include "src/heap/cppgc/marking-visitor.h"
#include "src/heap/cppgc/object-allocator.h"
#include "src/heap/cppgc/prefinalizer-handler.h"
+#include "src/heap/cppgc/raw-heap.h"
#include "src/heap/cppgc/stats-collector.h"
#include "src/heap/cppgc/sweeper.h"
#include "src/heap/embedder-tracing.h"
@@ -65,6 +68,13 @@ cppgc::HeapStatistics CppHeap::CollectStatistics(
detail_level);
}
+void CppHeap::CollectCustomSpaceStatisticsAtLastGC(
+ std::vector<cppgc::CustomSpaceIndex> custom_spaces,
+ std::unique_ptr<CustomSpaceStatisticsReceiver> receiver) {
+ return internal::CppHeap::From(this)->CollectCustomSpaceStatisticsAtLastGC(
+ std::move(custom_spaces), std::move(receiver));
+}
+
void CppHeap::EnableDetachedGarbageCollectionsForTesting() {
return internal::CppHeap::From(this)
->EnableDetachedGarbageCollectionsForTesting();
@@ -200,7 +210,7 @@ UnifiedHeapMarker::UnifiedHeapMarker(Key key, Heap* v8_heap,
void UnifiedHeapMarker::AddObject(void* object) {
mutator_marking_state_.MarkAndPush(
- cppgc::internal::HeapObjectHeader::FromPayload(object));
+ cppgc::internal::HeapObjectHeader::FromObject(object));
}
} // namespace
@@ -318,10 +328,6 @@ void CppHeap::TracePrologue(TraceFlags flags) {
}
bool CppHeap::AdvanceTracing(double deadline_in_ms) {
- // TODO(chromium:1154636): The kAtomicMark/kIncrementalMark scope below is
- // needed for recording all cpp marking time. Note that it can lead to double
- // accounting since this scope is also accounted under an outer v8 scope.
- // Make sure to only account this scope once.
cppgc::internal::StatsCollector::EnabledScope stats_scope(
stats_collector(),
in_atomic_pause_ ? cppgc::internal::StatsCollector::kAtomicMark
@@ -342,8 +348,6 @@ bool CppHeap::IsTracingDone() { return marking_done_; }
void CppHeap::EnterFinalPause(EmbedderStackState stack_state) {
CHECK(!in_disallow_gc_scope());
- cppgc::internal::StatsCollector::EnabledScope stats_scope(
- stats_collector(), cppgc::internal::StatsCollector::kAtomicMark);
in_atomic_pause_ = true;
if (override_stack_state_) {
stack_state = *override_stack_state_;
@@ -359,8 +363,6 @@ void CppHeap::TraceEpilogue(TraceSummary* trace_summary) {
CHECK(in_atomic_pause_);
CHECK(marking_done_);
{
- cppgc::internal::StatsCollector::EnabledScope stats_scope(
- stats_collector(), cppgc::internal::StatsCollector::kAtomicMark);
cppgc::subtle::DisallowGarbageCollectionScope disallow_gc_scope(*this);
marker_->LeaveAtomicPause();
}
@@ -376,7 +378,8 @@ void CppHeap::TraceEpilogue(TraceSummary* trace_summary) {
// TODO(chromium:1056170): replace build flag with dedicated flag.
#if DEBUG
UnifiedHeapMarkingVerifier verifier(*this);
- verifier.Run(stack_state_of_prev_gc_);
+ verifier.Run(stack_state_of_prev_gc(), stack_end_of_current_gc(),
+ stats_collector()->marked_bytes());
#endif
{
@@ -422,12 +425,17 @@ void CppHeap::ReportBufferedAllocationSizeIfPossible() {
return;
}
- if (buffered_allocated_bytes_ < 0) {
- DecreaseAllocatedSize(static_cast<size_t>(-buffered_allocated_bytes_));
+ // The calls below may trigger full GCs that are synchronous and also execute
+ // epilogue callbacks. Since such callbacks may allocate, the counter must
+ // already be zeroed by that time.
+ const int64_t bytes_to_report = buffered_allocated_bytes_;
+ buffered_allocated_bytes_ = 0;
+
+ if (bytes_to_report < 0) {
+ DecreaseAllocatedSize(static_cast<size_t>(-bytes_to_report));
} else {
- IncreaseAllocatedSize(static_cast<size_t>(buffered_allocated_bytes_));
+ IncreaseAllocatedSize(static_cast<size_t>(bytes_to_report));
}
- buffered_allocated_bytes_ = 0;
}
void CppHeap::CollectGarbageForTesting(
@@ -437,6 +445,8 @@ void CppHeap::CollectGarbageForTesting(
// Finish sweeping in case it is still running.
sweeper().FinishIfRunning();
+ SetStackEndOfCurrentGC(v8::base::Stack::GetCurrentStackPosition());
+
if (isolate_) {
// Go through EmbedderHeapTracer API and perform a unified heap collection.
GarbageCollectionForTesting(stack_state);
@@ -481,5 +491,82 @@ void CppHeap::FinalizeIncrementalGarbageCollectionForTesting(
sweeper_.FinishIfRunning();
}
+namespace {
+
+void ReportCustomSpaceStatistics(
+ cppgc::internal::RawHeap& raw_heap,
+ std::vector<cppgc::CustomSpaceIndex> custom_spaces,
+ std::unique_ptr<CustomSpaceStatisticsReceiver> receiver) {
+ for (auto custom_space_index : custom_spaces) {
+ const cppgc::internal::BaseSpace* space =
+ raw_heap.CustomSpace(custom_space_index);
+ size_t allocated_bytes = std::accumulate(
+ space->begin(), space->end(), 0, [](size_t sum, auto* page) {
+ return sum + page->AllocatedBytesAtLastGC();
+ });
+ receiver->AllocatedBytes(custom_space_index, allocated_bytes);
+ }
+}
+
+class CollectCustomSpaceStatisticsAtLastGCTask final : public v8::Task {
+ public:
+ static constexpr v8::base::TimeDelta kTaskDelayMs =
+ v8::base::TimeDelta::FromMilliseconds(10);
+
+ CollectCustomSpaceStatisticsAtLastGCTask(
+ cppgc::internal::HeapBase& heap,
+ std::vector<cppgc::CustomSpaceIndex> custom_spaces,
+ std::unique_ptr<CustomSpaceStatisticsReceiver> receiver)
+ : heap_(heap),
+ custom_spaces_(std::move(custom_spaces)),
+ receiver_(std::move(receiver)) {}
+
+ void Run() final {
+ cppgc::internal::Sweeper& sweeper = heap_.sweeper();
+ if (sweeper.PerformSweepOnMutatorThread(
+ heap_.platform()->MonotonicallyIncreasingTime() +
+ kStepSizeMs.InSecondsF())) {
+ // Sweeping is done.
+ DCHECK(!sweeper.IsSweepingInProgress());
+ ReportCustomSpaceStatistics(heap_.raw_heap(), std::move(custom_spaces_),
+ std::move(receiver_));
+ } else {
+ heap_.platform()->GetForegroundTaskRunner()->PostDelayedTask(
+ std::make_unique<CollectCustomSpaceStatisticsAtLastGCTask>(
+ heap_, std::move(custom_spaces_), std::move(receiver_)),
+ kTaskDelayMs.InSecondsF());
+ }
+ }
+
+ private:
+ static constexpr v8::base::TimeDelta kStepSizeMs =
+ v8::base::TimeDelta::FromMilliseconds(5);
+
+ cppgc::internal::HeapBase& heap_;
+ std::vector<cppgc::CustomSpaceIndex> custom_spaces_;
+ std::unique_ptr<CustomSpaceStatisticsReceiver> receiver_;
+};
+
+constexpr v8::base::TimeDelta
+ CollectCustomSpaceStatisticsAtLastGCTask::kTaskDelayMs;
+constexpr v8::base::TimeDelta
+ CollectCustomSpaceStatisticsAtLastGCTask::kStepSizeMs;
+
+} // namespace
+
+void CppHeap::CollectCustomSpaceStatisticsAtLastGC(
+ std::vector<cppgc::CustomSpaceIndex> custom_spaces,
+ std::unique_ptr<CustomSpaceStatisticsReceiver> receiver) {
+ if (sweeper().IsSweepingInProgress()) {
+ platform()->GetForegroundTaskRunner()->PostDelayedTask(
+ std::make_unique<CollectCustomSpaceStatisticsAtLastGCTask>(
+ AsBase(), std::move(custom_spaces), std::move(receiver)),
+ CollectCustomSpaceStatisticsAtLastGCTask::kTaskDelayMs.InSecondsF());
+ return;
+ }
+ ReportCustomSpaceStatistics(raw_heap(), std::move(custom_spaces),
+ std::move(receiver));
+}
+
} // namespace internal
} // namespace v8
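CollectCustomSpaceStatisticsAtLastGC above keeps re-posting a delayed task until concurrent sweeping has finished and only then reports the statistics. A minimal sketch of that self-re-posting pattern against a generic task-runner interface; the type and function names here are illustrative, not the actual v8::Platform API:

#include <functional>

// Stand-in for a foreground task runner with delayed posting.
struct TaskRunner {
  virtual ~TaskRunner() = default;
  virtual void PostDelayed(std::function<void()> task, double delay_s) = 0;
};

// Re-post `step` every `delay_s` seconds until it reports completion, then run
// `done` exactly once. `runner` must outlive the chain of posted tasks.
void RunUntilDone(TaskRunner& runner, std::function<bool()> step,
                  std::function<void()> done, double delay_s) {
  if (step()) {
    done();
    return;
  }
  runner.PostDelayed(
      [&runner, step, done, delay_s]() {
        RunUntilDone(runner, step, done, delay_s);
      },
      delay_s);
}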
diff --git a/chromium/v8/src/heap/cppgc-js/cpp-heap.h b/chromium/v8/src/heap/cppgc-js/cpp-heap.h
index b13fd25a323..cf99b8fe602 100644
--- a/chromium/v8/src/heap/cppgc-js/cpp-heap.h
+++ b/chromium/v8/src/heap/cppgc-js/cpp-heap.h
@@ -5,6 +5,11 @@
#ifndef V8_HEAP_CPPGC_JS_CPP_HEAP_H_
#define V8_HEAP_CPPGC_JS_CPP_HEAP_H_
+#if CPPGC_IS_STANDALONE
+static_assert(
+ false, "V8 targets can not be built with cppgc_is_standalone set to true.");
+#endif
+
#include "include/v8-cppgc.h"
#include "include/v8.h"
#include "src/base/macros.h"
@@ -55,6 +60,10 @@ class V8_EXPORT_PRIVATE CppHeap final
void CollectGarbageForTesting(
cppgc::internal::GarbageCollector::Config::StackState);
+ void CollectCustomSpaceStatisticsAtLastGC(
+ std::vector<cppgc::CustomSpaceIndex>,
+ std::unique_ptr<CustomSpaceStatisticsReceiver>);
+
// v8::EmbedderHeapTracer interface.
void RegisterV8References(
const std::vector<std::pair<void*, void*> >& embedder_fields) final;
diff --git a/chromium/v8/src/heap/cppgc-js/cpp-snapshot.cc b/chromium/v8/src/heap/cppgc-js/cpp-snapshot.cc
index 79a863e3026..17929247ee4 100644
--- a/chromium/v8/src/heap/cppgc-js/cpp-snapshot.cc
+++ b/chromium/v8/src/heap/cppgc-js/cpp-snapshot.cc
@@ -382,7 +382,7 @@ class CppGraphBuilderImpl final {
EmbedderNode* AddNode(const HeapObjectHeader& header) {
return static_cast<EmbedderNode*>(
graph_.AddNode(std::unique_ptr<v8::EmbedderGraph::Node>{
- new EmbedderNode(header.GetName().value, header.GetSize())}));
+ new EmbedderNode(header.GetName().value, header.AllocatedSize())}));
}
void AddEdge(State& parent, const HeapObjectHeader& header) {
@@ -418,7 +418,7 @@ class CppGraphBuilderImpl final {
if (HasEmbedderDataBackref(
reinterpret_cast<v8::internal::Isolate*>(cpp_heap_.isolate()),
- v8_value, parent.header()->Payload())) {
+ v8_value, parent.header()->ObjectStart())) {
parent.get_node()->SetWrapperNode(v8_node);
auto* profiler =
@@ -512,7 +512,7 @@ class VisiblityVisitor final : public JSVisitor {
void Visit(const void*, cppgc::TraceDescriptor desc) final {
graph_builder_.VisitForVisibility(
&parent_scope_.ParentAsRegularState(),
- HeapObjectHeader::FromPayload(desc.base_object_payload));
+ HeapObjectHeader::FromObject(desc.base_object_payload));
}
void VisitRoot(const void*, cppgc::TraceDescriptor,
const cppgc::SourceLocation&) final {}
@@ -556,13 +556,13 @@ class GraphBuildingVisitor final : public JSVisitor {
void Visit(const void*, cppgc::TraceDescriptor desc) final {
graph_builder_.AddEdge(
parent_scope_.ParentAsRegularState(),
- HeapObjectHeader::FromPayload(desc.base_object_payload));
+ HeapObjectHeader::FromObject(desc.base_object_payload));
}
void VisitRoot(const void*, cppgc::TraceDescriptor desc,
const cppgc::SourceLocation& loc) final {
graph_builder_.VisitRootForGraphBuilding(
parent_scope_.ParentAsRootState(),
- HeapObjectHeader::FromPayload(desc.base_object_payload), loc);
+ HeapObjectHeader::FromObject(desc.base_object_payload), loc);
}
void VisitWeakRoot(const void*, cppgc::TraceDescriptor, cppgc::WeakCallback,
const void*, const cppgc::SourceLocation&) final {}
diff --git a/chromium/v8/src/heap/cppgc-js/unified-heap-marking-verifier.cc b/chromium/v8/src/heap/cppgc-js/unified-heap-marking-verifier.cc
index ea14b520480..b0f8595ec74 100644
--- a/chromium/v8/src/heap/cppgc-js/unified-heap-marking-verifier.cc
+++ b/chromium/v8/src/heap/cppgc-js/unified-heap-marking-verifier.cc
@@ -35,7 +35,7 @@ class UnifiedHeapVerificationVisitor final : public JSVisitor {
void VisitWeakContainer(const void* object, cppgc::TraceDescriptor,
cppgc::TraceDescriptor weak_desc, cppgc::WeakCallback,
- const void*) {
+ const void*) final {
if (!object) return;
// Contents of weak containers are found themselves through page iteration
@@ -58,13 +58,8 @@ class UnifiedHeapVerificationVisitor final : public JSVisitor {
UnifiedHeapMarkingVerifier::UnifiedHeapMarkingVerifier(
cppgc::internal::HeapBase& heap_base)
: MarkingVerifierBase(
- heap_base, std::make_unique<UnifiedHeapVerificationVisitor>(state_)) {
-}
-
-void UnifiedHeapMarkingVerifier::SetCurrentParent(
- const cppgc::internal::HeapObjectHeader* parent) {
- state_.SetCurrentParent(parent);
-}
+ heap_base, state_,
+ std::make_unique<UnifiedHeapVerificationVisitor>(state_)) {}
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/heap/cppgc-js/unified-heap-marking-verifier.h b/chromium/v8/src/heap/cppgc-js/unified-heap-marking-verifier.h
index 3a54b4dd323..bb2ac09e385 100644
--- a/chromium/v8/src/heap/cppgc-js/unified-heap-marking-verifier.h
+++ b/chromium/v8/src/heap/cppgc-js/unified-heap-marking-verifier.h
@@ -16,8 +16,6 @@ class V8_EXPORT_PRIVATE UnifiedHeapMarkingVerifier final
explicit UnifiedHeapMarkingVerifier(cppgc::internal::HeapBase&);
~UnifiedHeapMarkingVerifier() final = default;
- void SetCurrentParent(const cppgc::internal::HeapObjectHeader*) final;
-
private:
// TODO(chromium:1056170): Use a verification state that can handle JS
// references.
diff --git a/chromium/v8/src/heap/cppgc/caged-heap.cc b/chromium/v8/src/heap/cppgc/caged-heap.cc
index 951fb0e853c..3a8304f4481 100644
--- a/chromium/v8/src/heap/cppgc/caged-heap.cc
+++ b/chromium/v8/src/heap/cppgc/caged-heap.cc
@@ -45,6 +45,27 @@ VirtualMemory ReserveCagedHeap(PageAllocator* platform_allocator) {
UNREACHABLE();
}
+class CppgcBoundedPageAllocator final : public v8::base::BoundedPageAllocator {
+ public:
+ CppgcBoundedPageAllocator(v8::PageAllocator* page_allocator, Address start,
+ size_t size, size_t allocate_page_size)
+ : BoundedPageAllocator(page_allocator, start, size, allocate_page_size) {}
+
+ bool FreePages(void* address, size_t size) final {
+ // BoundedPageAllocator is not guaranteed to allocate zeroed pages.
+ // Specifically, it is possible that BPA frees a page and then tries to
+ // reallocate the same page before the OS has had a chance to asynchronously
+ // reclaim that page. In such cases, the contents of the page would not have
+ // been cleared by the OS and the reallocated page will keep its previous
+ // contents. To mitigate this problem, CppgcBoundedPageAllocator clears all
+ // pages before they are freed. This also includes protected guard pages, so
+ // CppgcBoundedPageAllocator needs to update permissions before clearing.
+ SetPermissions(address, size, Permission::kReadWrite);
+ memset(address, 0, size);
+ return v8::base::BoundedPageAllocator::FreePages(address, size);
+ }
+};
+
} // namespace
CagedHeap::CagedHeap(HeapBase* heap_base, PageAllocator* platform_allocator)
@@ -73,7 +94,7 @@ CagedHeap::CagedHeap(HeapBase* heap_base, PageAllocator* platform_allocator)
caged_heap_start -
reinterpret_cast<CagedAddress>(reserved_area_.address());
- bounded_allocator_ = std::make_unique<CagedHeap::AllocatorType>(
+ bounded_allocator_ = std::make_unique<CppgcBoundedPageAllocator>(
platform_allocator, caged_heap_start,
reserved_area_.size() - local_data_size_with_padding, kPageSize);
}
diff --git a/chromium/v8/src/heap/cppgc/compactor.cc b/chromium/v8/src/heap/cppgc/compactor.cc
index f4498e7fbc3..5f687050144 100644
--- a/chromium/v8/src/heap/cppgc/compactor.cc
+++ b/chromium/v8/src/heap/cppgc/compactor.cc
@@ -15,6 +15,7 @@
#include "src/heap/cppgc/heap-base.h"
#include "src/heap/cppgc/heap-page.h"
#include "src/heap/cppgc/heap-space.h"
+#include "src/heap/cppgc/memory.h"
#include "src/heap/cppgc/object-poisoner.h"
#include "src/heap/cppgc/raw-heap.h"
#include "src/heap/cppgc/stats-collector.h"
@@ -129,7 +130,7 @@ void MovableReferences::AddOrFilter(MovableReference* slot) {
interior_movable_references_.find(slot));
interior_movable_references_.emplace(slot, nullptr);
#if DEBUG
- interior_slot_to_object_.emplace(slot, slot_header.Payload());
+ interior_slot_to_object_.emplace(slot, slot_header.ObjectStart());
#endif // DEBUG
}
@@ -144,8 +145,8 @@ void MovableReferences::Relocate(Address from, Address to) {
// find the corresponding slot A.x. Object A may be moved already and the
// memory may have been freed, which would result in a crash.
if (!interior_movable_references_.empty()) {
- const HeapObjectHeader& header = HeapObjectHeader::FromPayload(to);
- const size_t size = header.GetSize() - sizeof(HeapObjectHeader);
+ const HeapObjectHeader& header = HeapObjectHeader::FromObject(to);
+ const size_t size = header.ObjectSize();
RelocateInteriorReferences(from, to, size);
}
@@ -275,14 +276,14 @@ class CompactionState final {
// Return remaining available pages to the free page pool, decommitting
// them from the pagefile.
for (NormalPage* page : available_pages_) {
- SET_MEMORY_INACCESSIBLE(page->PayloadStart(), page->PayloadSize());
+ SetMemoryInaccessible(page->PayloadStart(), page->PayloadSize());
NormalPage::Destroy(page);
}
}
void FinishCompactingPage(NormalPage* page) {
-#if DEBUG || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) || \
- defined(MEMORY_SANITIZER)
+#if DEBUG || defined(V8_USE_MEMORY_SANITIZER) || \
+ defined(V8_USE_ADDRESS_SANITIZER)
// Zap the unused portion, until it is either compacted into or freed.
if (current_page_ != page) {
ZapMemory(page->PayloadStart(), page->PayloadSize());
@@ -303,7 +304,7 @@ class CompactionState final {
current_page_->PayloadSize() - used_bytes_in_current_page_;
Address payload = current_page_->PayloadStart();
Address free_start = payload + used_bytes_in_current_page_;
- SET_MEMORY_INACCESSIBLE(free_start, freed_size);
+ SetMemoryInaccessible(free_start, freed_size);
space_->free_list().Add({free_start, freed_size});
current_page_->object_start_bitmap().SetBit(free_start);
}
@@ -329,7 +330,7 @@ void CompactPage(NormalPage* page, CompactionState& compaction_state) {
header_address < page->PayloadEnd();) {
HeapObjectHeader* header =
reinterpret_cast<HeapObjectHeader*>(header_address);
- size_t size = header->GetSize();
+ size_t size = header->AllocatedSize();
DCHECK_GT(size, 0u);
DCHECK_LT(size, kPageSize);
@@ -349,8 +350,8 @@ void CompactPage(NormalPage* page, CompactionState& compaction_state) {
// As compaction is under way, leave the freed memory accessible
// while compacting the rest of the page. We just zap the payload
// to catch out other finalizers trying to access it.
-#if DEBUG || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) || \
- defined(MEMORY_SANITIZER)
+#if DEBUG || defined(V8_USE_MEMORY_SANITIZER) || \
+ defined(V8_USE_ADDRESS_SANITIZER)
ZapMemory(header, size);
#endif
header_address += size;
diff --git a/chromium/v8/src/heap/cppgc/concurrent-marker.cc b/chromium/v8/src/heap/cppgc/concurrent-marker.cc
index 34953b9ec3b..6763515f280 100644
--- a/chromium/v8/src/heap/cppgc/concurrent-marker.cc
+++ b/chromium/v8/src/heap/cppgc/concurrent-marker.cc
@@ -125,7 +125,7 @@ void ConcurrentMarkingTask::ProcessWorklists(
BasePage::FromPayload(item.base_object_payload)
->SynchronizedLoad();
const HeapObjectHeader& header =
- HeapObjectHeader::FromPayload(item.base_object_payload);
+ HeapObjectHeader::FromObject(item.base_object_payload);
DCHECK(!header.IsInConstruction<AccessMode::kAtomic>());
DCHECK(header.IsMarked<AccessMode::kAtomic>());
concurrent_marking_state.AccountMarkedBytes(header);
diff --git a/chromium/v8/src/heap/cppgc/explicit-management.cc b/chromium/v8/src/heap/cppgc/explicit-management.cc
index b3ab5f5b515..6e327339aa1 100644
--- a/chromium/v8/src/heap/cppgc/explicit-management.cc
+++ b/chromium/v8/src/heap/cppgc/explicit-management.cc
@@ -9,50 +9,46 @@
#include "src/heap/cppgc/heap-base.h"
#include "src/heap/cppgc/heap-object-header.h"
#include "src/heap/cppgc/heap-page.h"
-#include "src/heap/cppgc/sanitizers.h"
+#include "src/heap/cppgc/memory.h"
namespace cppgc {
namespace internal {
namespace {
-std::pair<bool, BasePage*> CanModifyObject(void* object) {
- // object is guaranteed to be of type GarbageCollected, so getting the
- // BasePage is okay for regular and large objects.
- auto* base_page = BasePage::FromPayload(object);
- auto* heap = base_page->heap();
+bool InGC(HeapHandle& heap_handle) {
+ const auto& heap = HeapBase::From(heap_handle);
// Whenever the GC is active, avoid modifying the object as it may mess with
// state that the GC needs.
- const bool in_gc = heap->in_atomic_pause() || heap->marker() ||
- heap->sweeper().IsSweepingInProgress();
- return {!in_gc, base_page};
+ return heap.in_atomic_pause() || heap.marker() ||
+ heap.sweeper().IsSweepingInProgress();
}
} // namespace
-void FreeUnreferencedObject(void* object) {
- bool can_free;
- BasePage* base_page;
- std::tie(can_free, base_page) = CanModifyObject(object);
- if (!can_free) {
+void FreeUnreferencedObject(HeapHandle& heap_handle, void* object) {
+ if (InGC(heap_handle)) {
return;
}
- auto& header = HeapObjectHeader::FromPayload(object);
+ auto& header = HeapObjectHeader::FromObject(object);
header.Finalize();
+ // `object` is guaranteed to be of type GarbageCollected, so getting the
+ // BasePage is okay for regular and large objects.
+ BasePage* base_page = BasePage::FromPayload(object);
if (base_page->is_large()) { // Large object.
base_page->space()->RemovePage(base_page);
base_page->heap()->stats_collector()->NotifyExplicitFree(
LargePage::From(base_page)->PayloadSize());
LargePage::Destroy(LargePage::From(base_page));
} else { // Regular object.
- const size_t header_size = header.GetSize();
+ const size_t header_size = header.AllocatedSize();
auto* normal_page = NormalPage::From(base_page);
auto& normal_space = *static_cast<NormalPageSpace*>(base_page->space());
auto& lab = normal_space.linear_allocation_buffer();
- ConstAddress payload_end = header.PayloadEnd();
- SET_MEMORY_INACCESSIBLE(&header, header_size);
+ ConstAddress payload_end = header.ObjectEnd();
+ SetMemoryInaccessible(&header, header_size);
if (payload_end == lab.start()) { // Returning to LAB.
lab.Set(reinterpret_cast<Address>(&header), lab.size() + header_size);
normal_page->object_start_bitmap().ClearBit(lab.start());
@@ -69,18 +65,18 @@ namespace {
bool Grow(HeapObjectHeader& header, BasePage& base_page, size_t new_size,
size_t size_delta) {
- DCHECK_GE(new_size, header.GetSize() + kAllocationGranularity);
+ DCHECK_GE(new_size, header.AllocatedSize() + kAllocationGranularity);
DCHECK_GE(size_delta, kAllocationGranularity);
DCHECK(!base_page.is_large());
auto& normal_space = *static_cast<NormalPageSpace*>(base_page.space());
auto& lab = normal_space.linear_allocation_buffer();
- if (lab.start() == header.PayloadEnd() && lab.size() >= size_delta) {
+ if (lab.start() == header.ObjectEnd() && lab.size() >= size_delta) {
// LABs are considered used memory which means that no allocated size
// adjustments are needed.
Address delta_start = lab.Allocate(size_delta);
- SET_MEMORY_ACCESSIBLE(delta_start, size_delta);
- header.SetSize(new_size);
+ SetMemoryAccessible(delta_start, size_delta);
+ header.SetAllocatedSize(new_size);
return true;
}
return false;
@@ -88,30 +84,30 @@ bool Grow(HeapObjectHeader& header, BasePage& base_page, size_t new_size,
bool Shrink(HeapObjectHeader& header, BasePage& base_page, size_t new_size,
size_t size_delta) {
- DCHECK_GE(header.GetSize(), new_size + kAllocationGranularity);
+ DCHECK_GE(header.AllocatedSize(), new_size + kAllocationGranularity);
DCHECK_GE(size_delta, kAllocationGranularity);
DCHECK(!base_page.is_large());
auto& normal_space = *static_cast<NormalPageSpace*>(base_page.space());
auto& lab = normal_space.linear_allocation_buffer();
- Address free_start = header.PayloadEnd() - size_delta;
- if (lab.start() == header.PayloadEnd()) {
+ Address free_start = header.ObjectEnd() - size_delta;
+ if (lab.start() == header.ObjectEnd()) {
DCHECK_EQ(free_start, lab.start() - size_delta);
// LABs are considered used memory which means that no allocated size
// adjustments are needed.
lab.Set(free_start, lab.size() + size_delta);
- SET_MEMORY_INACCESSIBLE(lab.start(), size_delta);
- header.SetSize(new_size);
+ SetMemoryInaccessible(lab.start(), size_delta);
+ header.SetAllocatedSize(new_size);
return true;
}
// Heuristic: Only return memory to the free list if the block is larger than
// the smallest size class.
if (size_delta >= ObjectAllocator::kSmallestSpaceSize) {
- SET_MEMORY_INACCESSIBLE(free_start, size_delta);
+ SetMemoryInaccessible(free_start, size_delta);
base_page.heap()->stats_collector()->NotifyExplicitFree(size_delta);
normal_space.free_list().Add({free_start, size_delta});
NormalPage::From(&base_page)->object_start_bitmap().SetBit(free_start);
- header.SetSize(new_size);
+ header.SetAllocatedSize(new_size);
}
// Return success in any case, as we want to avoid that embedders start
// copying memory because of small deltas.
@@ -121,10 +117,11 @@ bool Shrink(HeapObjectHeader& header, BasePage& base_page, size_t new_size,
} // namespace
bool Resize(void* object, size_t new_object_size) {
- bool can_resize;
- BasePage* base_page;
- std::tie(can_resize, base_page) = CanModifyObject(object);
- if (!can_resize) {
+ // `object` is guaranteed to be of type GarbageCollected, so getting the
+ // BasePage is okay for regular and large objects.
+ BasePage* base_page = BasePage::FromPayload(object);
+
+ if (InGC(*base_page->heap())) {
return false;
}
@@ -136,8 +133,8 @@ bool Resize(void* object, size_t new_object_size) {
const size_t new_size = RoundUp<kAllocationGranularity>(
sizeof(HeapObjectHeader) + new_object_size);
- auto& header = HeapObjectHeader::FromPayload(object);
- const size_t old_size = header.GetSize();
+ auto& header = HeapObjectHeader::FromObject(object);
+ const size_t old_size = header.AllocatedSize();
if (new_size > old_size) {
return Grow(header, *base_page, new_size, new_size - old_size);
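The hunks above split the old CanModifyObject() helper: the GC-in-progress test now lives in InGC(HeapHandle&), while FreeUnreferencedObject() and Resize() derive the BasePage themselves. A minimal embedder-side sketch, assuming the public wrappers in include/cppgc/explicit-management.h (also touched in this commit) forward the heap handle into the internal FreeUnreferencedObject(HeapHandle&, void*) shown here:

#include "include/cppgc/explicit-management.h"
#include "include/cppgc/garbage-collected.h"

class LargeBuffer final : public cppgc::GarbageCollected<LargeBuffer> {
 public:
  void Trace(cppgc::Visitor*) const {}
};

// Hypothetical helper, not part of this commit: eagerly reclaims `buffer`.
// The internal InGC() check turns this into a no-op while marking or
// sweeping is in progress, so the call is safe at any time.
void ReleaseEagerly(cppgc::HeapHandle& heap_handle, LargeBuffer& buffer) {
  cppgc::subtle::FreeUnreferencedObject(heap_handle, buffer);
}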
diff --git a/chromium/v8/src/heap/cppgc/free-list.cc b/chromium/v8/src/heap/cppgc/free-list.cc
index 705d31725ad..600e15312cd 100644
--- a/chromium/v8/src/heap/cppgc/free-list.cc
+++ b/chromium/v8/src/heap/cppgc/free-list.cc
@@ -8,9 +8,9 @@
#include "include/cppgc/internal/logging.h"
#include "src/base/bits.h"
+#include "src/base/sanitizer/asan.h"
#include "src/heap/cppgc/globals.h"
#include "src/heap/cppgc/heap-object-header.h"
-#include "src/heap/cppgc/sanitizers.h"
namespace cppgc {
namespace internal {
@@ -132,7 +132,7 @@ FreeList::Block FreeList::Allocate(size_t allocation_size) {
// Final bucket candidate; check initial entry if it is able
// to service this allocation. Do not perform a linear scan,
// as it is considered too costly.
- if (!entry || entry->GetSize() < allocation_size) break;
+ if (!entry || entry->AllocatedSize() < allocation_size) break;
}
if (entry) {
if (!entry->Next()) {
@@ -141,7 +141,7 @@ FreeList::Block FreeList::Allocate(size_t allocation_size) {
}
entry->Unlink(&free_list_heads_[index]);
biggest_free_list_index_ = index;
- return {entry, entry->GetSize()};
+ return {entry, entry->AllocatedSize()};
}
}
biggest_free_list_index_ = index;
@@ -158,7 +158,7 @@ size_t FreeList::Size() const {
size_t size = 0;
for (auto* entry : free_list_heads_) {
while (entry) {
- size += entry->GetSize();
+ size += entry->AllocatedSize();
entry = entry->Next();
}
}
@@ -175,7 +175,7 @@ bool FreeList::ContainsForTesting(Block block) const {
for (Entry* entry = list; entry; entry = entry->Next()) {
if (entry <= block.address &&
(reinterpret_cast<Address>(block.address) + block.size <=
- reinterpret_cast<Address>(entry) + entry->GetSize()))
+ reinterpret_cast<Address>(entry) + entry->AllocatedSize()))
return true;
}
}
@@ -204,7 +204,7 @@ void FreeList::CollectStatistics(
size_t entry_size = 0;
for (Entry* entry = free_list_heads_[i]; entry; entry = entry->Next()) {
++entry_count;
- entry_size += entry->GetSize();
+ entry_size += entry->AllocatedSize();
}
bucket_size.push_back(static_cast<size_t>(1) << i);
free_count.push_back(entry_count);
diff --git a/chromium/v8/src/heap/cppgc/gc-info-table.cc b/chromium/v8/src/heap/cppgc/gc-info-table.cc
index 6b177848cbb..7462ba8a21d 100644
--- a/chromium/v8/src/heap/cppgc/gc-info-table.cc
+++ b/chromium/v8/src/heap/cppgc/gc-info-table.cc
@@ -35,8 +35,9 @@ PageAllocator* GetAllocator(PageAllocator* page_allocator) {
default_page_allocator;
page_allocator = default_page_allocator.get();
}
- // TODO(chromium:1056170): Wrap page_allocator into LsanPageAllocator when
- // running with LEAK_SANITIZER.
+ // No need to introduce LSAN support for PageAllocator, as `GCInfoTable` is
+ // already a leaky object and the table payload (`GCInfoTable::table_`) should
+ // not refer to dynamically allocated objects.
return page_allocator;
}
diff --git a/chromium/v8/src/heap/cppgc/heap-base.cc b/chromium/v8/src/heap/cppgc/heap-base.cc
index f89c4c9f112..c7664f09c69 100644
--- a/chromium/v8/src/heap/cppgc/heap-base.cc
+++ b/chromium/v8/src/heap/cppgc/heap-base.cc
@@ -5,8 +5,8 @@
#include "src/heap/cppgc/heap-base.h"
#include "include/cppgc/heap-consistency.h"
-#include "src/base/bounded-page-allocator.h"
#include "src/base/platform/platform.h"
+#include "src/base/sanitizer/lsan-page-allocator.h"
#include "src/heap/base/stack.h"
#include "src/heap/cppgc/globals.h"
#include "src/heap/cppgc/heap-object-header.h"
@@ -15,6 +15,7 @@
#include "src/heap/cppgc/heap-visitor.h"
#include "src/heap/cppgc/marker.h"
#include "src/heap/cppgc/marking-verifier.h"
+#include "src/heap/cppgc/object-view.h"
#include "src/heap/cppgc/page-memory.h"
#include "src/heap/cppgc/prefinalizer-handler.h"
#include "src/heap/cppgc/stats-collector.h"
@@ -35,13 +36,7 @@ class ObjectSizeCounter : private HeapVisitor<ObjectSizeCounter> {
private:
static size_t ObjectSize(const HeapObjectHeader* header) {
- const size_t size =
- header->IsLargeObject()
- ? static_cast<const LargePage*>(BasePage::FromPayload(header))
- ->PayloadSize()
- : header->GetSize();
- DCHECK_GE(size, sizeof(HeapObjectHeader));
- return size - sizeof(HeapObjectHeader);
+ return ObjectView(*header).Size();
}
bool VisitHeapObjectHeader(HeapObjectHeader* header) {
@@ -62,13 +57,16 @@ HeapBase::HeapBase(
std::unique_ptr<MetricRecorder> histogram_recorder)
: raw_heap_(this, custom_spaces),
platform_(std::move(platform)),
+#if defined(LEAK_SANITIZER)
+ lsan_page_allocator_(std::make_unique<v8::base::LsanPageAllocator>(
+ platform_->GetPageAllocator())),
+#endif // LEAK_SANITIZER
#if defined(CPPGC_CAGED_HEAP)
- caged_heap_(this, platform_->GetPageAllocator()),
+ caged_heap_(this, page_allocator()),
page_backend_(std::make_unique<PageBackend>(&caged_heap_.allocator())),
-#else
- page_backend_(
- std::make_unique<PageBackend>(platform_->GetPageAllocator())),
-#endif
+#else // !CPPGC_CAGED_HEAP
+ page_backend_(std::make_unique<PageBackend>(page_allocator())),
+#endif // !CPPGC_CAGED_HEAP
stats_collector_(std::make_unique<StatsCollector>(
std::move(histogram_recorder), platform_.get())),
stack_(std::make_unique<heap::base::Stack>(
@@ -85,6 +83,14 @@ HeapBase::HeapBase(
HeapBase::~HeapBase() = default;
+PageAllocator* HeapBase::page_allocator() const {
+#if defined(LEAK_SANITIZER)
+ return lsan_page_allocator_.get();
+#else // !LEAK_SANITIZER
+ return platform_->GetPageAllocator();
+#endif // !LEAK_SANITIZER
+}
+
size_t HeapBase::ObjectPayloadSize() const {
return ObjectSizeCounter().GetSize(const_cast<RawHeap*>(&raw_heap()));
}
diff --git a/chromium/v8/src/heap/cppgc/heap-base.h b/chromium/v8/src/heap/cppgc/heap-base.h
index f9bdb95c04a..81365417180 100644
--- a/chromium/v8/src/heap/cppgc/heap-base.h
+++ b/chromium/v8/src/heap/cppgc/heap-base.h
@@ -19,6 +19,7 @@
#include "src/heap/cppgc/metric-recorder.h"
#include "src/heap/cppgc/object-allocator.h"
#include "src/heap/cppgc/process-heap-statistics.h"
+#include "src/heap/cppgc/process-heap.h"
#include "src/heap/cppgc/raw-heap.h"
#include "src/heap/cppgc/sweeper.h"
#include "v8config.h" // NOLINT(build/include_directory)
@@ -27,6 +28,12 @@
#include "src/heap/cppgc/caged-heap.h"
#endif
+namespace v8 {
+namespace base {
+class LsanPageAllocator;
+} // namespace base
+} // namespace v8
+
namespace heap {
namespace base {
class Stack;
@@ -152,6 +159,9 @@ class V8_EXPORT_PRIVATE HeapBase : public cppgc::HeapHandle {
size_t ObjectPayloadSize() const;
StackSupport stack_support() const { return stack_support_; }
+ const EmbedderStackState* override_stack_state() const {
+ return override_stack_state_.get();
+ }
void AdvanceIncrementalGarbageCollectionOnAllocationIfNeeded();
@@ -172,6 +182,11 @@ class V8_EXPORT_PRIVATE HeapBase : public cppgc::HeapHandle {
stack_state_of_prev_gc_ = stack_state;
}
+ uintptr_t stack_end_of_current_gc() const { return stack_end_of_current_gc_; }
+ void SetStackEndOfCurrentGC(uintptr_t stack_end) {
+ stack_end_of_current_gc_ = stack_end;
+ }
+
void SetInAtomicPauseForTesting(bool value) { in_atomic_pause_ = value; }
virtual void StartIncrementalGarbageCollectionForTesting() = 0;
@@ -189,11 +204,20 @@ class V8_EXPORT_PRIVATE HeapBase : public cppgc::HeapHandle {
void ExecutePreFinalizers();
+ PageAllocator* page_allocator() const;
+
RawHeap raw_heap_;
std::shared_ptr<cppgc::Platform> platform_;
+
+#if defined(LEAK_SANITIZER)
+ std::unique_ptr<v8::base::LsanPageAllocator> lsan_page_allocator_;
+#endif // LEAK_SANITIZER
+
+ HeapRegistry::Subscription heap_registry_subscription_{*this};
+
#if defined(CPPGC_CAGED_HEAP)
CagedHeap caged_heap_;
-#endif
+#endif // CPPGC_CAGED_HEAP
std::unique_ptr<PageBackend> page_backend_;
std::unique_ptr<StatsCollector> stats_collector_;
@@ -224,6 +248,10 @@ class V8_EXPORT_PRIVATE HeapBase : public cppgc::HeapHandle {
EmbedderStackState::kNoHeapPointers;
std::unique_ptr<EmbedderStackState> override_stack_state_;
+ // Marker that signals end of the interesting stack region in which on-heap
+ // pointers can be found.
+ uintptr_t stack_end_of_current_gc_ = 0;
+
bool in_atomic_pause_ = false;
friend class MarkerBase::IncrementalMarkingTask;
diff --git a/chromium/v8/src/heap/cppgc/heap-object-header.cc b/chromium/v8/src/heap/cppgc/heap-object-header.cc
index 0f5530114cb..5ff0e230e7f 100644
--- a/chromium/v8/src/heap/cppgc/heap-object-header.cc
+++ b/chromium/v8/src/heap/cppgc/heap-object-header.cc
@@ -6,9 +6,9 @@
#include "include/cppgc/internal/api-constants.h"
#include "src/base/macros.h"
+#include "src/base/sanitizer/asan.h"
#include "src/heap/cppgc/gc-info-table.h"
#include "src/heap/cppgc/heap-page.h"
-#include "src/heap/cppgc/sanitizers.h"
namespace cppgc {
namespace internal {
@@ -28,17 +28,17 @@ void HeapObjectHeader::Finalize() {
IsLargeObject()
? LargePage::From(BasePage::FromPayload(this))->ObjectSize()
: ObjectSize();
- ASAN_UNPOISON_MEMORY_REGION(Payload(), size);
+ ASAN_UNPOISON_MEMORY_REGION(ObjectStart(), size);
#endif // V8_USE_ADDRESS_SANITIZER
const GCInfo& gc_info = GlobalGCInfoTable::GCInfoFromIndex(GetGCInfoIndex());
if (gc_info.finalize) {
- gc_info.finalize(Payload());
+ gc_info.finalize(ObjectStart());
}
}
HeapObjectName HeapObjectHeader::GetName() const {
const GCInfo& gc_info = GlobalGCInfoTable::GCInfoFromIndex(GetGCInfoIndex());
- return gc_info.name(Payload());
+ return gc_info.name(ObjectStart());
}
} // namespace internal
diff --git a/chromium/v8/src/heap/cppgc/heap-object-header.h b/chromium/v8/src/heap/cppgc/heap-object-header.h
index 7e940ca3477..a50d115e52b 100644
--- a/chromium/v8/src/heap/cppgc/heap-object-header.h
+++ b/chromium/v8/src/heap/cppgc/heap-object-header.h
@@ -57,22 +57,22 @@ class HeapObjectHeader {
static constexpr size_t kMaxSize = (size_t{1} << kSizeLog2) - 1;
static constexpr uint16_t kLargeObjectSizeInHeader = 0;
- inline static HeapObjectHeader& FromPayload(void* address);
- inline static const HeapObjectHeader& FromPayload(const void* address);
+ inline static HeapObjectHeader& FromObject(void* address);
+ inline static const HeapObjectHeader& FromObject(const void* address);
inline HeapObjectHeader(size_t size, GCInfoIndex gc_info_index);
- // The payload starts directly after the HeapObjectHeader.
- inline Address Payload() const;
+ // The object starts directly after the HeapObjectHeader.
+ inline Address ObjectStart() const;
template <AccessMode mode = AccessMode::kNonAtomic>
- inline Address PayloadEnd() const;
+ inline Address ObjectEnd() const;
template <AccessMode mode = AccessMode::kNonAtomic>
inline GCInfoIndex GetGCInfoIndex() const;
template <AccessMode mode = AccessMode::kNonAtomic>
- inline size_t GetSize() const;
- inline void SetSize(size_t size);
+ inline size_t AllocatedSize() const;
+ inline void SetAllocatedSize(size_t size);
template <AccessMode mode = AccessMode::kNonAtomic>
inline size_t ObjectSize() const;
@@ -149,15 +149,15 @@ static_assert(kAllocationGranularity == sizeof(HeapObjectHeader),
"guarantee alignment");
// static
-HeapObjectHeader& HeapObjectHeader::FromPayload(void* payload) {
- return *reinterpret_cast<HeapObjectHeader*>(static_cast<Address>(payload) -
+HeapObjectHeader& HeapObjectHeader::FromObject(void* object) {
+ return *reinterpret_cast<HeapObjectHeader*>(static_cast<Address>(object) -
sizeof(HeapObjectHeader));
}
// static
-const HeapObjectHeader& HeapObjectHeader::FromPayload(const void* payload) {
+const HeapObjectHeader& HeapObjectHeader::FromObject(const void* object) {
return *reinterpret_cast<const HeapObjectHeader*>(
- static_cast<ConstAddress>(payload) - sizeof(HeapObjectHeader));
+ static_cast<ConstAddress>(object) - sizeof(HeapObjectHeader));
}
HeapObjectHeader::HeapObjectHeader(size_t size, GCInfoIndex gc_info_index) {
@@ -183,16 +183,16 @@ HeapObjectHeader::HeapObjectHeader(size_t size, GCInfoIndex gc_info_index) {
#endif // DEBUG
}
-Address HeapObjectHeader::Payload() const {
+Address HeapObjectHeader::ObjectStart() const {
return reinterpret_cast<Address>(const_cast<HeapObjectHeader*>(this)) +
sizeof(HeapObjectHeader);
}
template <AccessMode mode>
-Address HeapObjectHeader::PayloadEnd() const {
+Address HeapObjectHeader::ObjectEnd() const {
DCHECK(!IsLargeObject());
return reinterpret_cast<Address>(const_cast<HeapObjectHeader*>(this)) +
- GetSize<mode>();
+ AllocatedSize<mode>();
}
template <AccessMode mode>
@@ -203,7 +203,7 @@ GCInfoIndex HeapObjectHeader::GetGCInfoIndex() const {
}
template <AccessMode mode>
-size_t HeapObjectHeader::GetSize() const {
+size_t HeapObjectHeader::AllocatedSize() const {
// Size is immutable after construction while either marking or sweeping
// is running so relaxed load (if mode == kAtomic) is enough.
uint16_t encoded_low_value =
@@ -212,19 +212,21 @@ size_t HeapObjectHeader::GetSize() const {
return size;
}
-void HeapObjectHeader::SetSize(size_t size) {
+void HeapObjectHeader::SetAllocatedSize(size_t size) {
DCHECK(!IsMarked());
encoded_low_ = EncodeSize(size);
}
template <AccessMode mode>
size_t HeapObjectHeader::ObjectSize() const {
- return GetSize<mode>() - sizeof(HeapObjectHeader);
+ // The following DCHECK also fails for large objects.
+ DCHECK_GT(AllocatedSize<mode>(), sizeof(HeapObjectHeader));
+ return AllocatedSize<mode>() - sizeof(HeapObjectHeader);
}
template <AccessMode mode>
bool HeapObjectHeader::IsLargeObject() const {
- return GetSize<mode>() == kLargeObjectSizeInHeader;
+ return AllocatedSize<mode>() == kLargeObjectSizeInHeader;
}
template <AccessMode mode>
@@ -235,7 +237,8 @@ bool HeapObjectHeader::IsInConstruction() const {
}
void HeapObjectHeader::MarkAsFullyConstructed() {
- MakeGarbageCollectedTraitInternal::MarkObjectAsFullyConstructed(Payload());
+ MakeGarbageCollectedTraitInternal::MarkObjectAsFullyConstructed(
+ ObjectStart());
}
template <AccessMode mode>
@@ -282,7 +285,7 @@ template <AccessMode mode>
void HeapObjectHeader::Trace(Visitor* visitor) const {
const GCInfo& gc_info =
GlobalGCInfoTable::GCInfoFromIndex(GetGCInfoIndex<mode>());
- return gc_info.trace(visitor, Payload());
+ return gc_info.trace(visitor, ObjectStart());
}
template <AccessMode mode, HeapObjectHeader::EncodedHalf part,
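The rename from GetSize()/Payload() to AllocatedSize()/ObjectStart() makes the two size notions explicit: AllocatedSize() covers the header plus the object, while ObjectSize() is the usable part behind the header (and now DCHECKs against large objects, whose header encodes size 0). A small sketch of the invariant for regular objects, assuming code built inside the V8 tree; UsableBytes is a hypothetical helper:

#include "src/base/logging.h"
#include "src/heap/cppgc/heap-object-header.h"

namespace cppgc {
namespace internal {

// Valid only for regular (non-large) objects.
size_t UsableBytes(const void* object) {
  const HeapObjectHeader& header = HeapObjectHeader::FromObject(object);
  // Header and object payload together make up the allocated block.
  DCHECK_EQ(header.AllocatedSize(),
            sizeof(HeapObjectHeader) + header.ObjectSize());
  // ObjectStart() points directly behind the header; ObjectEnd() points
  // behind the allocated block.
  DCHECK_EQ(header.ObjectEnd(), header.ObjectStart() + header.ObjectSize());
  return header.ObjectSize();
}

}  // namespace internal
}  // namespace cppgc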
diff --git a/chromium/v8/src/heap/cppgc/heap-page.cc b/chromium/v8/src/heap/cppgc/heap-page.cc
index d573d675ee4..f65b3fed9b8 100644
--- a/chromium/v8/src/heap/cppgc/heap-page.cc
+++ b/chromium/v8/src/heap/cppgc/heap-page.cc
@@ -69,6 +69,11 @@ ConstAddress BasePage::PayloadEnd() const {
return const_cast<BasePage*>(this)->PayloadEnd();
}
+size_t BasePage::AllocatedBytesAtLastGC() const {
+ return is_large() ? LargePage::From(this)->AllocatedBytesAtLastGC()
+ : NormalPage::From(this)->AllocatedBytesAtLastGC();
+}
+
HeapObjectHeader* BasePage::TryObjectHeaderFromInnerAddress(
void* address) const {
return const_cast<HeapObjectHeader*>(
diff --git a/chromium/v8/src/heap/cppgc/heap-page.h b/chromium/v8/src/heap/cppgc/heap-page.h
index 1a66b8593e6..39d5e644ee3 100644
--- a/chromium/v8/src/heap/cppgc/heap-page.h
+++ b/chromium/v8/src/heap/cppgc/heap-page.h
@@ -46,6 +46,10 @@ class V8_EXPORT_PRIVATE BasePage {
Address PayloadEnd();
ConstAddress PayloadEnd() const;
+ // Returns the size of live objects on the page at the last GC.
+ // The counter is updated after sweeping.
+ size_t AllocatedBytesAtLastGC() const;
+
// |address| must refer to real object.
template <AccessMode = AccessMode::kNonAtomic>
HeapObjectHeader& ObjectHeaderFromInnerAddress(void* address) const;
@@ -107,7 +111,7 @@ class V8_EXPORT_PRIVATE NormalPage final : public BasePage {
bool operator!=(IteratorImpl other) const { return !(*this == other); }
IteratorImpl& operator++() {
- const size_t size = p_->GetSize();
+ const size_t size = p_->AllocatedSize();
DCHECK_EQ(0, (size & (sizeof(T) - 1)));
p_ += (size / sizeof(T));
if (reinterpret_cast<ConstAddress>(p_) == lab_start_) {
@@ -169,6 +173,12 @@ class V8_EXPORT_PRIVATE NormalPage final : public BasePage {
return (PayloadStart() <= address) && (address < PayloadEnd());
}
+ size_t AllocatedBytesAtLastGC() const { return allocated_bytes_at_last_gc_; }
+
+ void SetAllocatedBytesAtLastGC(size_t bytes) {
+ allocated_bytes_at_last_gc_ = bytes;
+ }
+
PlatformAwareObjectStartBitmap& object_start_bitmap() {
return object_start_bitmap_;
}
@@ -180,6 +190,7 @@ class V8_EXPORT_PRIVATE NormalPage final : public BasePage {
NormalPage(HeapBase* heap, BaseSpace* space);
~NormalPage();
+ size_t allocated_bytes_at_last_gc_ = 0;
PlatformAwareObjectStartBitmap object_start_bitmap_;
};
@@ -210,7 +221,12 @@ class V8_EXPORT_PRIVATE LargePage final : public BasePage {
ConstAddress PayloadEnd() const;
size_t PayloadSize() const { return payload_size_; }
- size_t ObjectSize() const { return payload_size_ - sizeof(HeapObjectHeader); }
+ size_t ObjectSize() const {
+ DCHECK_GT(payload_size_, sizeof(HeapObjectHeader));
+ return payload_size_ - sizeof(HeapObjectHeader);
+ }
+
+ size_t AllocatedBytesAtLastGC() const { return ObjectSize(); }
bool PayloadContains(ConstAddress address) const {
return (PayloadStart() <= address) && (address < PayloadEnd());
@@ -248,7 +264,7 @@ const HeapObjectHeader* ObjectHeaderFromInnerAddressImpl(const BasePage* page,
const HeapObjectHeader* header =
bitmap.FindHeader<mode>(static_cast<ConstAddress>(address));
DCHECK_LT(address, reinterpret_cast<ConstAddress>(header) +
- header->GetSize<AccessMode::kAtomic>());
+ header->AllocatedSize<AccessMode::kAtomic>());
return header;
}
diff --git a/chromium/v8/src/heap/cppgc/heap-statistics-collector.cc b/chromium/v8/src/heap/cppgc/heap-statistics-collector.cc
index 961148babd8..ef283e856ad 100644
--- a/chromium/v8/src/heap/cppgc/heap-statistics-collector.cc
+++ b/chromium/v8/src/heap/cppgc/heap-statistics-collector.cc
@@ -148,7 +148,7 @@ bool HeapStatisticsCollector::VisitHeapObjectHeader(HeapObjectHeader* header) {
DCHECK_NOT_NULL(current_space_stats_);
DCHECK_NOT_NULL(current_page_stats_);
if (header->IsFree()) return true;
- size_t object_size = header->GetSize();
+ size_t object_size = header->AllocatedSize();
RecordObjectType(current_space_stats_, header, object_size);
current_page_stats_->used_size_bytes += object_size;
return true;
diff --git a/chromium/v8/src/heap/cppgc/heap.cc b/chromium/v8/src/heap/cppgc/heap.cc
index c0c9cec9292..dc127f8e51c 100644
--- a/chromium/v8/src/heap/cppgc/heap.cc
+++ b/chromium/v8/src/heap/cppgc/heap.cc
@@ -176,6 +176,7 @@ void Heap::FinalizeGarbageCollection(Config::StackState stack_state) {
if (override_stack_state_) {
config_.stack_state = *override_stack_state_;
}
+ SetStackEndOfCurrentGC(v8::base::Stack::GetCurrentStackPosition());
in_atomic_pause_ = true;
{
// This guards atomic pause marking, meaning that no internal method or
@@ -188,7 +189,8 @@ void Heap::FinalizeGarbageCollection(Config::StackState stack_state) {
// TODO(chromium:1056170): replace build flag with dedicated flag.
#if DEBUG
MarkingVerifier verifier(*this);
- verifier.Run(config_.stack_state);
+ verifier.Run(config_.stack_state, stack_end_of_current_gc(),
+ stats_collector()->marked_bytes());
#endif
subtle::NoGarbageCollectionScope no_gc(*this);
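FinalizeGarbageCollection() now snapshots the current stack position right before entering the atomic pause, and the marking verifier scans only the stack region between that end marker and the stack start (via IteratePointersUnsafe(), see marking-verifier.cc below) while also cross-checking marked bytes. A hypothetical illustration of the bound, mirroring the IsOnStack() pattern used in pointer-policies.cc further down:

#include "src/base/platform/platform.h"

// Sketch only: a slot is inside the scanned region iff it lies between the
// recorded stack end (the deepest frame at the start of the atomic pause)
// and the platform stack start.
bool InScannedStackRegion(const void* stack_end, const void* slot) {
  return stack_end <= slot && slot < v8::base::Stack::GetStackStart();
}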
diff --git a/chromium/v8/src/heap/cppgc/liveness-broker.cc b/chromium/v8/src/heap/cppgc/liveness-broker.cc
index 8c61268ba81..d3dd457e056 100644
--- a/chromium/v8/src/heap/cppgc/liveness-broker.cc
+++ b/chromium/v8/src/heap/cppgc/liveness-broker.cc
@@ -9,7 +9,7 @@
namespace cppgc {
bool LivenessBroker::IsHeapObjectAliveImpl(const void* payload) const {
- return internal::HeapObjectHeader::FromPayload(payload).IsMarked();
+ return internal::HeapObjectHeader::FromObject(payload).IsMarked();
}
namespace internal {
diff --git a/chromium/v8/src/heap/cppgc/marker.cc b/chromium/v8/src/heap/cppgc/marker.cc
index d30bb0a8ec2..d26fd580df9 100644
--- a/chromium/v8/src/heap/cppgc/marker.cc
+++ b/chromium/v8/src/heap/cppgc/marker.cc
@@ -191,7 +191,7 @@ MarkerBase::~MarkerBase() {
MarkingWorklists::EphemeronPairItem item;
while (mutator_marking_state_.discovered_ephemeron_pairs_worklist().Pop(
&item)) {
- DCHECK(!HeapObjectHeader::FromPayload(item.key).IsMarked());
+ DCHECK(!HeapObjectHeader::FromObject(item.key).IsMarked());
}
#else
marking_worklists_.discovered_ephemeron_pairs_worklist()->Clear();
@@ -231,6 +231,8 @@ void MarkerBase::StartMarking() {
}
void MarkerBase::EnterAtomicPause(MarkingConfig::StackState stack_state) {
+ StatsCollector::EnabledScope top_stats_scope(heap().stats_collector(),
+ StatsCollector::kAtomicMark);
StatsCollector::EnabledScope stats_scope(heap().stats_collector(),
StatsCollector::kMarkAtomicPrologue);
@@ -261,30 +263,38 @@ void MarkerBase::EnterAtomicPause(MarkingConfig::StackState stack_state) {
}
void MarkerBase::LeaveAtomicPause() {
- StatsCollector::EnabledScope stats_scope(heap().stats_collector(),
- StatsCollector::kMarkAtomicEpilogue);
- DCHECK(!incremental_marking_handle_);
- ResetRememberedSet(heap());
- heap().stats_collector()->NotifyMarkingCompleted(
- // GetOverallMarkedBytes also includes concurrently marked bytes.
- schedule_.GetOverallMarkedBytes());
- is_marking_ = false;
+ {
+ StatsCollector::EnabledScope top_stats_scope(heap().stats_collector(),
+ StatsCollector::kAtomicMark);
+ StatsCollector::EnabledScope stats_scope(
+ heap().stats_collector(), StatsCollector::kMarkAtomicEpilogue);
+ DCHECK(!incremental_marking_handle_);
+ ResetRememberedSet(heap());
+ heap().stats_collector()->NotifyMarkingCompleted(
+ // GetOverallMarkedBytes also includes concurrently marked bytes.
+ schedule_.GetOverallMarkedBytes());
+ is_marking_ = false;
+ }
{
// Weakness callbacks are forbidden from allocating objects.
cppgc::subtle::DisallowGarbageCollectionScope disallow_gc_scope(heap_);
ProcessWeakness();
}
+ // TODO(chromium:1056170): It would be better if the call to Unlock was
+ // covered by some cppgc scope.
g_process_mutex.Pointer()->Unlock();
heap().SetStackStateOfPrevGC(config_.stack_state);
}
void MarkerBase::FinishMarking(MarkingConfig::StackState stack_state) {
DCHECK(is_marking_);
- StatsCollector::EnabledScope stats_scope(heap().stats_collector(),
- StatsCollector::kAtomicMark);
EnterAtomicPause(stack_state);
- CHECK(AdvanceMarkingWithLimits(v8::base::TimeDelta::Max(), SIZE_MAX));
- mutator_marking_state_.Publish();
+ {
+ StatsCollector::EnabledScope stats_scope(heap().stats_collector(),
+ StatsCollector::kAtomicMark);
+ CHECK(AdvanceMarkingWithLimits(v8::base::TimeDelta::Max(), SIZE_MAX));
+ mutator_marking_state_.Publish();
+ }
LeaveAtomicPause();
}
@@ -367,6 +377,10 @@ bool MarkerBase::IncrementalMarkingStep(MarkingConfig::StackState stack_state) {
}
void MarkerBase::AdvanceMarkingOnAllocation() {
+ StatsCollector::EnabledScope stats_scope(heap().stats_collector(),
+ StatsCollector::kIncrementalMark);
+ StatsCollector::EnabledScope nested_scope(heap().stats_collector(),
+ StatsCollector::kMarkOnAllocation);
if (AdvanceMarkingWithLimits()) {
// Schedule another incremental task for finalizing without a stack.
ScheduleIncrementalMarkingTask();
@@ -458,7 +472,7 @@ bool MarkerBase::ProcessWorklistsWithDeadline(
mutator_marking_state_.marking_worklist(),
[this](const MarkingWorklists::MarkingItem& item) {
const HeapObjectHeader& header =
- HeapObjectHeader::FromPayload(item.base_object_payload);
+ HeapObjectHeader::FromObject(item.base_object_payload);
DCHECK(!header.IsInConstruction<AccessMode::kNonAtomic>());
DCHECK(header.IsMarked<AccessMode::kNonAtomic>());
mutator_marking_state_.AccountMarkedBytes(header);
diff --git a/chromium/v8/src/heap/cppgc/marking-state.h b/chromium/v8/src/heap/cppgc/marking-state.h
index 6e08fc3e10e..c4627463338 100644
--- a/chromium/v8/src/heap/cppgc/marking-state.h
+++ b/chromium/v8/src/heap/cppgc/marking-state.h
@@ -174,9 +174,9 @@ MarkingStateBase::MarkingStateBase(HeapBase& heap,
void MarkingStateBase::MarkAndPush(const void* object, TraceDescriptor desc) {
DCHECK_NOT_NULL(object);
- MarkAndPush(HeapObjectHeader::FromPayload(
- const_cast<void*>(desc.base_object_payload)),
- desc);
+ MarkAndPush(
+ HeapObjectHeader::FromObject(const_cast<void*>(desc.base_object_payload)),
+ desc);
}
void MarkingStateBase::MarkAndPush(HeapObjectHeader& header,
@@ -202,7 +202,7 @@ bool MarkingStateBase::MarkNoPush(HeapObjectHeader& header) {
void MarkingStateBase::MarkAndPush(HeapObjectHeader& header) {
MarkAndPush(
header,
- {header.Payload(),
+ {header.ObjectStart(),
GlobalGCInfoTable::GCInfoFromIndex(header.GetGCInfoIndex()).trace});
}
@@ -222,7 +222,7 @@ void MarkingStateBase::RegisterWeakReferenceIfNeeded(const void* object,
// Filter out already marked values. The write barrier for WeakMember
// ensures that any newly set value after this point is kept alive and does
// not require the callback.
- if (HeapObjectHeader::FromPayload(desc.base_object_payload)
+ if (HeapObjectHeader::FromObject(desc.base_object_payload)
.IsMarked<AccessMode::kAtomic>())
return;
RegisterWeakCallback(weak_callback, parameter);
@@ -245,7 +245,7 @@ void MarkingStateBase::ProcessWeakContainer(const void* object,
DCHECK_NOT_NULL(object);
HeapObjectHeader& header =
- HeapObjectHeader::FromPayload(const_cast<void*>(object));
+ HeapObjectHeader::FromObject(const_cast<void*>(object));
if (header.IsInConstruction<AccessMode::kAtomic>()) {
not_fully_constructed_worklist_.Push<AccessMode::kAtomic>(&header);
@@ -255,6 +255,7 @@ void MarkingStateBase::ProcessWeakContainer(const void* object,
// Only mark the container initially. Its buckets will be processed after
// marking.
if (!MarkNoPush(header)) return;
+
RegisterWeakContainer(header);
// Register final weak processing of the backing store.
@@ -264,7 +265,13 @@ void MarkingStateBase::ProcessWeakContainer(const void* object,
// the TraceDescriptor will be nullptr. For ephemerons the callback will be
// non-nullptr so that the container is traced and the ephemeron pairs are
// processed.
- if (desc.callback) PushMarked(header, desc);
+ if (desc.callback) {
+ PushMarked(header, desc);
+ } else {
+ // For weak containers, there's no trace callback and no processing loop to
+ // update the marked bytes, hence inline that here.
+ AccountMarkedBytes(header);
+ }
}
void MarkingStateBase::ProcessEphemeron(const void* key, const void* value,
@@ -273,7 +280,7 @@ void MarkingStateBase::ProcessEphemeron(const void* key, const void* value,
// Filter out already marked keys. The write barrier for WeakMember
// ensures that any newly set value after this point is kept alive and does
// not require the callback.
- if (HeapObjectHeader::FromPayload(key).IsMarked<AccessMode::kAtomic>()) {
+ if (HeapObjectHeader::FromObject(key).IsMarked<AccessMode::kAtomic>()) {
if (value_desc.base_object_payload) {
MarkAndPush(value_desc.base_object_payload, value_desc);
} else {
@@ -291,7 +298,7 @@ void MarkingStateBase::AccountMarkedBytes(const HeapObjectHeader& header) {
header.IsLargeObject<AccessMode::kAtomic>()
? reinterpret_cast<const LargePage*>(BasePage::FromPayload(&header))
->PayloadSize()
- : header.GetSize<AccessMode::kAtomic>());
+ : header.AllocatedSize<AccessMode::kAtomic>());
}
void MarkingStateBase::AccountMarkedBytes(size_t marked_bytes) {
@@ -308,7 +315,7 @@ class MutatorMarkingState : public MarkingStateBase {
return MutatorMarkingState::MarkingStateBase::MarkNoPush(header);
}
- inline void PushMarkedWeakContainer(HeapObjectHeader&);
+ inline void ReTraceMarkedWeakContainer(cppgc::Visitor&, HeapObjectHeader&);
inline void DynamicallyMarkAddress(ConstAddress);
@@ -343,13 +350,13 @@ class MutatorMarkingState : public MarkingStateBase {
} recently_retraced_weak_containers_;
};
-void MutatorMarkingState::PushMarkedWeakContainer(HeapObjectHeader& header) {
+void MutatorMarkingState::ReTraceMarkedWeakContainer(cppgc::Visitor& visitor,
+ HeapObjectHeader& header) {
DCHECK(weak_containers_worklist_.Contains(&header));
recently_retraced_weak_containers_.Insert(&header);
- PushMarked(
- header,
- {header.Payload(),
- GlobalGCInfoTable::GCInfoFromIndex(header.GetGCInfoIndex()).trace});
+ // Don't push to the marking worklist to avoid double accounting of marked
+ // bytes as the container is already accounted for.
+ header.Trace(&visitor);
}
void MutatorMarkingState::DynamicallyMarkAddress(ConstAddress address) {
@@ -359,7 +366,7 @@ void MutatorMarkingState::DynamicallyMarkAddress(ConstAddress address) {
DCHECK(!header.IsInConstruction());
if (MarkNoPush(header)) {
marking_worklist_.Push(
- {reinterpret_cast<void*>(header.Payload()),
+ {reinterpret_cast<void*>(header.ObjectStart()),
GlobalGCInfoTable::GCInfoFromIndex(header.GetGCInfoIndex()).trace});
}
}
@@ -371,7 +378,7 @@ void MutatorMarkingState::InvokeWeakRootsCallbackIfNeeded(
// the callback instead of registering it.
#if DEBUG
const HeapObjectHeader& header =
- HeapObjectHeader::FromPayload(desc.base_object_payload);
+ HeapObjectHeader::FromObject(desc.base_object_payload);
DCHECK_IMPLIES(header.IsInConstruction(), header.IsMarked());
#endif // DEBUG
weak_callback(LivenessBrokerFactory::Create(), parameter);
diff --git a/chromium/v8/src/heap/cppgc/marking-verifier.cc b/chromium/v8/src/heap/cppgc/marking-verifier.cc
index 42e3c4eb3e8..2bbf8878e42 100644
--- a/chromium/v8/src/heap/cppgc/marking-verifier.cc
+++ b/chromium/v8/src/heap/cppgc/marking-verifier.cc
@@ -9,50 +9,81 @@
#include "src/heap/cppgc/heap-object-header.h"
#include "src/heap/cppgc/heap.h"
#include "src/heap/cppgc/marking-visitor.h"
+#include "src/heap/cppgc/object-view.h"
namespace cppgc {
namespace internal {
MarkingVerifierBase::MarkingVerifierBase(
- HeapBase& heap, std::unique_ptr<cppgc::Visitor> visitor)
+ HeapBase& heap, VerificationState& verification_state,
+ std::unique_ptr<cppgc::Visitor> visitor)
: ConservativeTracingVisitor(heap, *heap.page_backend(), *visitor.get()),
+ verification_state_(verification_state),
visitor_(std::move(visitor)) {}
-void MarkingVerifierBase::Run(Heap::Config::StackState stack_state) {
+void MarkingVerifierBase::Run(Heap::Config::StackState stack_state,
+ uintptr_t stack_end,
+ size_t expected_marked_bytes) {
Traverse(&heap_.raw_heap());
if (stack_state == Heap::Config::StackState::kMayContainHeapPointers) {
in_construction_objects_ = &in_construction_objects_stack_;
- heap_.stack()->IteratePointers(this);
- CHECK_EQ(in_construction_objects_stack_, in_construction_objects_heap_);
+ heap_.stack()->IteratePointersUnsafe(this, stack_end);
+ // The objects found through the unsafe iteration are only a subset of the
+ // regular iteration as they miss objects held alive only from callee-saved
+ // registers that are never pushed on the stack and SafeStack.
+ CHECK_LE(in_construction_objects_stack_.size(),
+ in_construction_objects_heap_.size());
+ for (auto* header : in_construction_objects_stack_) {
+ CHECK_NE(in_construction_objects_heap_.end(),
+ in_construction_objects_heap_.find(header));
+ }
}
+#ifdef CPPGC_VERIFY_LIVE_BYTES
+ CHECK_EQ(expected_marked_bytes, found_marked_bytes_);
+#endif // CPPGC_VERIFY_LIVE_BYTES
}
void VerificationState::VerifyMarked(const void* base_object_payload) const {
const HeapObjectHeader& child_header =
- HeapObjectHeader::FromPayload(base_object_payload);
+ HeapObjectHeader::FromObject(base_object_payload);
if (!child_header.IsMarked()) {
FATAL(
"MarkingVerifier: Encountered unmarked object.\n"
"#\n"
"# Hint:\n"
- "# %s\n"
- "# \\-> %s",
- parent_->GetName().value, child_header.GetName().value);
+ "# %s (%p)\n"
+ "# \\-> %s (%p)",
+ parent_ ? parent_->GetName().value : "Stack",
+ parent_ ? parent_->ObjectStart() : nullptr,
+ child_header.GetName().value, child_header.ObjectStart());
}
}
void MarkingVerifierBase::VisitInConstructionConservatively(
HeapObjectHeader& header, TraceConservativelyCallback callback) {
- CHECK(header.IsMarked());
if (in_construction_objects_->find(&header) !=
in_construction_objects_->end())
return;
in_construction_objects_->insert(&header);
+
+ // Stack case: Parent is stack and this is merely ensuring that the object
+ // itself is marked. If the object is marked, then it is being processed by
+ // the on-heap phase.
+ if (verification_state_.IsParentOnStack()) {
+ verification_state_.VerifyMarked(header.ObjectStart());
+ return;
+ }
+
+ // Heap case: Dispatching parent object that must be marked (pre-condition).
+ CHECK(header.IsMarked());
callback(this, header);
}
void MarkingVerifierBase::VisitPointer(const void* address) {
+ // Entry point for stack walk. The conservative visitor dispatches as follows:
+ // - Fully constructed objects: Visit()
+ // - Objects in construction: VisitInConstructionConservatively()
TraceConservativelyIfNeeded(address);
}
@@ -62,7 +93,7 @@ bool MarkingVerifierBase::VisitHeapObjectHeader(HeapObjectHeader* header) {
DCHECK(!header->IsFree());
- SetCurrentParent(header);
+ verification_state_.SetCurrentParent(header);
if (!header->IsInConstruction()) {
header->Trace(visitor_.get());
@@ -71,6 +102,10 @@ bool MarkingVerifierBase::VisitHeapObjectHeader(HeapObjectHeader* header) {
TraceConservativelyIfNeeded(*header);
}
+ found_marked_bytes_ += ObjectView(*header).Size() + sizeof(HeapObjectHeader);
+
+ verification_state_.SetCurrentParent(nullptr);
+
return true;
}
@@ -112,12 +147,8 @@ class VerificationVisitor final : public cppgc::Visitor {
} // namespace
MarkingVerifier::MarkingVerifier(HeapBase& heap_base)
- : MarkingVerifierBase(heap_base,
+ : MarkingVerifierBase(heap_base, state_,
std::make_unique<VerificationVisitor>(state_)) {}
-void MarkingVerifier::SetCurrentParent(const HeapObjectHeader* parent) {
- state_.SetCurrentParent(parent);
-}
-
} // namespace internal
} // namespace cppgc
diff --git a/chromium/v8/src/heap/cppgc/marking-verifier.h b/chromium/v8/src/heap/cppgc/marking-verifier.h
index eeced684497..95475f5191a 100644
--- a/chromium/v8/src/heap/cppgc/marking-verifier.h
+++ b/chromium/v8/src/heap/cppgc/marking-verifier.h
@@ -21,6 +21,9 @@ class VerificationState {
void VerifyMarked(const void*) const;
void SetCurrentParent(const HeapObjectHeader* header) { parent_ = header; }
+ // No parent means parent was on stack.
+ bool IsParentOnStack() const { return !parent_; }
+
private:
const HeapObjectHeader* parent_ = nullptr;
};
@@ -37,12 +40,11 @@ class V8_EXPORT_PRIVATE MarkingVerifierBase
MarkingVerifierBase(const MarkingVerifierBase&) = delete;
MarkingVerifierBase& operator=(const MarkingVerifierBase&) = delete;
- void Run(Heap::Config::StackState);
+ void Run(Heap::Config::StackState, uintptr_t, size_t);
protected:
- MarkingVerifierBase(HeapBase&, std::unique_ptr<cppgc::Visitor>);
-
- virtual void SetCurrentParent(const HeapObjectHeader*) = 0;
+ MarkingVerifierBase(HeapBase&, VerificationState&,
+ std::unique_ptr<cppgc::Visitor>);
private:
void VisitInConstructionConservatively(HeapObjectHeader&,
@@ -51,12 +53,14 @@ class V8_EXPORT_PRIVATE MarkingVerifierBase
bool VisitHeapObjectHeader(HeapObjectHeader*);
+ VerificationState& verification_state_;
std::unique_ptr<cppgc::Visitor> visitor_;
std::unordered_set<const HeapObjectHeader*> in_construction_objects_heap_;
std::unordered_set<const HeapObjectHeader*> in_construction_objects_stack_;
std::unordered_set<const HeapObjectHeader*>* in_construction_objects_ =
&in_construction_objects_heap_;
+ size_t found_marked_bytes_ = 0;
};
class V8_EXPORT_PRIVATE MarkingVerifier final : public MarkingVerifierBase {
@@ -64,8 +68,6 @@ class V8_EXPORT_PRIVATE MarkingVerifier final : public MarkingVerifierBase {
explicit MarkingVerifier(HeapBase&);
~MarkingVerifier() final = default;
- void SetCurrentParent(const HeapObjectHeader*) final;
-
private:
VerificationState state_;
};
diff --git a/chromium/v8/src/heap/cppgc/marking-visitor.cc b/chromium/v8/src/heap/cppgc/marking-visitor.cc
index fb51ccc303e..a740d33a841 100644
--- a/chromium/v8/src/heap/cppgc/marking-visitor.cc
+++ b/chromium/v8/src/heap/cppgc/marking-visitor.cc
@@ -56,7 +56,7 @@ void ConservativeMarkingVisitor::VisitFullyConstructedConservatively(
HeapObjectHeader& header) {
if (header.IsMarked()) {
if (marking_state_.IsMarkedWeakContainer(header))
- marking_state_.PushMarkedWeakContainer(header);
+ marking_state_.ReTraceMarkedWeakContainer(visitor_, header);
return;
}
ConservativeTracingVisitor::VisitFullyConstructedConservatively(header);
diff --git a/chromium/v8/src/heap/cppgc/memory.cc b/chromium/v8/src/heap/cppgc/memory.cc
new file mode 100644
index 00000000000..aa3baeaa8a0
--- /dev/null
+++ b/chromium/v8/src/heap/cppgc/memory.cc
@@ -0,0 +1,22 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/cppgc/memory.h"
+
+#include <cstddef>
+
+#include "src/heap/cppgc/globals.h"
+
+namespace cppgc {
+namespace internal {
+
+void NoSanitizeMemset(void* address, char c, size_t bytes) {
+ volatile Address base = reinterpret_cast<Address>(address);
+ for (size_t i = 0; i < bytes; ++i) {
+ base[i] = c;
+ }
+}
+
+} // namespace internal
+} // namespace cppgc
diff --git a/chromium/v8/src/heap/cppgc/memory.h b/chromium/v8/src/heap/cppgc/memory.h
new file mode 100644
index 00000000000..d31af33ee3f
--- /dev/null
+++ b/chromium/v8/src/heap/cppgc/memory.h
@@ -0,0 +1,76 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_MEMORY_H_
+#define V8_HEAP_CPPGC_MEMORY_H_
+
+#include <cstddef>
+#include <cstdint>
+#include <cstring>
+
+#include "src/base/macros.h"
+#include "src/base/sanitizer/asan.h"
+#include "src/base/sanitizer/msan.h"
+
+namespace cppgc {
+namespace internal {
+
+V8_NOINLINE DISABLE_ASAN void NoSanitizeMemset(void* address, char c,
+ size_t bytes);
+
+inline void ZapMemory(void* address, size_t size) {
+ // The lowest bit of the zapped value should be 0 so that zapped objects are
+ // never viewed as fully constructed objects.
+ static constexpr uint8_t kZappedValue = 0xdc;
+ memset(address, kZappedValue, size);
+}
+
+// Together `SetMemoryAccessible()` and `SetMemoryInaccessible()` form the
+// memory access model for allocation and free.
+V8_INLINE void SetMemoryAccessible(void* address, size_t size) {
+#if defined(V8_USE_MEMORY_SANITIZER)
+
+ MSAN_MEMORY_IS_INITIALIZED(address, size);
+
+#elif defined(V8_USE_ADDRESS_SANITIZER)
+
+ ASAN_UNPOISON_MEMORY_REGION(address, size);
+
+#elif DEBUG
+
+ memset(address, 0, size);
+
+#else // Release builds.
+
+ // Nothing to be done for release builds.
+
+#endif // Release builds.
+}
+
+V8_INLINE void SetMemoryInaccessible(void* address, size_t size) {
+#if defined(V8_USE_MEMORY_SANITIZER)
+
+ memset(address, 0, size);
+ MSAN_ALLOCATED_UNINITIALIZED_MEMORY(address, size);
+
+#elif defined(V8_USE_ADDRESS_SANITIZER)
+
+ NoSanitizeMemset(address, 0, size);
+ ASAN_POISON_MEMORY_REGION(address, size);
+
+#elif DEBUG
+
+ ::cppgc::internal::ZapMemory(address, size);
+
+#else // Release builds.
+
+ memset(address, 0, size);
+
+#endif // Release builds.
+}
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_MEMORY_H_
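The new SetMemoryAccessible()/SetMemoryInaccessible() helpers replace the SET_MEMORY_ACCESSIBLE/SET_MEMORY_INACCESSIBLE macros from the deleted sanitizers.h (see below) and centralize the per-build-mode behaviour: MSAN initialization, ASAN poisoning, debug zapping, plain memset in release. A minimal pairing sketch, assuming code built inside the V8 tree; RecycleBlock is a hypothetical helper, not part of this commit:

#include <cstddef>

#include "src/heap/cppgc/memory.h"

// Free-list-style reuse of a block: mark it inaccessible while it is unused
// and accessible again right before handing it back out.
void RecycleBlock(void* block, size_t block_size, bool reuse_now) {
  cppgc::internal::SetMemoryInaccessible(block, block_size);
  if (reuse_now) {
    cppgc::internal::SetMemoryAccessible(block, block_size);
  }
}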
diff --git a/chromium/v8/src/heap/cppgc/metric-recorder.h b/chromium/v8/src/heap/cppgc/metric-recorder.h
index 6e9d4d0787c..6118627d01d 100644
--- a/chromium/v8/src/heap/cppgc/metric-recorder.h
+++ b/chromium/v8/src/heap/cppgc/metric-recorder.h
@@ -14,7 +14,7 @@ class StatsCollector;
/**
* Base class used for reporting GC statistics histograms. Embedders interested
- * in collecting histgorams should implement the virtual AddMainThreadEvent
+ * in collecting histograms should implement the virtual AddMainThreadEvent
* methods below and pass an instance of the implementation during Heap
* creation.
*/
diff --git a/chromium/v8/src/heap/cppgc/object-allocator.cc b/chromium/v8/src/heap/cppgc/object-allocator.cc
index 366900b0f92..1197356c29d 100644
--- a/chromium/v8/src/heap/cppgc/object-allocator.cc
+++ b/chromium/v8/src/heap/cppgc/object-allocator.cc
@@ -95,7 +95,7 @@ void* AllocateLargeObject(PageBackend* page_backend, LargePageSpace* space,
stats_collector->NotifyAllocation(size);
MarkRangeAsYoung(page, page->PayloadStart(), page->PayloadEnd());
- return header->Payload();
+ return header->ObjectStart();
}
} // namespace
diff --git a/chromium/v8/src/heap/cppgc/object-allocator.h b/chromium/v8/src/heap/cppgc/object-allocator.h
index 56faef1c833..dd99d83ba56 100644
--- a/chromium/v8/src/heap/cppgc/object-allocator.h
+++ b/chromium/v8/src/heap/cppgc/object-allocator.h
@@ -12,9 +12,9 @@
#include "src/heap/cppgc/heap-object-header.h"
#include "src/heap/cppgc/heap-page.h"
#include "src/heap/cppgc/heap-space.h"
+#include "src/heap/cppgc/memory.h"
#include "src/heap/cppgc/object-start-bitmap.h"
#include "src/heap/cppgc/raw-heap.h"
-#include "src/heap/cppgc/sanitizers.h"
namespace cppgc {
@@ -111,10 +111,10 @@ void* ObjectAllocator::AllocateObjectOnSpace(NormalPageSpace* space,
#if !defined(V8_USE_MEMORY_SANITIZER) && !defined(V8_USE_ADDRESS_SANITIZER) && \
DEBUG
// For debug builds, unzap only the payload.
- SET_MEMORY_ACCESSIBLE(static_cast<char*>(raw) + sizeof(HeapObjectHeader),
- size - sizeof(HeapObjectHeader));
+ SetMemoryAccessible(static_cast<char*>(raw) + sizeof(HeapObjectHeader),
+ size - sizeof(HeapObjectHeader));
#else
- SET_MEMORY_ACCESSIBLE(raw, size);
+ SetMemoryAccessible(raw, size);
#endif
auto* header = new (raw) HeapObjectHeader(size, gcinfo);
@@ -123,7 +123,7 @@ void* ObjectAllocator::AllocateObjectOnSpace(NormalPageSpace* space,
->object_start_bitmap()
.SetBit<AccessMode::kAtomic>(reinterpret_cast<ConstAddress>(header));
- return header->Payload();
+ return header->ObjectStart();
}
} // namespace internal
diff --git a/chromium/v8/src/heap/cppgc/object-poisoner.h b/chromium/v8/src/heap/cppgc/object-poisoner.h
index fd2462d6694..632dea9b9d7 100644
--- a/chromium/v8/src/heap/cppgc/object-poisoner.h
+++ b/chromium/v8/src/heap/cppgc/object-poisoner.h
@@ -5,10 +5,10 @@
#ifndef V8_HEAP_CPPGC_OBJECT_POISONER_H_
#define V8_HEAP_CPPGC_OBJECT_POISONER_H_
+#include "src/base/sanitizer/asan.h"
#include "src/heap/cppgc/heap-object-header.h"
#include "src/heap/cppgc/heap-page.h"
#include "src/heap/cppgc/heap-visitor.h"
-#include "src/heap/cppgc/sanitizers.h"
namespace cppgc {
namespace internal {
@@ -27,7 +27,7 @@ class UnmarkedObjectsPoisoner : public HeapVisitor<UnmarkedObjectsPoisoner> {
header->IsLargeObject()
? LargePage::From(BasePage::FromPayload(header))->ObjectSize()
: header->ObjectSize();
- ASAN_POISON_MEMORY_REGION(header->Payload(), size);
+ ASAN_POISON_MEMORY_REGION(header->ObjectStart(), size);
return true;
}
};
diff --git a/chromium/v8/src/heap/cppgc/object-size-trait.cc b/chromium/v8/src/heap/cppgc/object-size-trait.cc
index 11c50b3c4d1..7b82239a610 100644
--- a/chromium/v8/src/heap/cppgc/object-size-trait.cc
+++ b/chromium/v8/src/heap/cppgc/object-size-trait.cc
@@ -6,6 +6,7 @@
#include "src/heap/cppgc/heap-object-header.h"
#include "src/heap/cppgc/heap-page.h"
+#include "src/heap/cppgc/object-view.h"
namespace cppgc {
namespace internal {
@@ -13,11 +14,7 @@ namespace internal {
// static
size_t BaseObjectSizeTrait::GetObjectSizeForGarbageCollected(
const void* object) {
- const auto& header = HeapObjectHeader::FromPayload(object);
- return header.IsLargeObject()
- ? static_cast<const LargePage*>(BasePage::FromPayload(&header))
- ->ObjectSize()
- : header.ObjectSize();
+ return ObjectView(HeapObjectHeader::FromObject(object)).Size();
}
// static
diff --git a/chromium/v8/src/heap/cppgc/object-view.h b/chromium/v8/src/heap/cppgc/object-view.h
new file mode 100644
index 00000000000..e83145cc319
--- /dev/null
+++ b/chromium/v8/src/heap/cppgc/object-view.h
@@ -0,0 +1,54 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_OBJECT_VIEW_H_
+#define V8_HEAP_CPPGC_OBJECT_VIEW_H_
+
+#include "include/v8config.h"
+#include "src/heap/cppgc/heap-object-header.h"
+#include "src/heap/cppgc/heap-page.h"
+
+namespace cppgc {
+namespace internal {
+
+// ObjectView allows accessing a header within the bounds of the actual object.
+// It is not exposed externally and does not keep the underlying object alive.
+class ObjectView final {
+ public:
+ V8_INLINE explicit ObjectView(const HeapObjectHeader& header);
+
+ V8_INLINE Address Start() const;
+ V8_INLINE ConstAddress End() const;
+ V8_INLINE size_t Size() const;
+
+ private:
+ const HeapObjectHeader& header_;
+ const BasePage* base_page_;
+ const bool is_large_object_;
+};
+
+ObjectView::ObjectView(const HeapObjectHeader& header)
+ : header_(header),
+ base_page_(
+ BasePage::FromPayload(const_cast<HeapObjectHeader*>(&header_))),
+ is_large_object_(header_.IsLargeObject()) {
+ DCHECK_EQ(Start() + Size(), End());
+}
+
+Address ObjectView::Start() const { return header_.ObjectStart(); }
+
+ConstAddress ObjectView::End() const {
+ return is_large_object_ ? LargePage::From(base_page_)->PayloadEnd()
+ : header_.ObjectEnd();
+}
+
+size_t ObjectView::Size() const {
+ return is_large_object_ ? LargePage::From(base_page_)->ObjectSize()
+ : header_.ObjectSize();
+}
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_OBJECT_VIEW_H_
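ObjectView folds the repeated "large object vs. regular object" size branching (previously duplicated in heap-base.cc and object-size-trait.cc, both simplified in this commit) into one helper. A usage sketch mirroring those call sites, assuming code built inside the V8 tree:

#include <cstddef>

#include "src/heap/cppgc/heap-object-header.h"
#include "src/heap/cppgc/object-view.h"

// Returns the object size regardless of whether `object` lives on a normal
// page or a large page.
size_t SizeOfLiveObject(const void* object) {
  using cppgc::internal::HeapObjectHeader;
  using cppgc::internal::ObjectView;
  return ObjectView(HeapObjectHeader::FromObject(object)).Size();
}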
diff --git a/chromium/v8/src/heap/cppgc/page-memory.cc b/chromium/v8/src/heap/cppgc/page-memory.cc
index 76b9458517e..49b44aff91c 100644
--- a/chromium/v8/src/heap/cppgc/page-memory.cc
+++ b/chromium/v8/src/heap/cppgc/page-memory.cc
@@ -5,7 +5,7 @@
#include "src/heap/cppgc/page-memory.h"
#include "src/base/macros.h"
-#include "src/heap/cppgc/sanitizers.h"
+#include "src/base/sanitizer/asan.h"
namespace cppgc {
namespace internal {
diff --git a/chromium/v8/src/heap/cppgc/pointer-policies.cc b/chromium/v8/src/heap/cppgc/pointer-policies.cc
index fdc435af17c..23ad552c7aa 100644
--- a/chromium/v8/src/heap/cppgc/pointer-policies.cc
+++ b/chromium/v8/src/heap/cppgc/pointer-policies.cc
@@ -4,21 +4,70 @@
#include "include/cppgc/internal/pointer-policies.h"
+#include "include/cppgc/internal/caged-heap-local-data.h"
#include "include/cppgc/internal/persistent-node.h"
+#include "src/base/logging.h"
#include "src/base/macros.h"
+#include "src/base/platform/platform.h"
+#include "src/heap/cppgc/heap-object-header.h"
#include "src/heap/cppgc/heap-page.h"
#include "src/heap/cppgc/heap.h"
+#include "src/heap/cppgc/page-memory.h"
+#include "src/heap/cppgc/process-heap.h"
namespace cppgc {
namespace internal {
-EnabledCheckingPolicy::EnabledCheckingPolicy() {
- USE(impl_);
- // TODO(chromium:1056170): Save creating heap state.
+namespace {
+
+#if defined(DEBUG)
+bool IsOnStack(const void* address) {
+ return v8::base::Stack::GetCurrentStackPosition() <= address &&
+ address < v8::base::Stack::GetStackStart();
}
+#endif // defined(DEBUG)
+
+} // namespace
+
+void EnabledCheckingPolicy::CheckPointerImpl(const void* ptr,
+ bool points_to_payload) {
+ // `ptr` must not reside on stack.
+ DCHECK(!IsOnStack(ptr));
+ auto* base_page = BasePage::FromPayload(ptr);
+ // Large objects do not support mixins. This also means that `base_page` is
+ // valid for large objects.
+ DCHECK_IMPLIES(base_page->is_large(), points_to_payload);
+
+ // References cannot change their heap association which means that state is
+ // immutable once it is set.
+ if (!heap_) {
+ heap_ = base_page->heap();
+ if (!heap_->page_backend()->Lookup(reinterpret_cast<Address>(this))) {
+ // If `this` is not contained within the heap of `ptr`, we must deal with
+ // an on-stack or off-heap reference. For both cases there should be no
+ // heap registered.
+ CHECK(!HeapRegistry::TryFromManagedPointer(this));
+ }
+ }
+
+ // Member references should never mix heaps.
+ DCHECK_EQ(heap_, base_page->heap());
+
+ // Header checks.
+ const HeapObjectHeader* header = nullptr;
+ if (points_to_payload) {
+ header = &HeapObjectHeader::FromObject(ptr);
+ } else if (!heap_->sweeper().IsSweepingInProgress()) {
+ // Mixin case.
+ header = &base_page->ObjectHeaderFromInnerAddress(ptr);
+ DCHECK_LE(header->ObjectStart(), ptr);
+ DCHECK_GT(header->ObjectEnd(), ptr);
+ }
+ if (header) {
+ DCHECK(!header->IsFree());
+ }
-void EnabledCheckingPolicy::CheckPointer(const void* ptr) {
- // TODO(chromium:1056170): Provide implementation.
+ // TODO(v8:11749): Check mark bits during the pre-finalizer phase.
}
PersistentRegion& StrongPersistentPolicy::GetPersistentRegion(
diff --git a/chromium/v8/src/heap/cppgc/process-heap-statistics.h b/chromium/v8/src/heap/cppgc/process-heap-statistics.h
index 2d7bfa117f8..da7683b616a 100644
--- a/chromium/v8/src/heap/cppgc/process-heap-statistics.h
+++ b/chromium/v8/src/heap/cppgc/process-heap-statistics.h
@@ -15,7 +15,7 @@ class ProcessHeapStatisticsUpdater {
public:
// Allocation observer implementation for heaps should register to contribute
// to ProcessHeapStatistics. The heap is responsible for allocating and
- // registering the obsrever impl with its stats collector.
+ // registering the observer impl with its stats collector.
class AllocationObserverImpl final
: public StatsCollector::AllocationObserver {
public:
diff --git a/chromium/v8/src/heap/cppgc/process-heap.cc b/chromium/v8/src/heap/cppgc/process-heap.cc
index e084ea1264b..6f8bb05c6cf 100644
--- a/chromium/v8/src/heap/cppgc/process-heap.cc
+++ b/chromium/v8/src/heap/cppgc/process-heap.cc
@@ -4,10 +4,66 @@
#include "src/heap/cppgc/process-heap.h"
+#include <algorithm>
+#include <vector>
+
+#include "src/base/lazy-instance.h"
+#include "src/base/platform/mutex.h"
+#include "src/heap/cppgc/heap-base.h"
+#include "src/heap/cppgc/page-memory.h"
+
namespace cppgc {
namespace internal {
v8::base::LazyMutex g_process_mutex = LAZY_MUTEX_INITIALIZER;
+namespace {
+
+v8::base::LazyMutex g_heap_registry_mutex = LAZY_MUTEX_INITIALIZER;
+
+HeapRegistry::Storage& GetHeapRegistryStorage() {
+ static v8::base::LazyInstance<HeapRegistry::Storage>::type heap_registry =
+ LAZY_INSTANCE_INITIALIZER;
+ return *heap_registry.Pointer();
+}
+
+} // namespace
+
+// static
+void HeapRegistry::RegisterHeap(HeapBase& heap) {
+ v8::base::MutexGuard guard(g_heap_registry_mutex.Pointer());
+
+ auto& storage = GetHeapRegistryStorage();
+ DCHECK_EQ(storage.end(), std::find(storage.begin(), storage.end(), &heap));
+ storage.push_back(&heap);
+}
+
+// static
+void HeapRegistry::UnregisterHeap(HeapBase& heap) {
+ v8::base::MutexGuard guard(g_heap_registry_mutex.Pointer());
+
+ auto& storage = GetHeapRegistryStorage();
+ const auto pos = std::find(storage.begin(), storage.end(), &heap);
+ DCHECK_NE(storage.end(), pos);
+ storage.erase(pos);
+}
+
+// static
+HeapBase* HeapRegistry::TryFromManagedPointer(const void* needle) {
+ v8::base::MutexGuard guard(g_heap_registry_mutex.Pointer());
+
+ for (auto* heap : GetHeapRegistryStorage()) {
+ const auto address =
+ heap->page_backend()->Lookup(reinterpret_cast<ConstAddress>(needle));
+ if (address) return heap;
+ }
+ return nullptr;
+}
+
+// static
+const HeapRegistry::Storage& HeapRegistry::GetRegisteredHeapsForTesting() {
+ return GetHeapRegistryStorage();
+}
+
} // namespace internal
} // namespace cppgc
diff --git a/chromium/v8/src/heap/cppgc/process-heap.h b/chromium/v8/src/heap/cppgc/process-heap.h
index 8afc7c88eb5..c581bad29c5 100644
--- a/chromium/v8/src/heap/cppgc/process-heap.h
+++ b/chromium/v8/src/heap/cppgc/process-heap.h
@@ -5,13 +5,48 @@
#ifndef V8_HEAP_CPPGC_PROCESS_HEAP_H_
#define V8_HEAP_CPPGC_PROCESS_HEAP_H_
+#include <vector>
+
+#include "src/base/macros.h"
#include "src/base/platform/mutex.h"
namespace cppgc {
namespace internal {
+class HeapBase;
+
extern v8::base::LazyMutex g_process_mutex;
+class V8_EXPORT_PRIVATE HeapRegistry final {
+ public:
+ using Storage = std::vector<HeapBase*>;
+
+ class Subscription final {
+ public:
+ inline explicit Subscription(HeapBase&);
+ inline ~Subscription();
+
+ private:
+ HeapBase& heap_;
+ };
+
+ static HeapBase* TryFromManagedPointer(const void* needle);
+
+ static const Storage& GetRegisteredHeapsForTesting();
+
+ private:
+ static void RegisterHeap(HeapBase&);
+ static void UnregisterHeap(HeapBase&);
+};
+
+HeapRegistry::Subscription::Subscription(HeapBase& heap) : heap_(heap) {
+ HeapRegistry::RegisterHeap(heap_);
+}
+
+HeapRegistry::Subscription::~Subscription() {
+ HeapRegistry::UnregisterHeap(heap_);
+}
+
} // namespace internal
} // namespace cppgc
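The new registry pairs a mutex-guarded vector of heaps with a small RAII Subscription, so a heap stays registered for exactly as long as it is alive. A standalone sketch of the pattern, with simplified stand-in types rather than the cppgc implementation:

#include <algorithm>
#include <mutex>
#include <vector>

class HeapBase;  // stand-in for cppgc::internal::HeapBase

class HeapRegistry {
 public:
  static void Register(HeapBase* heap) {
    std::lock_guard<std::mutex> guard(mutex_);
    storage_.push_back(heap);
  }
  static void Unregister(HeapBase* heap) {
    std::lock_guard<std::mutex> guard(mutex_);
    storage_.erase(std::find(storage_.begin(), storage_.end(), heap));
  }

  // RAII helper: a heap owns one Subscription, so constructing the heap
  // registers it and destroying the heap unregisters it automatically.
  class Subscription {
   public:
    explicit Subscription(HeapBase& heap) : heap_(heap) { Register(&heap_); }
    ~Subscription() { Unregister(&heap_); }

   private:
    HeapBase& heap_;
  };

 private:
  static std::mutex mutex_;
  static std::vector<HeapBase*> storage_;
};

std::mutex HeapRegistry::mutex_;
std::vector<HeapBase*> HeapRegistry::storage_;

TryFromManagedPointer then only has to walk the registered heaps and ask each page backend whether it owns the address, returning the first match or null.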
diff --git a/chromium/v8/src/heap/cppgc/sanitizers.h b/chromium/v8/src/heap/cppgc/sanitizers.h
deleted file mode 100644
index c3a8ff684d7..00000000000
--- a/chromium/v8/src/heap/cppgc/sanitizers.h
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_HEAP_CPPGC_SANITIZERS_H_
-#define V8_HEAP_CPPGC_SANITIZERS_H_
-
-#include <stdint.h>
-#include <string.h>
-
-#include "src/base/macros.h"
-
-//
-// TODO(chromium:1056170): Find a place in base for sanitizer support.
-//
-
-#ifdef V8_USE_ADDRESS_SANITIZER
-
-#include <sanitizer/asan_interface.h>
-
-#define NO_SANITIZE_ADDRESS __attribute__((no_sanitize_address))
-#if !defined(ASAN_POISON_MEMORY_REGION) || !defined(ASAN_UNPOISON_MEMORY_REGION)
-#error "ASAN_POISON_MEMORY_REGION must be defined"
-#endif
-
-#else // !V8_USE_ADDRESS_SANITIZER
-
-#define NO_SANITIZE_ADDRESS
-#define ASAN_POISON_MEMORY_REGION(addr, size) ((void)(addr), (void)(size))
-#define ASAN_UNPOISON_MEMORY_REGION(addr, size) ((void)(addr), (void)(size))
-
-#endif // V8_USE_ADDRESS_SANITIZER
-
-#ifdef V8_USE_MEMORY_SANITIZER
-
-#include <sanitizer/msan_interface.h>
-
-#define MSAN_POISON(addr, size) __msan_allocated_memory(addr, size)
-#define MSAN_UNPOISON(addr, size) __msan_unpoison(addr, size)
-
-#else // !V8_USE_MEMORY_SANITIZER
-
-#define MSAN_POISON(addr, size) ((void)(addr), (void)(size))
-#define MSAN_UNPOISON(addr, size) ((void)(addr), (void)(size))
-
-#endif // V8_USE_MEMORY_SANITIZER
-
-// API for newly allocated or reclaimed memory.
-#if defined(V8_USE_MEMORY_SANITIZER)
-#define SET_MEMORY_ACCESSIBLE(address, size) MSAN_UNPOISON(address, size);
-#define SET_MEMORY_INACCESSIBLE(address, size) \
- memset((address), 0, (size)); \
- MSAN_POISON((address), (size))
-#elif defined(V8_USE_ADDRESS_SANITIZER)
-#define SET_MEMORY_ACCESSIBLE(address, size) \
- ASAN_UNPOISON_MEMORY_REGION(address, size);
-#define SET_MEMORY_INACCESSIBLE(address, size) \
- memset((address), 0, (size)); \
- ASAN_POISON_MEMORY_REGION(address, size)
-#elif DEBUG
-#define SET_MEMORY_ACCESSIBLE(address, size) memset((address), 0, (size))
-#define SET_MEMORY_INACCESSIBLE(address, size) \
- ::cppgc::internal::ZapMemory((address), (size));
-#else
-#define SET_MEMORY_ACCESSIBLE(address, size) ((void)(address), (void)(size))
-#define SET_MEMORY_INACCESSIBLE(address, size) memset((address), 0, (size))
-#endif
-
-namespace cppgc {
-namespace internal {
-
-inline void ZapMemory(void* address, size_t size) {
- // The lowest bit of the zapped value should be 0 so that zapped object
- // are never viewed as fully constructed objects.
- static constexpr uint8_t kZappedValue = 0xdc;
- memset(address, kZappedValue, size);
-}
-
-} // namespace internal
-} // namespace cppgc
-
-#endif // V8_HEAP_CPPGC_SANITIZERS_H_
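With sanitizers.h gone, the SET_MEMORY_* macros are replaced by functions (the sweeper changes below call SetMemoryInaccessible from src/heap/cppgc/memory.h). The following is a hedged sketch of the plain-DEBUG flavour only, without the ASAN/MSAN branches; the names mirror the new call sites but the body here is illustrative:

#include <cstddef>
#include <cstring>

// Low bit is 0 so a zapped object is never mistaken for a fully constructed one.
constexpr unsigned char kZappedValue = 0xdc;

inline void ZapMemory(void* address, size_t size) {
  std::memset(address, kZappedValue, size);
}

inline void SetMemoryInaccessible(void* address, size_t size) {
#ifdef DEBUG
  ZapMemory(address, size);       // debug: poison with a recognizable pattern
#else
  std::memset(address, 0, size);  // release: just clear the payload
#endif
}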
diff --git a/chromium/v8/src/heap/cppgc/stats-collector.h b/chromium/v8/src/heap/cppgc/stats-collector.h
index 2a8583c7304..4709f227035 100644
--- a/chromium/v8/src/heap/cppgc/stats-collector.h
+++ b/chromium/v8/src/heap/cppgc/stats-collector.h
@@ -39,6 +39,7 @@ namespace internal {
V(MarkTransitiveClosure) \
V(MarkTransitiveClosureWithDeadline) \
V(MarkFlushEphemerons) \
+ V(MarkOnAllocation) \
V(MarkProcessBailOutObjects) \
V(MarkProcessMarkingWorklist) \
V(MarkProcessWriteBarrierWorklist) \
@@ -52,6 +53,7 @@ namespace internal {
V(MarkVisitRememberedSets) \
V(SweepInvokePreFinalizers) \
V(SweepIdleStep) \
+ V(SweepInTask) \
V(SweepOnAllocation) \
V(SweepFinalize)
@@ -256,7 +258,7 @@ class V8_EXPORT_PRIVATE StatsCollector final {
void NotifyAllocation(size_t);
void NotifyExplicitFree(size_t);
- // Safepoints should only be invoked when garabge collections are possible.
+ // Safepoints should only be invoked when garbage collections are possible.
// This is necessary as increments and decrements are reported as close to
// their actual allocation/reclamation as possible.
void NotifySafePointForConservativeCollection();
diff --git a/chromium/v8/src/heap/cppgc/sweeper.cc b/chromium/v8/src/heap/cppgc/sweeper.cc
index 937a52afc59..3e740f7924a 100644
--- a/chromium/v8/src/heap/cppgc/sweeper.cc
+++ b/chromium/v8/src/heap/cppgc/sweeper.cc
@@ -18,10 +18,10 @@
#include "src/heap/cppgc/heap-page.h"
#include "src/heap/cppgc/heap-space.h"
#include "src/heap/cppgc/heap-visitor.h"
+#include "src/heap/cppgc/memory.h"
#include "src/heap/cppgc/object-poisoner.h"
#include "src/heap/cppgc/object-start-bitmap.h"
#include "src/heap/cppgc/raw-heap.h"
-#include "src/heap/cppgc/sanitizers.h"
#include "src/heap/cppgc/stats-collector.h"
#include "src/heap/cppgc/task-handle.h"
@@ -133,7 +133,7 @@ class InlinedFinalizationBuilder final {
void AddFinalizer(HeapObjectHeader* header, size_t size) {
header->Finalize();
- SET_MEMORY_INACCESSIBLE(header, size);
+ SetMemoryInaccessible(header, size);
}
void AddFreeListEntry(Address start, size_t size) {
@@ -161,10 +161,7 @@ class DeferredFinalizationBuilder final {
result_.unfinalized_objects.push_back({header});
found_finalizer_ = true;
} else {
- // Unmarked memory may have been poisoned. In the non-concurrent case this
- // is taken care of by finalizing a header.
- ASAN_UNPOISON_MEMORY_REGION(header, size);
- SET_MEMORY_INACCESSIBLE(header, size);
+ SetMemoryInaccessible(header, size);
}
}
@@ -197,15 +194,16 @@ typename FinalizationBuilder::ResultType SweepNormalPage(NormalPage* page) {
bitmap.Clear();
size_t largest_new_free_list_entry = 0;
+ size_t live_bytes = 0;
Address start_of_gap = page->PayloadStart();
for (Address begin = page->PayloadStart(), end = page->PayloadEnd();
begin != end;) {
HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(begin);
- const size_t size = header->GetSize();
+ const size_t size = header->AllocatedSize();
// Check if this is a free list entry.
if (header->IsFree<kAtomicAccess>()) {
- SET_MEMORY_INACCESSIBLE(header, std::min(kFreeListEntrySize, size));
+ SetMemoryInaccessible(header, std::min(kFreeListEntrySize, size));
begin += size;
continue;
}
@@ -229,6 +227,7 @@ typename FinalizationBuilder::ResultType SweepNormalPage(NormalPage* page) {
bitmap.SetBit(begin);
begin += size;
start_of_gap = begin;
+ live_bytes += size;
}
if (start_of_gap != page->PayloadStart() &&
@@ -237,6 +236,7 @@ typename FinalizationBuilder::ResultType SweepNormalPage(NormalPage* page) {
start_of_gap, static_cast<size_t>(page->PayloadEnd() - start_of_gap));
bitmap.SetBit(start_of_gap);
}
+ page->SetAllocatedBytesAtLastGC(live_bytes);
const bool is_empty = (start_of_gap == page->PayloadStart());
return builder.GetResult(is_empty, largest_new_free_list_entry);
@@ -290,9 +290,9 @@ class SweepFinalizer final {
// Call finalizers.
for (HeapObjectHeader* object : page_state->unfinalized_objects) {
- const size_t size = object->GetSize();
+ const size_t size = object->AllocatedSize();
object->Finalize();
- SET_MEMORY_INACCESSIBLE(object, size);
+ SetMemoryInaccessible(object, size);
}
// Unmap page if empty.
@@ -665,6 +665,33 @@ class Sweeper::SweeperImpl final {
bool IsSweepingInProgress() const { return is_in_progress_; }
+ bool PerformSweepOnMutatorThread(double deadline_in_seconds,
+ StatsCollector::ScopeId internal_scope_id) {
+ if (!is_in_progress_) return true;
+
+ MutatorThreadSweepingScope sweeping_in_progresss(*this);
+
+ bool sweep_complete;
+ {
+ StatsCollector::EnabledScope stats_scope(
+ stats_collector_, StatsCollector::kIncrementalSweep);
+
+ MutatorThreadSweeper sweeper(&space_states_, platform_);
+ {
+ StatsCollector::EnabledScope stats_scope(
+ stats_collector_, internal_scope_id, "deltaInSeconds",
+ deadline_in_seconds - platform_->MonotonicallyIncreasingTime());
+
+ sweep_complete = sweeper.SweepWithDeadline(deadline_in_seconds);
+ }
+ if (sweep_complete) {
+ FinalizeSweep();
+ }
+ }
+ if (sweep_complete) NotifyDone();
+ return sweep_complete;
+ }
+
private:
class MutatorThreadSweepingScope final {
public:
@@ -701,33 +728,12 @@ class Sweeper::SweeperImpl final {
private:
void Run(double deadline_in_seconds) override {
- if (handle_.IsCanceled() || !sweeper_->is_in_progress_) return;
+ if (handle_.IsCanceled()) return;
- MutatorThreadSweepingScope sweeping_in_progresss(*sweeper_);
-
- bool sweep_complete;
- {
- StatsCollector::EnabledScope stats_scope(
- sweeper_->stats_collector_, StatsCollector::kIncrementalSweep);
-
- MutatorThreadSweeper sweeper(&sweeper_->space_states_,
- sweeper_->platform_);
- {
- StatsCollector::EnabledScope stats_scope(
- sweeper_->stats_collector_, StatsCollector::kSweepIdleStep,
- "idleDeltaInSeconds",
- (deadline_in_seconds -
- sweeper_->platform_->MonotonicallyIncreasingTime()));
-
- sweep_complete = sweeper.SweepWithDeadline(deadline_in_seconds);
- }
- if (sweep_complete) {
- sweeper_->FinalizeSweep();
- } else {
- sweeper_->ScheduleIncrementalSweeping();
- }
+ if (!sweeper_->PerformSweepOnMutatorThread(
+ deadline_in_seconds, StatsCollector::kSweepIdleStep)) {
+ sweeper_->ScheduleIncrementalSweeping();
}
- if (sweep_complete) sweeper_->NotifyDone();
}
Handle GetHandle() const { return handle_; }
@@ -807,5 +813,10 @@ bool Sweeper::IsSweepingInProgress() const {
return impl_->IsSweepingInProgress();
}
+bool Sweeper::PerformSweepOnMutatorThread(double deadline_in_seconds) {
+ return impl_->PerformSweepOnMutatorThread(deadline_in_seconds,
+ StatsCollector::kSweepInTask);
+}
+
} // namespace internal
} // namespace cppgc
diff --git a/chromium/v8/src/heap/cppgc/sweeper.h b/chromium/v8/src/heap/cppgc/sweeper.h
index 4c77ec69173..a13962aa914 100644
--- a/chromium/v8/src/heap/cppgc/sweeper.h
+++ b/chromium/v8/src/heap/cppgc/sweeper.h
@@ -9,6 +9,7 @@
#include "include/cppgc/heap.h"
#include "src/base/macros.h"
+#include "src/base/platform/time.h"
namespace cppgc {
@@ -49,6 +50,9 @@ class V8_EXPORT_PRIVATE Sweeper final {
bool IsSweepingOnMutatorThread() const;
bool IsSweepingInProgress() const;
+ // Assist with sweeping. Returns true if sweeping is done.
+ bool PerformSweepOnMutatorThread(double deadline_in_seconds);
+
private:
void WaitForConcurrentSweepingForTesting();
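The refactor above pulls the body of the incremental sweeping task into PerformSweepOnMutatorThread() so that the idle task (kSweepIdleStep) and direct sweeping assists (kSweepInTask) share one implementation. A simplified sketch of how a caller uses the new entry point, with stand-in interfaces rather than the real task plumbing:

struct SweeperLike {
  // Returns true when sweeping finished within the deadline.
  bool PerformSweepOnMutatorThread(double deadline_in_seconds);
  void ScheduleIncrementalSweeping();
};

// Body of an incremental sweeping step: sweep until the deadline and only
// reschedule another step if sweeping is not yet done.
void RunIdleStep(SweeperLike& sweeper, double deadline_in_seconds) {
  if (!sweeper.PerformSweepOnMutatorThread(deadline_in_seconds)) {
    sweeper.ScheduleIncrementalSweeping();
  }
}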
diff --git a/chromium/v8/src/heap/cppgc/trace-trait.cc b/chromium/v8/src/heap/cppgc/trace-trait.cc
index bf3759881b4..df14e3698be 100644
--- a/chromium/v8/src/heap/cppgc/trace-trait.cc
+++ b/chromium/v8/src/heap/cppgc/trace-trait.cc
@@ -18,9 +18,10 @@ TraceDescriptor TraceTraitFromInnerAddressImpl::GetTraceDescriptor(
page->SynchronizedLoad();
const HeapObjectHeader& header =
page->ObjectHeaderFromInnerAddress<AccessMode::kAtomic>(address);
- return {header.Payload(), GlobalGCInfoTable::GCInfoFromIndex(
- header.GetGCInfoIndex<AccessMode::kAtomic>())
- .trace};
+ return {header.ObjectStart(),
+ GlobalGCInfoTable::GCInfoFromIndex(
+ header.GetGCInfoIndex<AccessMode::kAtomic>())
+ .trace};
}
} // namespace internal
diff --git a/chromium/v8/src/heap/cppgc/visitor.cc b/chromium/v8/src/heap/cppgc/visitor.cc
index 33786f6fce3..9d90e4ab3e3 100644
--- a/chromium/v8/src/heap/cppgc/visitor.cc
+++ b/chromium/v8/src/heap/cppgc/visitor.cc
@@ -4,11 +4,12 @@
#include "src/heap/cppgc/visitor.h"
+#include "src/base/sanitizer/msan.h"
#include "src/heap/cppgc/gc-info-table.h"
#include "src/heap/cppgc/heap-object-header.h"
#include "src/heap/cppgc/heap-page.h"
+#include "src/heap/cppgc/object-view.h"
#include "src/heap/cppgc/page-memory.h"
-#include "src/heap/cppgc/sanitizers.h"
namespace cppgc {
@@ -29,15 +30,15 @@ namespace {
void TraceConservatively(ConservativeTracingVisitor* conservative_visitor,
const HeapObjectHeader& header) {
- Address* payload = reinterpret_cast<Address*>(header.Payload());
- const size_t payload_size = header.GetSize();
- for (size_t i = 0; i < (payload_size / sizeof(Address)); ++i) {
- Address maybe_ptr = payload[i];
+ const auto object_view = ObjectView(header);
+ Address* object = reinterpret_cast<Address*>(object_view.Start());
+ for (size_t i = 0; i < (object_view.Size() / sizeof(Address)); ++i) {
+ Address maybe_ptr = object[i];
#if defined(MEMORY_SANITIZER)
- // |payload| may be uninitialized by design or just contain padding bytes.
+ // |object| may be uninitialized by design or just contain padding bytes.
// Copy into a local variable that is not poisoned for conservative marking.
// Copy into a temporary variable to maintain the original MSAN state.
- MSAN_UNPOISON(&maybe_ptr, sizeof(maybe_ptr));
+ MSAN_MEMORY_IS_INITIALIZED(&maybe_ptr, sizeof(maybe_ptr));
#endif
if (maybe_ptr) {
conservative_visitor->TraceConservativelyIfNeeded(maybe_ptr);
@@ -49,8 +50,6 @@ void TraceConservatively(ConservativeTracingVisitor* conservative_visitor,
void ConservativeTracingVisitor::TraceConservativelyIfNeeded(
const void* address) {
- // TODO(chromium:1056170): Add page bloom filter
-
const BasePage* page = reinterpret_cast<const BasePage*>(
page_backend_.Lookup(static_cast<ConstAddress>(address)));
@@ -78,8 +77,8 @@ void ConservativeTracingVisitor::TraceConservativelyIfNeeded(
void ConservativeTracingVisitor::VisitFullyConstructedConservatively(
HeapObjectHeader& header) {
visitor_.Visit(
- header.Payload(),
- {header.Payload(),
+ header.ObjectStart(),
+ {header.ObjectStart(),
GlobalGCInfoTable::GCInfoFromIndex(header.GetGCInfoIndex()).trace});
}
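TraceConservatively now reads the scanned region through an ObjectView, which, judging from the call sites, abstracts over where an object's payload starts and how large it is. A standalone sketch of the word-by-word scan, using a hypothetical view type:

#include <cstddef>

struct ObjectViewLike {
  void* start;       // first byte of the object
  std::size_t size;  // size in bytes, assumed pointer-aligned
};

// Treat every pointer-sized slot as a potential heap pointer and hand the
// non-null ones to the callback, which decides whether they really point
// into the managed heap.
template <typename Callback>
void ScanConservatively(const ObjectViewLike& view, Callback&& maybe_trace) {
  void** slots = static_cast<void**>(view.start);
  for (std::size_t i = 0; i < view.size / sizeof(void*); ++i) {
    if (slots[i] != nullptr) maybe_trace(slots[i]);
  }
}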
diff --git a/chromium/v8/src/heap/embedder-tracing.cc b/chromium/v8/src/heap/embedder-tracing.cc
index 4fd747a964e..72bdde571a6 100644
--- a/chromium/v8/src/heap/embedder-tracing.cc
+++ b/chromium/v8/src/heap/embedder-tracing.cc
@@ -17,6 +17,7 @@ void LocalEmbedderHeapTracer::SetRemoteTracer(EmbedderHeapTracer* tracer) {
if (remote_tracer_) remote_tracer_->isolate_ = nullptr;
remote_tracer_ = tracer;
+ default_embedder_roots_handler_.SetTracer(tracer);
if (remote_tracer_)
remote_tracer_->isolate_ = reinterpret_cast<v8::Isolate*>(isolate_);
}
@@ -75,9 +76,8 @@ void LocalEmbedderHeapTracer::SetEmbedderStackStateForNextFinalization(
if (!InUse()) return;
embedder_stack_state_ = stack_state;
- if (EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers == stack_state) {
- remote_tracer()->NotifyEmptyEmbedderStack();
- }
+ if (EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers == stack_state)
+ NotifyEmptyEmbedderStack();
}
namespace {
@@ -164,5 +164,33 @@ void LocalEmbedderHeapTracer::StartIncrementalMarkingIfNeeded() {
}
}
+void LocalEmbedderHeapTracer::NotifyEmptyEmbedderStack() {
+ auto* overriden_stack_state = isolate_->heap()->overriden_stack_state();
+ if (overriden_stack_state &&
+ (*overriden_stack_state ==
+ cppgc::EmbedderStackState::kMayContainHeapPointers))
+ return;
+
+ isolate_->global_handles()->NotifyEmptyEmbedderStack();
+}
+
+bool DefaultEmbedderRootsHandler::IsRoot(
+ const v8::TracedReference<v8::Value>& handle) {
+ return !tracer_ || tracer_->IsRootForNonTracingGC(handle);
+}
+
+bool DefaultEmbedderRootsHandler::IsRoot(
+ const v8::TracedGlobal<v8::Value>& handle) {
+ return !tracer_ || tracer_->IsRootForNonTracingGC(handle);
+}
+
+void DefaultEmbedderRootsHandler::ResetRoot(
+ const v8::TracedReference<v8::Value>& handle) {
+ // Resetting is only called when IsRoot() returns false, which
+ // can only happen when the EmbedderHeapTracer is set at the API level.
+ DCHECK(tracer_);
+ tracer_->ResetHandleInNonTracingGC(handle);
+}
+
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/heap/embedder-tracing.h b/chromium/v8/src/heap/embedder-tracing.h
index 8a1b14a32b4..befb1a7e7ac 100644
--- a/chromium/v8/src/heap/embedder-tracing.h
+++ b/chromium/v8/src/heap/embedder-tracing.h
@@ -16,6 +16,19 @@ namespace internal {
class Heap;
class JSObject;
+class V8_EXPORT_PRIVATE DefaultEmbedderRootsHandler final
+ : public EmbedderRootsHandler {
+ public:
+ bool IsRoot(const v8::TracedReference<v8::Value>& handle) final;
+ bool IsRoot(const v8::TracedGlobal<v8::Value>& handle) final;
+ void ResetRoot(const v8::TracedReference<v8::Value>& handle) final;
+
+ void SetTracer(EmbedderHeapTracer* tracer) { tracer_ = tracer; }
+
+ private:
+ EmbedderHeapTracer* tracer_ = nullptr;
+};
+
class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final {
public:
using WrapperInfo = std::pair<void*, void*>;
@@ -74,21 +87,6 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final {
bool Trace(double deadline);
bool IsRemoteTracingDone();
- bool IsRootForNonTracingGC(const v8::TracedGlobal<v8::Value>& handle) {
- return !InUse() || remote_tracer_->IsRootForNonTracingGC(handle);
- }
-
- bool IsRootForNonTracingGC(const v8::TracedReference<v8::Value>& handle) {
- return !InUse() || remote_tracer_->IsRootForNonTracingGC(handle);
- }
-
- void ResetHandleInNonTracingGC(const v8::TracedReference<v8::Value>& handle) {
- // Resetting is only called when IsRootForNonTracingGC returns false which
- // can only happen the EmbedderHeapTracer is set on API level.
- DCHECK(InUse());
- remote_tracer_->ResetHandleInNonTracingGC(handle);
- }
-
bool ShouldFinalizeIncrementalMarking() {
return !FLAG_incremental_marking_wrappers || !InUse() ||
(IsRemoteTracingDone() && embedder_worklist_empty_);
@@ -130,6 +128,12 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final {
void UpdateRemoteStats(size_t, double);
+ DefaultEmbedderRootsHandler& default_embedder_roots_handler() {
+ return default_embedder_roots_handler_;
+ }
+
+ void NotifyEmptyEmbedderStack();
+
private:
static constexpr size_t kEmbedderAllocatedThreshold = 128 * KB;
@@ -147,6 +151,7 @@ class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final {
Isolate* const isolate_;
EmbedderHeapTracer* remote_tracer_ = nullptr;
+ DefaultEmbedderRootsHandler default_embedder_roots_handler_;
EmbedderHeapTracer::EmbedderStackState embedder_stack_state_ =
EmbedderHeapTracer::EmbedderStackState::kMayContainHeapPointers;
@@ -183,11 +188,8 @@ class V8_EXPORT_PRIVATE V8_NODISCARD EmbedderStackStateScope final {
: local_tracer_(local_tracer),
old_stack_state_(local_tracer_->embedder_stack_state_) {
local_tracer_->embedder_stack_state_ = stack_state;
- if (EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers ==
- stack_state) {
- if (local_tracer->remote_tracer())
- local_tracer->remote_tracer()->NotifyEmptyEmbedderStack();
- }
+ if (EmbedderHeapTracer::EmbedderStackState::kNoHeapPointers == stack_state)
+ local_tracer_->NotifyEmptyEmbedderStack();
}
~EmbedderStackStateScope() {
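DefaultEmbedderRootsHandler replaces the IsRootForNonTracingGC/ResetHandleInNonTracingGC helpers that previously lived on LocalEmbedderHeapTracer: it delegates to the remote tracer and defaults to "is a root" when none is attached. A stripped-down sketch of that delegation rule, with placeholder types instead of TracedReference:

struct TracerLike {
  bool IsRootForNonTracingGC(const void* handle);
  void ResetHandleInNonTracingGC(const void* handle);
};

struct RootsHandlerLike {
  TracerLike* tracer = nullptr;  // set via SetTracer() when a tracer attaches

  bool IsRoot(const void* handle) {
    // Without a tracer, conservatively keep every handle alive.
    return tracer == nullptr || tracer->IsRootForNonTracingGC(handle);
  }

  void ResetRoot(const void* handle) {
    // Only reachable when IsRoot() returned false, i.e. a tracer is attached.
    tracer->ResetHandleInNonTracingGC(handle);
  }
};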
diff --git a/chromium/v8/src/heap/factory-base-inl.h b/chromium/v8/src/heap/factory-base-inl.h
index 6f218b8248c..6c1cede212c 100644
--- a/chromium/v8/src/heap/factory-base-inl.h
+++ b/chromium/v8/src/heap/factory-base-inl.h
@@ -6,9 +6,10 @@
#define V8_HEAP_FACTORY_BASE_INL_H_
#include "src/heap/factory-base.h"
-
#include "src/numbers/conversions.h"
#include "src/objects/heap-number.h"
+#include "src/objects/map.h"
+#include "src/objects/slots-inl.h"
#include "src/objects/smi.h"
#include "src/roots/roots.h"
@@ -93,6 +94,29 @@ Handle<HeapNumber> FactoryBase<Impl>::NewHeapNumberWithHoleNaN() {
return NewHeapNumberFromBits<allocation>(kHoleNanInt64);
}
+template <typename Impl>
+template <typename StructType>
+StructType FactoryBase<Impl>::NewStructInternal(InstanceType type,
+ AllocationType allocation) {
+ ReadOnlyRoots roots = read_only_roots();
+ Map map = Map::GetInstanceTypeMap(roots, type);
+ int size = StructType::kSize;
+ return StructType::cast(NewStructInternal(roots, map, size, allocation));
+}
+
+template <typename Impl>
+Struct FactoryBase<Impl>::NewStructInternal(ReadOnlyRoots roots, Map map,
+ int size,
+ AllocationType allocation) {
+ DCHECK_EQ(size, map.instance_size());
+ HeapObject result = AllocateRawWithImmortalMap(size, allocation, map);
+ Struct str = Struct::cast(result);
+ Object value = roots.undefined_value();
+ int length = (size >> kTaggedSizeLog2) - 1;
+ MemsetTagged(str.RawField(Struct::kHeaderSize), value, length);
+ return str;
+}
+
} // namespace internal
} // namespace v8
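The templated NewStructInternal<StructType> overload is what lets the factory call sites below drop their explicit Type::cast(NewStruct(...)) wrappers: the helper picks the map from the instance type, allocates, fills the fields with undefined, and hands back the concrete struct type. A rough standalone analogue of the call-site change (placeholder types, no real allocation):

struct AccessorPairLike {
  void set_getter(int) {}
  void set_setter(int) {}
};

// Stand-in for the typed helper: returns the concrete type directly.
template <typename StructType>
StructType NewStructTyped() {
  return StructType{};  // placeholder for AllocateRawWithImmortalMap + init
}

void Example() {
  // Before: Handle<AccessorPair>::cast(NewStruct(...)), then a cast-heavy raw view.
  // After: the helper already returns the typed struct, so fields can be set directly.
  auto accessors = NewStructTyped<AccessorPairLike>();
  accessors.set_getter(0);
  accessors.set_setter(0);
}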
diff --git a/chromium/v8/src/heap/factory-base.cc b/chromium/v8/src/heap/factory-base.cc
index 45577f7bf91..df78716a8de 100644
--- a/chromium/v8/src/heap/factory-base.cc
+++ b/chromium/v8/src/heap/factory-base.cc
@@ -53,36 +53,30 @@ FactoryBase<LocalFactory>::NewHeapNumber<AllocationType::kOld>();
template <typename Impl>
Handle<Struct> FactoryBase<Impl>::NewStruct(InstanceType type,
AllocationType allocation) {
- return handle(NewStructInternal(type, allocation), isolate());
-}
-
-template <typename Impl>
-Struct FactoryBase<Impl>::NewStructInternal(InstanceType type,
- AllocationType allocation) {
- Map map = Map::GetInstanceTypeMap(read_only_roots(), type);
+ ReadOnlyRoots roots = read_only_roots();
+ Map map = Map::GetInstanceTypeMap(roots, type);
int size = map.instance_size();
- HeapObject result = AllocateRawWithImmortalMap(size, allocation, map);
- Struct str = Struct::cast(result);
- str.InitializeBody(size);
- return str;
+ return handle(NewStructInternal(roots, map, size, allocation), isolate());
}
template <typename Impl>
Handle<AccessorPair> FactoryBase<Impl>::NewAccessorPair() {
- Handle<AccessorPair> accessors = Handle<AccessorPair>::cast(
- NewStruct(ACCESSOR_PAIR_TYPE, AllocationType::kOld));
- AccessorPair raw = *accessors;
+ auto accessors =
+ NewStructInternal<AccessorPair>(ACCESSOR_PAIR_TYPE, AllocationType::kOld);
DisallowGarbageCollection no_gc;
- raw.set_getter(read_only_roots().null_value(), SKIP_WRITE_BARRIER);
- raw.set_setter(read_only_roots().null_value(), SKIP_WRITE_BARRIER);
- return accessors;
+ accessors.set_getter(read_only_roots().null_value(), SKIP_WRITE_BARRIER);
+ accessors.set_setter(read_only_roots().null_value(), SKIP_WRITE_BARRIER);
+ return handle(accessors, isolate());
}
template <typename Impl>
Handle<FixedArray> FactoryBase<Impl>::NewFixedArray(int length,
AllocationType allocation) {
- DCHECK_LE(0, length);
if (length == 0) return impl()->empty_fixed_array();
+ if (length < 0 || length > FixedArray::kMaxLength) {
+ FATAL("Fatal JavaScript invalid size error %d", length);
+ UNREACHABLE();
+ }
return NewFixedArrayWithFiller(
read_only_roots().fixed_array_map_handle(), length,
read_only_roots().undefined_value_handle(), allocation);
@@ -128,7 +122,8 @@ Handle<FixedArrayBase> FactoryBase<Impl>::NewFixedDoubleArray(
int length, AllocationType allocation) {
if (length == 0) return impl()->empty_fixed_array();
if (length < 0 || length > FixedDoubleArray::kMaxLength) {
- isolate()->FatalProcessOutOfHeapMemory("invalid array length");
+ FATAL("Fatal JavaScript invalid size error %d", length);
+ UNREACHABLE();
}
int size = FixedDoubleArray::SizeFor(length);
Map map = read_only_roots().fixed_double_array_map();
@@ -172,7 +167,8 @@ template <typename Impl>
Handle<ByteArray> FactoryBase<Impl>::NewByteArray(int length,
AllocationType allocation) {
if (length < 0 || length > ByteArray::kMaxLength) {
- isolate()->FatalProcessOutOfHeapMemory("invalid array length");
+ FATAL("Fatal JavaScript invalid size error %d", length);
+ UNREACHABLE();
}
int size = ByteArray::SizeFor(length);
HeapObject result = AllocateRawWithImmortalMap(
@@ -189,7 +185,8 @@ Handle<BytecodeArray> FactoryBase<Impl>::NewBytecodeArray(
int length, const byte* raw_bytecodes, int frame_size, int parameter_count,
Handle<FixedArray> constant_pool) {
if (length < 0 || length > BytecodeArray::kMaxLength) {
- isolate()->FatalProcessOutOfHeapMemory("invalid array length");
+ FATAL("Fatal JavaScript invalid size error %d", length);
+ UNREACHABLE();
}
// Bytecode array is AllocationType::kOld, so constant pool array should be
// too.
@@ -230,8 +227,8 @@ Handle<Script> FactoryBase<Impl>::NewScriptWithId(
DCHECK(source->IsString() || source->IsUndefined());
// Create and initialize script object.
ReadOnlyRoots roots = read_only_roots();
- Handle<Script> script =
- Handle<Script>::cast(NewStruct(SCRIPT_TYPE, AllocationType::kOld));
+ Handle<Script> script = handle(
+ NewStructInternal<Script>(SCRIPT_TYPE, AllocationType::kOld), isolate());
{
DisallowGarbageCollection no_gc;
Script raw = *script;
@@ -243,8 +240,8 @@ Handle<Script> FactoryBase<Impl>::NewScriptWithId(
raw.set_context_data(roots.undefined_value(), SKIP_WRITE_BARRIER);
raw.set_type(Script::TYPE_NORMAL);
raw.set_line_ends(roots.undefined_value(), SKIP_WRITE_BARRIER);
- raw.set_eval_from_shared_or_wrapped_arguments(roots.undefined_value(),
- SKIP_WRITE_BARRIER);
+ raw.set_eval_from_shared_or_wrapped_arguments_or_sfi_table(
+ roots.undefined_value(), SKIP_WRITE_BARRIER);
raw.set_eval_from_position(0);
raw.set_shared_function_infos(roots.empty_weak_fixed_array(),
SKIP_WRITE_BARRIER);
@@ -394,14 +391,12 @@ template <typename Impl>
Handle<ArrayBoilerplateDescription>
FactoryBase<Impl>::NewArrayBoilerplateDescription(
ElementsKind elements_kind, Handle<FixedArrayBase> constant_values) {
- Handle<ArrayBoilerplateDescription> result =
- Handle<ArrayBoilerplateDescription>::cast(
- NewStruct(ARRAY_BOILERPLATE_DESCRIPTION_TYPE, AllocationType::kOld));
+ auto result = NewStructInternal<ArrayBoilerplateDescription>(
+ ARRAY_BOILERPLATE_DESCRIPTION_TYPE, AllocationType::kOld);
DisallowGarbageCollection no_gc;
- ArrayBoilerplateDescription raw = *result;
- raw.set_elements_kind(elements_kind);
- raw.set_constant_elements(*constant_values);
- return result;
+ result.set_elements_kind(elements_kind);
+ result.set_constant_elements(*constant_values);
+ return handle(result, isolate());
}
template <typename Impl>
@@ -409,15 +404,13 @@ Handle<RegExpBoilerplateDescription>
FactoryBase<Impl>::NewRegExpBoilerplateDescription(Handle<FixedArray> data,
Handle<String> source,
Smi flags) {
- Handle<RegExpBoilerplateDescription> result =
- Handle<RegExpBoilerplateDescription>::cast(NewStruct(
- REG_EXP_BOILERPLATE_DESCRIPTION_TYPE, AllocationType::kOld));
+ auto result = NewStructInternal<RegExpBoilerplateDescription>(
+ REG_EXP_BOILERPLATE_DESCRIPTION_TYPE, AllocationType::kOld);
DisallowGarbageCollection no_gc;
- RegExpBoilerplateDescription raw = *result;
- raw.set_data(*data);
- raw.set_source(*source);
- raw.set_flags(flags.value());
- return result;
+ result.set_data(*data);
+ result.set_source(*source);
+ result.set_flags(flags.value());
+ return handle(result, isolate());
}
template <typename Impl>
@@ -426,14 +419,12 @@ FactoryBase<Impl>::NewTemplateObjectDescription(
Handle<FixedArray> raw_strings, Handle<FixedArray> cooked_strings) {
DCHECK_EQ(raw_strings->length(), cooked_strings->length());
DCHECK_LT(0, raw_strings->length());
- Handle<TemplateObjectDescription> result =
- Handle<TemplateObjectDescription>::cast(
- NewStruct(TEMPLATE_OBJECT_DESCRIPTION_TYPE, AllocationType::kOld));
+ auto result = NewStructInternal<TemplateObjectDescription>(
+ TEMPLATE_OBJECT_DESCRIPTION_TYPE, AllocationType::kOld);
DisallowGarbageCollection no_gc;
- TemplateObjectDescription raw = *result;
- raw.set_raw_strings(*raw_strings);
- raw.set_cooked_strings(*cooked_strings);
- return result;
+ result.set_raw_strings(*raw_strings);
+ result.set_cooked_strings(*cooked_strings);
+ return handle(result, isolate());
}
template <typename Impl>
@@ -691,7 +682,8 @@ template <typename Impl>
Handle<FreshlyAllocatedBigInt> FactoryBase<Impl>::NewBigInt(
int length, AllocationType allocation) {
if (length < 0 || length > BigInt::kMaxLength) {
- isolate()->FatalProcessOutOfHeapMemory("invalid BigInt length");
+ FATAL("Fatal JavaScript invalid size error %d", length);
+ UNREACHABLE();
}
HeapObject result = AllocateRawWithImmortalMap(
BigInt::SizeFor(length), allocation, read_only_roots().bigint_map());
@@ -760,11 +752,11 @@ Handle<DescriptorArray> FactoryBase<Impl>::NewDescriptorArray(
template <typename Impl>
Handle<ClassPositions> FactoryBase<Impl>::NewClassPositions(int start,
int end) {
- Handle<ClassPositions> class_positions = Handle<ClassPositions>::cast(
- NewStruct(CLASS_POSITIONS_TYPE, AllocationType::kOld));
- class_positions->set_start(start);
- class_positions->set_end(end);
- return class_positions;
+ auto result = NewStructInternal<ClassPositions>(CLASS_POSITIONS_TYPE,
+ AllocationType::kOld);
+ result.set_start(start);
+ result.set_end(end);
+ return handle(result, isolate());
}
template <typename Impl>
@@ -825,7 +817,8 @@ template <typename Impl>
HeapObject FactoryBase<Impl>::AllocateRawFixedArray(int length,
AllocationType allocation) {
if (length < 0 || length > FixedArray::kMaxLength) {
- isolate()->FatalProcessOutOfHeapMemory("invalid array length");
+ FATAL("Fatal JavaScript invalid size error %d", length);
+ UNREACHABLE();
}
return AllocateRawArray(FixedArray::SizeFor(length), allocation);
}
@@ -834,7 +827,8 @@ template <typename Impl>
HeapObject FactoryBase<Impl>::AllocateRawWeakArrayList(
int capacity, AllocationType allocation) {
if (capacity < 0 || capacity > WeakArrayList::kMaxCapacity) {
- isolate()->FatalProcessOutOfHeapMemory("invalid array length");
+ FATAL("Fatal JavaScript invalid size error %d", capacity);
+ UNREACHABLE();
}
return AllocateRawArray(WeakArrayList::SizeForCapacity(capacity), allocation);
}
@@ -878,8 +872,9 @@ FactoryBase<Impl>::NewSwissNameDictionaryWithCapacity(
return read_only_roots().empty_swiss_property_dictionary_handle();
}
- if (capacity > SwissNameDictionary::MaxCapacity()) {
- isolate()->FatalProcessOutOfHeapMemory("invalid table size");
+ if (capacity < 0 || capacity > SwissNameDictionary::MaxCapacity()) {
+ FATAL("Fatal JavaScript invalid size error %d", capacity);
+ UNREACHABLE();
}
int meta_table_length = SwissNameDictionary::MetaTableSizeFor(capacity);
@@ -902,6 +897,18 @@ Handle<SwissNameDictionary> FactoryBase<Impl>::NewSwissNameDictionary(
SwissNameDictionary::CapacityFor(at_least_space_for), allocation);
}
+template <typename Impl>
+Handle<FunctionTemplateRareData>
+FactoryBase<Impl>::NewFunctionTemplateRareData() {
+ auto function_template_rare_data =
+ NewStructInternal<FunctionTemplateRareData>(
+ FUNCTION_TEMPLATE_RARE_DATA_TYPE, AllocationType::kOld);
+ DisallowGarbageCollection no_gc;
+ function_template_rare_data.set_c_function_overloads(
+ *impl()->empty_fixed_array(), SKIP_WRITE_BARRIER);
+ return handle(function_template_rare_data, isolate());
+}
+
// Instantiate FactoryBase for the two variants we want.
template class EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) FactoryBase<Factory>;
template class EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
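A recurring change in this file replaces FatalProcessOutOfHeapMemory("invalid array length") with a dedicated "invalid size" fatal error that reports the offending length and now also rejects negative values. A standalone sketch of that validation pattern (the limit and helper names are stand-ins, not V8 API):

#include <cstdio>
#include <cstdlib>

constexpr int kMaxLength = (1 << 30) - 1;  // hypothetical per-type limit

[[noreturn]] inline void FatalInvalidSize(int length) {
  std::fprintf(stderr, "Fatal JavaScript invalid size error %d\n", length);
  std::abort();
}

inline void CheckLength(int length) {
  // Reject both negative and over-large requests before allocating anything.
  if (length < 0 || length > kMaxLength) FatalInvalidSize(length);
}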
diff --git a/chromium/v8/src/heap/factory-base.h b/chromium/v8/src/heap/factory-base.h
index b964f6b2346..4e3d5efbe40 100644
--- a/chromium/v8/src/heap/factory-base.h
+++ b/chromium/v8/src/heap/factory-base.h
@@ -228,14 +228,19 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) FactoryBase
Handle<SwissNameDictionary> NewSwissNameDictionaryWithCapacity(
int capacity, AllocationType allocation);
+ Handle<FunctionTemplateRareData> NewFunctionTemplateRareData();
+
protected:
// Allocate memory for an uninitialized array (e.g., a FixedArray or similar).
HeapObject AllocateRawArray(int size, AllocationType allocation);
HeapObject AllocateRawFixedArray(int length, AllocationType allocation);
HeapObject AllocateRawWeakArrayList(int length, AllocationType allocation);
- Struct NewStructInternal(InstanceType type,
- AllocationType allocation = AllocationType::kYoung);
+ template <typename StructType>
+ inline StructType NewStructInternal(InstanceType type,
+ AllocationType allocation);
+ Struct NewStructInternal(ReadOnlyRoots roots, Map map, int size,
+ AllocationType allocation);
HeapObject AllocateRawWithImmortalMap(
int size, AllocationType allocation, Map map,
diff --git a/chromium/v8/src/heap/factory.cc b/chromium/v8/src/heap/factory.cc
index 0c89a3fa9cc..6f753d23b42 100644
--- a/chromium/v8/src/heap/factory.cc
+++ b/chromium/v8/src/heap/factory.cc
@@ -53,6 +53,7 @@
#include "src/objects/js-regexp-inl.h"
#include "src/objects/js-weak-refs-inl.h"
#include "src/objects/literal-objects-inl.h"
+#include "src/objects/megadom-handler-inl.h"
#include "src/objects/microtask-inl.h"
#include "src/objects/module-inl.h"
#include "src/objects/promise-inl.h"
@@ -153,9 +154,8 @@ MaybeHandle<Code> Factory::CodeBuilder::BuildInternal(
if (is_executable_) {
DCHECK(IsAligned(code->address(), kCodeAlignment));
DCHECK_IMPLIES(
- !V8_ENABLE_THIRD_PARTY_HEAP_BOOL &&
- !heap->memory_allocator()->code_range().is_empty(),
- heap->memory_allocator()->code_range().contains(code->address()));
+ !V8_ENABLE_THIRD_PARTY_HEAP_BOOL && !heap->code_region().is_empty(),
+ heap->code_region().contains(code->address()));
}
constexpr bool kIsNotOffHeapTrampoline = false;
@@ -314,8 +314,8 @@ Handle<HeapObject> Factory::NewFillerObject(int size, bool double_align,
}
Handle<PrototypeInfo> Factory::NewPrototypeInfo() {
- PrototypeInfo result = PrototypeInfo::cast(
- NewStructInternal(PROTOTYPE_INFO_TYPE, AllocationType::kOld));
+ auto result = NewStructInternal<PrototypeInfo>(PROTOTYPE_INFO_TYPE,
+ AllocationType::kOld);
DisallowGarbageCollection no_gc;
result.set_prototype_users(Smi::zero());
result.set_registry_slot(PrototypeInfo::UNREGISTERED);
@@ -326,8 +326,8 @@ Handle<PrototypeInfo> Factory::NewPrototypeInfo() {
Handle<EnumCache> Factory::NewEnumCache(Handle<FixedArray> keys,
Handle<FixedArray> indices) {
- EnumCache result =
- EnumCache::cast(NewStructInternal(ENUM_CACHE_TYPE, AllocationType::kOld));
+ auto result =
+ NewStructInternal<EnumCache>(ENUM_CACHE_TYPE, AllocationType::kOld);
DisallowGarbageCollection no_gc;
result.set_keys(*keys);
result.set_indices(*indices);
@@ -336,7 +336,7 @@ Handle<EnumCache> Factory::NewEnumCache(Handle<FixedArray> keys,
Handle<Tuple2> Factory::NewTuple2(Handle<Object> value1, Handle<Object> value2,
AllocationType allocation) {
- Tuple2 result = Tuple2::cast(NewStructInternal(TUPLE2_TYPE, allocation));
+ auto result = NewStructInternal<Tuple2>(TUPLE2_TYPE, allocation);
DisallowGarbageCollection no_gc;
result.set_value1(*value1);
result.set_value2(*value2);
@@ -345,8 +345,8 @@ Handle<Tuple2> Factory::NewTuple2(Handle<Object> value1, Handle<Object> value2,
Handle<BaselineData> Factory::NewBaselineData(
Handle<Code> code, Handle<HeapObject> function_data) {
- BaselineData baseline_data = BaselineData::cast(
- NewStructInternal(BASELINE_DATA_TYPE, AllocationType::kOld));
+ auto baseline_data =
+ NewStructInternal<BaselineData>(BASELINE_DATA_TYPE, AllocationType::kOld);
DisallowGarbageCollection no_gc;
baseline_data.set_baseline_code(*code);
baseline_data.set_data(*function_data);
@@ -410,20 +410,6 @@ MaybeHandle<FixedArray> Factory::TryNewFixedArray(
return handle(array, isolate());
}
-Handle<FixedArray> Factory::NewUninitializedFixedArray(int length) {
- if (length == 0) return empty_fixed_array();
- if (length < 0 || length > FixedArray::kMaxLength) {
- isolate()->heap()->FatalProcessOutOfMemory("invalid array length");
- }
-
- // TODO(ulan): As an experiment this temporarily returns an initialized fixed
- // array. After getting canary/performance coverage, either remove the
- // function or revert to returning uninitilized array.
- return NewFixedArrayWithFiller(read_only_roots().fixed_array_map_handle(),
- length, undefined_value(),
- AllocationType::kYoung);
-}
-
Handle<ClosureFeedbackCellArray> Factory::NewClosureFeedbackCellArray(
int length) {
if (length == 0) return empty_closure_feedback_cell_array();
@@ -557,9 +543,8 @@ Handle<NameDictionary> Factory::NewNameDictionary(int at_least_space_for) {
}
Handle<PropertyDescriptorObject> Factory::NewPropertyDescriptorObject() {
- PropertyDescriptorObject object =
- PropertyDescriptorObject::cast(NewStructInternal(
- PROPERTY_DESCRIPTOR_OBJECT_TYPE, AllocationType::kYoung));
+ auto object = NewStructInternal<PropertyDescriptorObject>(
+ PROPERTY_DESCRIPTOR_OBJECT_TYPE, AllocationType::kYoung);
DisallowGarbageCollection no_gc;
object.set_flags(0);
Oddball the_hole = read_only_roots().the_hole_value();
@@ -1095,7 +1080,7 @@ Handle<NativeContext> Factory::NewNativeContext() {
// The ExternalPointerTable is a C++ object.
context.AllocateExternalPointerEntries(isolate());
context.set_scope_info(*native_scope_info());
- context.set_previous(Context::unchecked_cast(Smi::zero()));
+ context.set_previous(Context());
context.set_extension(*undefined_value());
context.set_errors_thrown(Smi::zero());
context.set_math_random_index(Smi::zero());
@@ -1179,7 +1164,7 @@ Handle<Context> Factory::NewCatchContext(Handle<Context> previous,
isolate()->catch_context_map(), Context::SizeFor(variadic_part_length),
variadic_part_length, AllocationType::kYoung);
DisallowGarbageCollection no_gc;
- DCHECK(Heap::InYoungGeneration(context));
+ DCHECK_IMPLIES(!FLAG_single_generation, Heap::InYoungGeneration(context));
context.set_scope_info(*scope_info, SKIP_WRITE_BARRIER);
context.set_previous(*previous, SKIP_WRITE_BARRIER);
context.set(Context::THROWN_OBJECT_INDEX, *thrown_object, SKIP_WRITE_BARRIER);
@@ -1204,7 +1189,7 @@ Handle<Context> Factory::NewDebugEvaluateContext(Handle<Context> previous,
Context::SizeFor(variadic_part_length),
variadic_part_length, AllocationType::kYoung);
DisallowGarbageCollection no_gc;
- DCHECK(Heap::InYoungGeneration(context));
+ DCHECK_IMPLIES(!FLAG_single_generation, Heap::InYoungGeneration(context));
context.set_scope_info(*scope_info, SKIP_WRITE_BARRIER);
context.set_previous(*previous, SKIP_WRITE_BARRIER);
context.set_extension(*ext, SKIP_WRITE_BARRIER);
@@ -1227,7 +1212,7 @@ Handle<Context> Factory::NewWithContext(Handle<Context> previous,
isolate()->with_context_map(), Context::SizeFor(variadic_part_length),
variadic_part_length, AllocationType::kYoung);
DisallowGarbageCollection no_gc;
- DCHECK(Heap::InYoungGeneration(context));
+ DCHECK_IMPLIES(!FLAG_single_generation, Heap::InYoungGeneration(context));
context.set_scope_info(*scope_info, SKIP_WRITE_BARRIER);
context.set_previous(*previous, SKIP_WRITE_BARRIER);
context.set_extension(*extension, SKIP_WRITE_BARRIER);
@@ -1243,7 +1228,7 @@ Handle<Context> Factory::NewBlockContext(Handle<Context> previous,
isolate()->block_context_map(), Context::SizeFor(variadic_part_length),
variadic_part_length, AllocationType::kYoung);
DisallowGarbageCollection no_gc;
- DCHECK(Heap::InYoungGeneration(context));
+ DCHECK_IMPLIES(!FLAG_single_generation, Heap::InYoungGeneration(context));
context.set_scope_info(*scope_info, SKIP_WRITE_BARRIER);
context.set_previous(*previous, SKIP_WRITE_BARRIER);
return handle(context, isolate());
@@ -1256,7 +1241,7 @@ Handle<Context> Factory::NewBuiltinContext(Handle<NativeContext> native_context,
isolate()->function_context_map(), Context::SizeFor(variadic_part_length),
variadic_part_length, AllocationType::kYoung);
DisallowGarbageCollection no_gc;
- DCHECK(Heap::InYoungGeneration(context));
+ DCHECK_IMPLIES(!FLAG_single_generation, Heap::InYoungGeneration(context));
context.set_scope_info(read_only_roots().empty_scope_info(),
SKIP_WRITE_BARRIER);
context.set_previous(*native_context, SKIP_WRITE_BARRIER);
@@ -1265,15 +1250,15 @@ Handle<Context> Factory::NewBuiltinContext(Handle<NativeContext> native_context,
Handle<AliasedArgumentsEntry> Factory::NewAliasedArgumentsEntry(
int aliased_context_slot) {
- AliasedArgumentsEntry entry = AliasedArgumentsEntry::cast(
- NewStructInternal(ALIASED_ARGUMENTS_ENTRY_TYPE, AllocationType::kYoung));
+ auto entry = NewStructInternal<AliasedArgumentsEntry>(
+ ALIASED_ARGUMENTS_ENTRY_TYPE, AllocationType::kYoung);
entry.set_aliased_context_slot(aliased_context_slot);
return handle(entry, isolate());
}
Handle<AccessorInfo> Factory::NewAccessorInfo() {
- AccessorInfo info = AccessorInfo::cast(
- NewStructInternal(ACCESSOR_INFO_TYPE, AllocationType::kOld));
+ auto info =
+ NewStructInternal<AccessorInfo>(ACCESSOR_INFO_TYPE, AllocationType::kOld);
DisallowGarbageCollection no_gc;
info.set_name(*empty_string(), SKIP_WRITE_BARRIER);
info.set_flags(0); // Must clear the flags, it was initialized as undefined.
@@ -1311,8 +1296,8 @@ Handle<Script> Factory::CloneScript(Handle<Script> script) {
new_script.set_context_data(old_script.context_data());
new_script.set_type(old_script.type());
new_script.set_line_ends(*undefined_value(), SKIP_WRITE_BARRIER);
- new_script.set_eval_from_shared_or_wrapped_arguments(
- script->eval_from_shared_or_wrapped_arguments());
+ new_script.set_eval_from_shared_or_wrapped_arguments_or_sfi_table(
+ script->eval_from_shared_or_wrapped_arguments_or_sfi_table());
new_script.set_shared_function_infos(*empty_weak_fixed_array(),
SKIP_WRITE_BARRIER);
new_script.set_eval_from_position(old_script.eval_from_position());
@@ -1330,8 +1315,8 @@ Handle<Script> Factory::CloneScript(Handle<Script> script) {
Handle<CallableTask> Factory::NewCallableTask(Handle<JSReceiver> callable,
Handle<Context> context) {
DCHECK(callable->IsCallable());
- CallableTask microtask = CallableTask::cast(
- NewStructInternal(CALLABLE_TASK_TYPE, AllocationType::kYoung));
+ auto microtask = NewStructInternal<CallableTask>(CALLABLE_TASK_TYPE,
+ AllocationType::kYoung);
DisallowGarbageCollection no_gc;
microtask.set_callable(*callable, SKIP_WRITE_BARRIER);
microtask.set_context(*context, SKIP_WRITE_BARRIER);
@@ -1340,8 +1325,8 @@ Handle<CallableTask> Factory::NewCallableTask(Handle<JSReceiver> callable,
Handle<CallbackTask> Factory::NewCallbackTask(Handle<Foreign> callback,
Handle<Foreign> data) {
- CallbackTask microtask = CallbackTask::cast(
- NewStructInternal(CALLBACK_TASK_TYPE, AllocationType::kYoung));
+ auto microtask = NewStructInternal<CallbackTask>(CALLBACK_TASK_TYPE,
+ AllocationType::kYoung);
DisallowGarbageCollection no_gc;
microtask.set_callback(*callback, SKIP_WRITE_BARRIER);
microtask.set_data(*data, SKIP_WRITE_BARRIER);
@@ -1352,9 +1337,8 @@ Handle<PromiseResolveThenableJobTask> Factory::NewPromiseResolveThenableJobTask(
Handle<JSPromise> promise_to_resolve, Handle<JSReceiver> thenable,
Handle<JSReceiver> then, Handle<Context> context) {
DCHECK(then->IsCallable());
- PromiseResolveThenableJobTask microtask =
- PromiseResolveThenableJobTask::cast(NewStructInternal(
- PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE, AllocationType::kYoung));
+ auto microtask = NewStructInternal<PromiseResolveThenableJobTask>(
+ PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE, AllocationType::kYoung);
DisallowGarbageCollection no_gc;
microtask.set_promise_to_resolve(*promise_to_resolve, SKIP_WRITE_BARRIER);
microtask.set_thenable(*thenable, SKIP_WRITE_BARRIER);
@@ -1377,24 +1361,78 @@ Handle<Foreign> Factory::NewForeign(Address addr) {
#if V8_ENABLE_WEBASSEMBLY
Handle<WasmTypeInfo> Factory::NewWasmTypeInfo(Address type_address,
- Handle<Map> opt_parent) {
+ Handle<Map> opt_parent,
+ int instance_size_bytes) {
+ // We pretenure WasmTypeInfo objects because they are referenced by Maps,
+ // which are assumed to be long-lived. The supertypes list is constant
+ // after initialization, so we pretenure that too.
+ // The subtypes list, however, is expected to grow (and hence be replaced),
+ // so we don't pretenure it.
Handle<ArrayList> subtypes = ArrayList::New(isolate(), 0);
Handle<FixedArray> supertypes;
if (opt_parent.is_null()) {
- supertypes = NewUninitializedFixedArray(0);
+ supertypes = NewFixedArray(0);
} else {
- supertypes = CopyFixedArrayAndGrow(
- handle(opt_parent->wasm_type_info().supertypes(), isolate()), 1);
+ supertypes = CopyArrayAndGrow(
+ handle(opt_parent->wasm_type_info().supertypes(), isolate()), 1,
+ AllocationType::kOld);
supertypes->set(supertypes->length() - 1, *opt_parent);
}
Map map = *wasm_type_info_map();
WasmTypeInfo result = WasmTypeInfo::cast(AllocateRawWithImmortalMap(
- map.instance_size(), AllocationType::kYoung, map));
+ map.instance_size(), AllocationType::kOld, map));
DisallowGarbageCollection no_gc;
result.AllocateExternalPointerEntries(isolate());
result.set_foreign_address(isolate(), type_address);
result.set_supertypes(*supertypes, SKIP_WRITE_BARRIER);
- result.set_subtypes(*subtypes, SKIP_WRITE_BARRIER);
+ result.set_subtypes(*subtypes);
+ result.set_instance_size(instance_size_bytes);
+ return handle(result, isolate());
+}
+
+Handle<WasmJSFunctionData> Factory::NewWasmJSFunctionData(
+ Address opt_call_target, Handle<JSReceiver> callable, int return_count,
+ int parameter_count, Handle<PodArray<wasm::ValueType>> serialized_sig,
+ Handle<Code> wrapper_code) {
+ Handle<Tuple2> pair = NewTuple2(null_value(), callable, AllocationType::kOld);
+ Map map = *wasm_js_function_data_map();
+ WasmJSFunctionData result =
+ WasmJSFunctionData::cast(AllocateRawWithImmortalMap(
+ map.instance_size(), AllocationType::kOld, map));
+ DisallowGarbageCollection no_gc;
+ result.AllocateExternalPointerEntries(isolate());
+ result.set_foreign_address(isolate(), opt_call_target);
+ result.set_ref(*pair);
+ result.set_serialized_return_count(return_count);
+ result.set_serialized_parameter_count(parameter_count);
+ result.set_serialized_signature(*serialized_sig);
+ result.set_wrapper_code(*wrapper_code);
+ // Default value, will be overwritten by the caller.
+ result.set_wasm_to_js_wrapper_code(
+ isolate()->heap()->builtin(Builtins::kAbort));
+ return handle(result, isolate());
+}
+
+Handle<WasmExportedFunctionData> Factory::NewWasmExportedFunctionData(
+ Handle<Code> export_wrapper, Handle<WasmInstanceObject> instance,
+ Address call_target, Handle<Object> ref, int func_index,
+ Address sig_address, int wrapper_budget) {
+ Handle<Foreign> sig_foreign = NewForeign(sig_address);
+ Map map = *wasm_exported_function_data_map();
+ WasmExportedFunctionData result =
+ WasmExportedFunctionData::cast(AllocateRawWithImmortalMap(
+ map.instance_size(), AllocationType::kOld, map));
+ DisallowGarbageCollection no_gc;
+ result.AllocateExternalPointerEntries(isolate());
+ result.set_foreign_address(isolate(), call_target);
+ result.set_ref(*ref);
+ result.set_wrapper_code(*export_wrapper);
+ result.set_instance(*instance);
+ result.set_function_index(func_index);
+ result.set_signature(*sig_foreign);
+ result.set_wrapper_budget(wrapper_budget);
+ result.set_c_wrapper_code(Smi::zero(), SKIP_WRITE_BARRIER);
+ result.set_packed_args_size(0);
return handle(result, isolate());
}
@@ -1566,9 +1604,7 @@ Map Factory::InitializeMap(Map map, InstanceType type, int instance_size,
map.SetInstanceDescriptors(isolate(), *empty_descriptor_array(), 0);
// Must be called only after |instance_type| and |instance_size| are set.
map.set_visitor_id(Map::GetVisitorId(map));
- // TODO(solanes, v8:7790, v8:11353): set_relaxed_bit_field could be an atomic
- // set if TSAN could see the transitions happening in StoreIC.
- map.set_relaxed_bit_field(0);
+ map.set_bit_field(0);
map.set_bit_field2(Map::Bits2::NewTargetIsBaseBit::encode(true));
int bit_field3 =
Map::Bits3::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
@@ -1892,7 +1928,7 @@ Handle<JSObject> Factory::NewError(Handle<JSFunction> constructor,
Handle<Object> no_caller;
return ErrorUtils::Construct(isolate(), constructor, constructor, message,
- SKIP_NONE, no_caller,
+ undefined_value(), SKIP_NONE, no_caller,
ErrorUtils::StackTraceCollection::kDetailed)
.ToHandleChecked();
}
@@ -2075,9 +2111,8 @@ Handle<Code> Factory::CopyCode(Handle<Code> code) {
#endif
DCHECK(IsAligned(new_code->address(), kCodeAlignment));
DCHECK_IMPLIES(
- !V8_ENABLE_THIRD_PARTY_HEAP_BOOL &&
- !heap->memory_allocator()->code_range().is_empty(),
- heap->memory_allocator()->code_range().contains(new_code->address()));
+ !V8_ENABLE_THIRD_PARTY_HEAP_BOOL && !heap->code_region().is_empty(),
+ heap->code_region().contains(new_code->address()));
return new_code;
}
@@ -2175,7 +2210,7 @@ Handle<JSGlobalObject> Factory::NewJSGlobalObject(
// Set up the global object as a normalized object.
global->set_global_dictionary(*dictionary, kReleaseStore);
- global->synchronized_set_map(raw_map);
+ global->set_map(raw_map, kReleaseStore);
// Make sure result is a global object with properties in dictionary.
DCHECK(global->IsJSGlobalObject() && !global->HasFastProperties());
@@ -2212,13 +2247,9 @@ void Factory::InitializeJSObjectBody(JSObject obj, Map map, int start_offset) {
// In case of Array subclassing the |map| could already be transitioned
// to different elements kind from the initial map on which we track slack.
bool in_progress = map.IsInobjectSlackTrackingInProgress();
- Object filler;
- if (in_progress) {
- filler = *one_pointer_filler_map();
- } else {
- filler = *undefined_value();
- }
- obj.InitializeBody(map, start_offset, *undefined_value(), filler);
+ obj.InitializeBody(map, start_offset, in_progress,
+ ReadOnlyRoots(isolate()).one_pointer_filler_map_word(),
+ *undefined_value());
if (in_progress) {
map.FindRootMap(isolate()).InobjectSlackTrackingStep(isolate());
}
@@ -2369,7 +2400,7 @@ Handle<FixedArrayBase> Factory::NewJSArrayStorage(
} else {
DCHECK(IsSmiOrObjectElementsKind(elements_kind));
if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
- elms = NewUninitializedFixedArray(capacity);
+ elms = NewFixedArray(capacity);
} else {
DCHECK(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
elms = NewFixedArrayWithHoles(capacity);
@@ -2444,7 +2475,8 @@ Handle<SourceTextModule> Factory::NewSourceTextModule(
module.set_status(Module::kUninstantiated);
module.set_exception(roots.the_hole_value(), SKIP_WRITE_BARRIER);
module.set_top_level_capability(roots.undefined_value(), SKIP_WRITE_BARRIER);
- module.set_import_meta(roots.the_hole_value(), SKIP_WRITE_BARRIER);
+ module.set_import_meta(roots.the_hole_value(), kReleaseStore,
+ SKIP_WRITE_BARRIER);
module.set_dfs_index(-1);
module.set_dfs_ancestor_index(-1);
module.set_flags(0);
@@ -2487,7 +2519,8 @@ Handle<JSArrayBuffer> Factory::NewJSArrayBuffer(
isolate());
auto result =
Handle<JSArrayBuffer>::cast(NewJSObjectFromMap(map, allocation));
- result->Setup(SharedFlag::kNotShared, std::move(backing_store));
+ result->Setup(SharedFlag::kNotShared, ResizableFlag::kNotResizable,
+ std::move(backing_store));
return result;
}
@@ -2505,18 +2538,32 @@ MaybeHandle<JSArrayBuffer> Factory::NewJSArrayBufferAndBackingStore(
isolate());
auto array_buffer =
Handle<JSArrayBuffer>::cast(NewJSObjectFromMap(map, allocation));
- array_buffer->Setup(SharedFlag::kNotShared, std::move(backing_store));
+ array_buffer->Setup(SharedFlag::kNotShared, ResizableFlag::kNotResizable,
+ std::move(backing_store));
return array_buffer;
}
Handle<JSArrayBuffer> Factory::NewJSSharedArrayBuffer(
std::shared_ptr<BackingStore> backing_store) {
- Handle<Map> map(
- isolate()->native_context()->shared_array_buffer_fun().initial_map(),
- isolate());
+ Handle<Map> map;
+ if (backing_store->is_resizable()) {
+ DCHECK(FLAG_harmony_rab_gsab);
+ map = Handle<Map>(isolate()
+ ->native_context()
+ ->growable_shared_array_buffer_fun()
+ .initial_map(),
+ isolate());
+ } else {
+ map = Handle<Map>(
+ isolate()->native_context()->shared_array_buffer_fun().initial_map(),
+ isolate());
+ }
auto result = Handle<JSArrayBuffer>::cast(
NewJSObjectFromMap(map, AllocationType::kYoung));
- result->Setup(SharedFlag::kShared, std::move(backing_store));
+ ResizableFlag resizable = backing_store->is_resizable()
+ ? ResizableFlag::kResizable
+ : ResizableFlag::kNotResizable;
+ result->Setup(SharedFlag::kShared, resizable, std::move(backing_store));
return result;
}
@@ -2571,6 +2618,7 @@ void Factory::TypeAndSizeForElementsKind(ElementsKind kind,
*element_size = sizeof(ctype); \
break;
TYPED_ARRAYS(TYPED_ARRAY_CASE)
+ RAB_GSAB_TYPED_ARRAYS_WITH_TYPED_ARRAY_TYPE(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
default:
@@ -2653,6 +2701,8 @@ Handle<JSTypedArray> Factory::NewJSTypedArray(ExternalArrayType type,
raw.AllocateExternalPointerEntries(isolate());
raw.set_length(length);
raw.SetOffHeapDataPtr(isolate(), buffer->backing_store(), byte_offset);
+ raw.set_is_length_tracking(false);
+ raw.set_is_backed_by_rab(!buffer->is_shared() && buffer->is_resizable());
return typed_array;
}
@@ -2791,7 +2841,7 @@ void Factory::ReinitializeJSGlobalProxy(Handle<JSGlobalProxy> object,
// Reset the map for the object.
JSGlobalProxy raw = *object;
- raw.synchronized_set_map(*map);
+ raw.set_map(*map, kReleaseStore);
// Reinitialize the object from the constructor map.
InitializeJSObjectFromMap(raw, *raw_properties_or_hash, *map);
@@ -3016,11 +3066,11 @@ Handle<String> Factory::SizeToString(size_t value, bool check_cache) {
Handle<DebugInfo> Factory::NewDebugInfo(Handle<SharedFunctionInfo> shared) {
DCHECK(!shared->HasDebugInfo());
- DebugInfo debug_info =
- DebugInfo::cast(NewStructInternal(DEBUG_INFO_TYPE, AllocationType::kOld));
+ auto debug_info =
+ NewStructInternal<DebugInfo>(DEBUG_INFO_TYPE, AllocationType::kOld);
DisallowGarbageCollection no_gc;
SharedFunctionInfo raw_shared = *shared;
- debug_info.set_flags(DebugInfo::kNone);
+ debug_info.set_flags(DebugInfo::kNone, kRelaxedStore);
debug_info.set_shared(raw_shared);
debug_info.set_debugger_hints(0);
DCHECK_EQ(DebugInfo::kNoDebuggingId, debug_info.debugging_id());
@@ -3039,8 +3089,8 @@ Handle<DebugInfo> Factory::NewDebugInfo(Handle<SharedFunctionInfo> shared) {
}
Handle<BreakPointInfo> Factory::NewBreakPointInfo(int source_position) {
- BreakPointInfo new_break_point_info = BreakPointInfo::cast(
- NewStructInternal(BREAK_POINT_INFO_TYPE, AllocationType::kOld));
+ auto new_break_point_info = NewStructInternal<BreakPointInfo>(
+ BREAK_POINT_INFO_TYPE, AllocationType::kOld);
DisallowGarbageCollection no_gc;
new_break_point_info.set_source_position(source_position);
new_break_point_info.set_break_points(*undefined_value(), SKIP_WRITE_BARRIER);
@@ -3048,8 +3098,8 @@ Handle<BreakPointInfo> Factory::NewBreakPointInfo(int source_position) {
}
Handle<BreakPoint> Factory::NewBreakPoint(int id, Handle<String> condition) {
- BreakPoint new_break_point = BreakPoint::cast(
- NewStructInternal(BREAK_POINT_TYPE, AllocationType::kOld));
+ auto new_break_point =
+ NewStructInternal<BreakPoint>(BREAK_POINT_TYPE, AllocationType::kOld);
DisallowGarbageCollection no_gc;
new_break_point.set_id(id);
new_break_point.set_condition(*condition);
@@ -3060,8 +3110,8 @@ Handle<StackFrameInfo> Factory::NewStackFrameInfo(
Handle<Object> receiver_or_instance, Handle<Object> function,
Handle<HeapObject> code_object, int code_offset_or_source_position,
int flags, Handle<FixedArray> parameters) {
- StackFrameInfo info = StackFrameInfo::cast(
- NewStructInternal(STACK_FRAME_INFO_TYPE, AllocationType::kYoung));
+ auto info = NewStructInternal<StackFrameInfo>(STACK_FRAME_INFO_TYPE,
+ AllocationType::kYoung);
DisallowGarbageCollection no_gc;
info.set_receiver_or_instance(*receiver_or_instance, SKIP_WRITE_BARRIER);
info.set_function(*function, SKIP_WRITE_BARRIER);
@@ -3136,6 +3186,16 @@ Handle<Map> Factory::ObjectLiteralMapFromCache(Handle<NativeContext> context,
return map;
}
+Handle<MegaDomHandler> Factory::NewMegaDomHandler(MaybeObjectHandle accessor,
+ MaybeObjectHandle context) {
+ Handle<Map> map = read_only_roots().mega_dom_handler_map_handle();
+ MegaDomHandler handler = MegaDomHandler::cast(New(map, AllocationType::kOld));
+ DisallowGarbageCollection no_gc;
+ handler.set_accessor(*accessor);
+ handler.set_context(*context);
+ return handle(handler, isolate());
+}
+
Handle<LoadHandler> Factory::NewLoadHandler(int data_count,
AllocationType allocation) {
Handle<Map> map;
@@ -3496,7 +3556,8 @@ Handle<JSPromise> Factory::NewJSPromiseWithoutHook() {
Handle<JSPromise> Factory::NewJSPromise() {
Handle<JSPromise> promise = NewJSPromiseWithoutHook();
- isolate()->RunPromiseHook(PromiseHookType::kInit, promise, undefined_value());
+ isolate()->RunAllPromiseHooks(PromiseHookType::kInit, promise,
+ undefined_value());
return promise;
}
@@ -3538,20 +3599,12 @@ Handle<JSFunction> Factory::JSFunctionBuilder::Build() {
PrepareMap();
PrepareFeedbackCell();
- // Determine the associated Code object.
- Handle<Code> code;
- const bool have_cached_code =
- sfi_->TryGetCachedCode(isolate_).ToHandle(&code);
- if (!have_cached_code) code = handle(sfi_->GetCode(), isolate_);
-
+ Handle<Code> code = handle(sfi_->GetCode(), isolate_);
Handle<JSFunction> result = BuildRaw(code);
- if (have_cached_code || code->kind() == CodeKind::BASELINE) {
+ if (code->kind() == CodeKind::BASELINE) {
IsCompiledScope is_compiled_scope(sfi_->is_compiled_scope(isolate_));
JSFunction::EnsureFeedbackVector(result, &is_compiled_scope);
- if (FLAG_trace_turbo_nci && have_cached_code) {
- CompilationCacheCode::TraceHit(sfi_, code);
- }
}
Compiler::PostInstantiation(result);
@@ -3583,7 +3636,8 @@ Handle<JSFunction> Factory::JSFunctionBuilder::BuildRaw(Handle<Code> code) {
function.set_code(*code, kReleaseStore, mode);
if (function.has_prototype_slot()) {
function.set_prototype_or_initial_map(
- ReadOnlyRoots(isolate).the_hole_value(), SKIP_WRITE_BARRIER);
+ ReadOnlyRoots(isolate).the_hole_value(), kReleaseStore,
+ SKIP_WRITE_BARRIER);
}
// Potentially body initialization.
diff --git a/chromium/v8/src/heap/factory.h b/chromium/v8/src/heap/factory.h
index ebec483de47..7f99c557095 100644
--- a/chromium/v8/src/heap/factory.h
+++ b/chromium/v8/src/heap/factory.h
@@ -131,9 +131,6 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
MaybeHandle<FixedArray> TryNewFixedArray(
int length, AllocationType allocation = AllocationType::kYoung);
- // Allocates an uninitialized fixed array. It must be filled by the caller.
- Handle<FixedArray> NewUninitializedFixedArray(int length);
-
// Allocates a closure feedback cell array whose feedback cells are
// initialized with undefined values.
Handle<ClosureFeedbackCellArray> NewClosureFeedbackCellArray(int num_slots);
@@ -557,7 +554,18 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
#if V8_ENABLE_WEBASSEMBLY
Handle<WasmTypeInfo> NewWasmTypeInfo(Address type_address,
- Handle<Map> opt_parent);
+ Handle<Map> opt_parent,
+ int instance_size_bytes);
+ Handle<WasmExportedFunctionData> NewWasmExportedFunctionData(
+ Handle<Code> export_wrapper, Handle<WasmInstanceObject> instance,
+ Address call_target, Handle<Object> ref, int func_index,
+ Address sig_address, int wrapper_budget);
+ // {opt_call_target} is kNullAddress for JavaScript functions, and
+ // non-null for exported Wasm functions.
+ Handle<WasmJSFunctionData> NewWasmJSFunctionData(
+ Address opt_call_target, Handle<JSReceiver> callable, int return_count,
+ int parameter_count, Handle<PodArray<wasm::ValueType>> serialized_sig,
+ Handle<Code> wrapper_code);
Handle<SharedFunctionInfo> NewSharedFunctionInfoForWasmExportedFunction(
Handle<String> name, Handle<WasmExportedFunctionData> data);
@@ -732,7 +740,8 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
Handle<LoadHandler> NewLoadHandler(
int data_count, AllocationType allocation = AllocationType::kOld);
Handle<StoreHandler> NewStoreHandler(int data_count);
-
+ Handle<MegaDomHandler> NewMegaDomHandler(MaybeObjectHandle accessor,
+ MaybeObjectHandle context);
Handle<RegExpMatchInfo> NewRegExpMatchInfo();
// Creates a new FixedArray that holds the data associated with the
diff --git a/chromium/v8/src/heap/finalization-registry-cleanup-task.cc b/chromium/v8/src/heap/finalization-registry-cleanup-task.cc
index 2acfa31ffba..18222e783d1 100644
--- a/chromium/v8/src/heap/finalization-registry-cleanup-task.cc
+++ b/chromium/v8/src/heap/finalization-registry-cleanup-task.cc
@@ -59,8 +59,21 @@ void FinalizationRegistryCleanupTask::RunInternal() {
Context::cast(finalization_registry->native_context()), isolate);
Handle<Object> callback(finalization_registry->cleanup(), isolate);
v8::Context::Scope context_scope(v8::Utils::ToLocal(context));
- v8::TryCatch catcher(reinterpret_cast<v8::Isolate*>(isolate));
+ v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
+ v8::TryCatch catcher(v8_isolate);
catcher.SetVerbose(true);
+ std::unique_ptr<MicrotasksScope> microtasks_scope;
+ MicrotaskQueue* microtask_queue =
+ finalization_registry->native_context().microtask_queue();
+ if (!microtask_queue) microtask_queue = isolate->default_microtask_queue();
+ if (microtask_queue &&
+ microtask_queue->microtasks_policy() == v8::MicrotasksPolicy::kScoped) {
+ // InvokeFinalizationRegistryCleanupFromTask will call into V8 API methods,
+ // so we need a valid microtasks scope on the stack to avoid running into
+ // the CallDepthScope check.
+ microtasks_scope.reset(new v8::MicrotasksScope(
+ v8_isolate, microtask_queue, v8::MicrotasksScope::kDoNotRunMicrotasks));
+ }
// Exceptions are reported via the message handler. This is ensured by the
// verbose TryCatch.
diff --git a/chromium/v8/src/heap/free-list.cc b/chromium/v8/src/heap/free-list.cc
index 80b4a4f01f2..9f13247a2e6 100644
--- a/chromium/v8/src/heap/free-list.cc
+++ b/chromium/v8/src/heap/free-list.cc
@@ -48,7 +48,7 @@ FreeSpace FreeListCategory::SearchForNodeInList(size_t minimum_size,
for (FreeSpace cur_node = top(); !cur_node.is_null();
cur_node = cur_node.next()) {
DCHECK(Page::FromHeapObject(cur_node)->CanAllocate());
- size_t size = cur_node.size();
+ size_t size = cur_node.size(kRelaxedLoad);
if (size >= minimum_size) {
DCHECK_GE(available_, size);
UpdateCountersAfterAllocation(size);
@@ -91,10 +91,10 @@ void FreeListCategory::RepairFreeList(Heap* heap) {
FreeSpace n = top();
while (!n.is_null()) {
ObjectSlot map_slot = n.map_slot();
- if (map_slot.contains_value(kNullAddress)) {
- map_slot.store(free_space_map);
+ if (map_slot.contains_map_value(kNullAddress)) {
+ map_slot.store_map(free_space_map);
} else {
- DCHECK(map_slot.contains_value(free_space_map.ptr()));
+ DCHECK(map_slot.contains_map_value(free_space_map.ptr()));
}
n = n.next();
}
@@ -504,12 +504,13 @@ size_t FreeListCategory::SumFreeList() {
while (!cur.is_null()) {
// We can't use "cur->map()" here because both cur's map and the
// root can be null during bootstrapping.
- DCHECK(cur.map_slot().contains_value(Page::FromHeapObject(cur)
- ->heap()
- ->isolate()
- ->root(RootIndex::kFreeSpaceMap)
- .ptr()));
- sum += cur.relaxed_read_size();
+ DCHECK(
+ cur.map_slot().contains_map_value(Page::FromHeapObject(cur)
+ ->heap()
+ ->isolate()
+ ->root(RootIndex::kFreeSpaceMap)
+ .ptr()));
+ sum += cur.size(kRelaxedLoad);
cur = cur.next();
}
return sum;
diff --git a/chromium/v8/src/heap/gc-tracer.cc b/chromium/v8/src/heap/gc-tracer.cc
index b4f86cc2a15..70f8c276270 100644
--- a/chromium/v8/src/heap/gc-tracer.cc
+++ b/chromium/v8/src/heap/gc-tracer.cc
@@ -13,7 +13,9 @@
#include "src/heap/heap-inl.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/spaces.h"
-#include "src/logging/counters-inl.h"
+#include "src/logging/counters.h"
+#include "src/logging/tracing-flags.h"
+#include "src/tracing/tracing-category-observer.h"
namespace v8 {
namespace internal {
@@ -28,6 +30,8 @@ static size_t CountTotalHolesSize(Heap* heap) {
}
return holes_size;
}
+
+#ifdef V8_RUNTIME_CALL_STATS
WorkerThreadRuntimeCallStats* GCTracer::worker_thread_runtime_call_stats() {
return heap_->isolate()->counters()->worker_thread_runtime_call_stats();
}
@@ -38,6 +42,7 @@ RuntimeCallCounterId GCTracer::RCSCounterFromScope(Scope::ScopeId id) {
static_cast<int>(RuntimeCallCounterId::kGC_MC_INCREMENTAL) +
static_cast<int>(id));
}
+#endif // defined(V8_RUNTIME_CALL_STATS)
double GCTracer::MonotonicallyIncreasingTimeInMs() {
if (V8_UNLIKELY(FLAG_predictable)) {
@@ -61,6 +66,7 @@ GCTracer::Scope::Scope(GCTracer* tracer, ScopeId scope, ThreadKind thread_kind)
start_time_ = tracer_->MonotonicallyIncreasingTimeInMs();
if (V8_LIKELY(!TracingFlags::is_runtime_stats_enabled())) return;
+#ifdef V8_RUNTIME_CALL_STATS
if (thread_kind_ == ThreadKind::kMain) {
DCHECK_EQ(tracer_->heap_->isolate()->thread_id(), ThreadId::Current());
runtime_stats_ =
@@ -72,6 +78,7 @@ GCTracer::Scope::Scope(GCTracer* tracer, ScopeId scope, ThreadKind thread_kind)
runtime_stats_ = runtime_call_stats_scope_->Get();
runtime_stats_->Enter(&timer_, GCTracer::RCSCounterFromScope(scope));
}
+#endif // defined(V8_RUNTIME_CALL_STATS)
}
GCTracer::Scope::~Scope() {
@@ -80,12 +87,23 @@ GCTracer::Scope::~Scope() {
if (thread_kind_ == ThreadKind::kMain) {
DCHECK_EQ(tracer_->heap_->isolate()->thread_id(), ThreadId::Current());
tracer_->AddScopeSample(scope_, duration_ms);
+ if (scope_ == ScopeId::MC_INCREMENTAL ||
+ scope_ == ScopeId::MC_INCREMENTAL_START ||
+ scope_ == MC_INCREMENTAL_FINALIZE) {
+ auto* long_task_stats =
+ tracer_->heap_->isolate()->GetCurrentLongTaskStats();
+ long_task_stats->gc_full_incremental_wall_clock_duration_us +=
+ static_cast<int64_t>(duration_ms *
+ base::Time::kMicrosecondsPerMillisecond);
+ }
} else {
tracer_->AddScopeSampleBackground(scope_, duration_ms);
}
+#ifdef V8_RUNTIME_CALL_STATS
if (V8_LIKELY(runtime_stats_ == nullptr)) return;
runtime_stats_->Leave(&timer_);
+#endif // defined(V8_RUNTIME_CALL_STATS)
}
const char* GCTracer::Scope::Name(ScopeId id) {
@@ -290,8 +308,10 @@ void GCTracer::StartInSafepoint() {
current_.start_object_size = heap_->SizeOfObjects();
current_.start_memory_size = heap_->memory_allocator()->Size();
current_.start_holes_size = CountTotalHolesSize(heap_);
- current_.young_object_size =
- heap_->new_space()->Size() + heap_->new_lo_space()->SizeOfObjects();
+ size_t new_space_size = (heap_->new_space() ? heap_->new_space()->Size() : 0);
+ size_t new_lo_space_size =
+ (heap_->new_lo_space() ? heap_->new_lo_space()->SizeOfObjects() : 0);
+ current_.young_object_size = new_space_size + new_lo_space_size;
}
void GCTracer::ResetIncrementalMarkingCounters() {
@@ -333,6 +353,9 @@ void GCTracer::Stop(GarbageCollector collector) {
AddAllocation(current_.end_time);
double duration = current_.end_time - current_.start_time;
+ int64_t duration_us =
+ static_cast<int64_t>(duration * base::Time::kMicrosecondsPerMillisecond);
+ auto* long_task_stats = heap_->isolate()->GetCurrentLongTaskStats();
switch (current_.type) {
case Event::SCAVENGER:
@@ -342,6 +365,7 @@ void GCTracer::Stop(GarbageCollector collector) {
recorded_minor_gcs_survived_.Push(
MakeBytesAndDuration(current_.survived_young_object_size, duration));
FetchBackgroundMinorGCCounters();
+ long_task_stats->gc_young_wall_clock_duration_us += duration_us;
break;
case Event::INCREMENTAL_MARK_COMPACTOR:
current_.incremental_marking_bytes = incremental_marking_bytes_;
@@ -361,6 +385,7 @@ void GCTracer::Stop(GarbageCollector collector) {
ResetIncrementalMarkingCounters();
combined_mark_compact_speed_cache_ = 0.0;
FetchBackgroundMarkCompactCounters();
+ long_task_stats->gc_full_atomic_wall_clock_duration_us += duration_us;
break;
case Event::MARK_COMPACTOR:
DCHECK_EQ(0u, current_.incremental_marking_bytes);
@@ -373,6 +398,7 @@ void GCTracer::Stop(GarbageCollector collector) {
ResetIncrementalMarkingCounters();
combined_mark_compact_speed_cache_ = 0.0;
FetchBackgroundMarkCompactCounters();
+ long_task_stats->gc_full_atomic_wall_clock_duration_us += duration_us;
break;
case Event::START:
UNREACHABLE();
diff --git a/chromium/v8/src/heap/gc-tracer.h b/chromium/v8/src/heap/gc-tracer.h
index 011889ba66e..3a665726ca7 100644
--- a/chromium/v8/src/heap/gc-tracer.h
+++ b/chromium/v8/src/heap/gc-tracer.h
@@ -113,9 +113,11 @@ class V8_EXPORT_PRIVATE GCTracer {
ScopeId scope_;
ThreadKind thread_kind_;
double start_time_;
+#ifdef V8_RUNTIME_CALL_STATS
RuntimeCallTimer timer_;
RuntimeCallStats* runtime_stats_ = nullptr;
base::Optional<WorkerThreadRuntimeCallStatsScope> runtime_call_stats_scope_;
+#endif // defined(V8_RUNTIME_CALL_STATS)
};
class Event {
@@ -195,7 +197,9 @@ class V8_EXPORT_PRIVATE GCTracer {
static double CombineSpeedsInBytesPerMillisecond(double default_speed,
double optional_speed);
+#ifdef V8_RUNTIME_CALL_STATS
static RuntimeCallCounterId RCSCounterFromScope(Scope::ScopeId id);
+#endif // defined(V8_RUNTIME_CALL_STATS)
explicit GCTracer(Heap* heap);
@@ -335,7 +339,9 @@ class V8_EXPORT_PRIVATE GCTracer {
double AverageTimeToIncrementalMarkingTask() const;
void RecordTimeToIncrementalMarkingTask(double time_to_task);
+#ifdef V8_RUNTIME_CALL_STATS
WorkerThreadRuntimeCallStats* worker_thread_runtime_call_stats();
+#endif // defined(V8_RUNTIME_CALL_STATS)
CollectionEpoch CurrentEpoch(Scope::ScopeId id);
diff --git a/chromium/v8/src/heap/heap-inl.h b/chromium/v8/src/heap/heap-inl.h
index 8372dd518d7..8c2649e0ef8 100644
--- a/chromium/v8/src/heap/heap-inl.h
+++ b/chromium/v8/src/heap/heap-inl.h
@@ -8,22 +8,17 @@
#include <cmath>
// Clients of this interface shouldn't depend on lots of heap internals.
-// Do not include anything from src/heap other than src/heap/heap.h and its
-// write barrier here!
+// Avoid including anything but `heap.h` from `src/heap` where possible.
#include "src/base/atomic-utils.h"
#include "src/base/atomicops.h"
#include "src/base/platform/platform.h"
+#include "src/base/sanitizer/msan.h"
#include "src/common/assert-scope.h"
-#include "src/heap/heap-write-barrier.h"
-#include "src/heap/heap.h"
-#include "src/heap/third-party/heap-api.h"
-#include "src/objects/feedback-vector.h"
-
-// TODO(gc): There is one more include to remove in order to no longer
-// leak heap internals to users of this interface!
#include "src/execution/isolate-data.h"
#include "src/execution/isolate.h"
#include "src/heap/code-object-registry.h"
+#include "src/heap/heap-write-barrier.h"
+#include "src/heap/heap.h"
#include "src/heap/large-spaces.h"
#include "src/heap/memory-allocator.h"
#include "src/heap/memory-chunk.h"
@@ -31,11 +26,13 @@
#include "src/heap/paged-spaces-inl.h"
#include "src/heap/read-only-spaces.h"
#include "src/heap/spaces-inl.h"
+#include "src/heap/third-party/heap-api.h"
#include "src/objects/allocation-site-inl.h"
#include "src/objects/api-callbacks-inl.h"
#include "src/objects/cell-inl.h"
#include "src/objects/descriptor-array.h"
#include "src/objects/feedback-cell-inl.h"
+#include "src/objects/feedback-vector.h"
#include "src/objects/literal-objects-inl.h"
#include "src/objects/objects-inl.h"
#include "src/objects/oddball.h"
@@ -45,7 +42,6 @@
#include "src/objects/slots-inl.h"
#include "src/objects/struct-inl.h"
#include "src/profiler/heap-profiler.h"
-#include "src/sanitizer/msan.h"
#include "src/strings/string-hasher.h"
#include "src/zone/zone-list-inl.h"
@@ -162,19 +158,12 @@ Address* Heap::OldSpaceAllocationLimitAddress() {
return old_space_->allocation_limit_address();
}
-void Heap::UpdateNewSpaceAllocationCounter() {
- new_space_allocation_counter_ = NewSpaceAllocationCounter();
-}
-
-size_t Heap::NewSpaceAllocationCounter() {
- return new_space_allocation_counter_ + new_space()->AllocatedSinceLastGC();
-}
-
-inline const base::AddressRegion& Heap::code_range() {
+inline const base::AddressRegion& Heap::code_region() {
#ifdef V8_ENABLE_THIRD_PARTY_HEAP
return tp_heap_->GetCodeRange();
#else
- return memory_allocator_->code_range();
+ static constexpr base::AddressRegion kEmptyRegion;
+ return code_range_ ? code_range_->reservation()->region() : kEmptyRegion;
#endif
}
@@ -189,7 +178,8 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationType type,
#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
if (FLAG_random_gc_interval > 0 || FLAG_gc_interval >= 0) {
if (!always_allocate() && Heap::allocation_timeout_-- <= 0) {
- return AllocationResult::Retry();
+ AllocationSpace space = FLAG_single_generation ? OLD_SPACE : NEW_SPACE;
+ return AllocationResult::Retry(space);
}
}
#endif
@@ -197,6 +187,10 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationType type,
IncrementObjectCounters();
#endif
+ if (CanSafepoint()) {
+ main_thread_local_heap()->Safepoint();
+ }
+
size_t large_object_threshold = MaxRegularHeapObjectSize(type);
bool large_object =
static_cast<size_t>(size_in_bytes) > large_object_threshold;
@@ -245,6 +239,12 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationType type,
DCHECK(CanAllocateInReadOnlySpace());
DCHECK_EQ(AllocationOrigin::kRuntime, origin);
allocation = read_only_space_->AllocateRaw(size_in_bytes, alignment);
+ } else if (AllocationType::kSharedOld == type) {
+ allocation =
+ shared_old_allocator_->AllocateRaw(size_in_bytes, alignment, origin);
+ } else if (AllocationType::kSharedMap == type) {
+ allocation =
+ shared_map_allocator_->AllocateRaw(size_in_bytes, alignment, origin);
} else {
UNREACHABLE();
}
@@ -285,10 +285,9 @@ HeapObject Heap::AllocateRawWith(int size, AllocationType allocation,
DCHECK(AllowHeapAllocation::IsAllowed());
DCHECK_EQ(gc_state(), NOT_IN_GC);
Heap* heap = isolate()->heap();
- if (!V8_ENABLE_THIRD_PARTY_HEAP_BOOL &&
- allocation == AllocationType::kYoung &&
+ if (allocation == AllocationType::kYoung &&
alignment == AllocationAlignment::kWordAligned &&
- size <= MaxRegularHeapObjectSize(allocation)) {
+ size <= MaxRegularHeapObjectSize(allocation) && !FLAG_single_generation) {
Address* top = heap->NewSpaceAllocationTopAddress();
Address* limit = heap->NewSpaceAllocationLimitAddress();
if ((*limit - *top >= static_cast<unsigned>(size)) &&
@@ -383,17 +382,21 @@ void Heap::RegisterExternalString(String string) {
void Heap::FinalizeExternalString(String string) {
DCHECK(string.IsExternalString());
- Page* page = Page::FromHeapObject(string);
ExternalString ext_string = ExternalString::cast(string);
- page->DecrementExternalBackingStoreBytes(
- ExternalBackingStoreType::kExternalString,
- ext_string.ExternalPayloadSize());
+ if (!FLAG_enable_third_party_heap) {
+ Page* page = Page::FromHeapObject(string);
+ page->DecrementExternalBackingStoreBytes(
+ ExternalBackingStoreType::kExternalString,
+ ext_string.ExternalPayloadSize());
+ }
ext_string.DisposeResource(isolate());
}
-Address Heap::NewSpaceTop() { return new_space_->top(); }
+Address Heap::NewSpaceTop() {
+ return new_space_ ? new_space_->top() : kNullAddress;
+}
bool Heap::InYoungGeneration(Object object) {
DCHECK(!HasWeakHeapObjectTag(object));
@@ -458,7 +461,12 @@ bool Heap::InToPage(HeapObject heap_object) {
return BasicMemoryChunk::FromHeapObject(heap_object)->IsToPage();
}
-bool Heap::InOldSpace(Object object) { return old_space_->Contains(object); }
+bool Heap::InOldSpace(Object object) {
+ if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL)
+ return object.IsHeapObject() &&
+ third_party_heap::Heap::InOldSpace(object.ptr());
+ return old_space_->Contains(object);
+}
// static
Heap* Heap::FromWritableHeapObject(HeapObject obj) {
@@ -502,7 +510,7 @@ AllocationMemento Heap::FindAllocationMemento(Map map, HeapObject object) {
// below (memento_address == top) ensures that this is safe. Mark the word as
// initialized to silence MemorySanitizer warnings.
MSAN_MEMORY_IS_INITIALIZED(candidate_map_slot.address(), kTaggedSize);
- if (!candidate_map_slot.contains_value(
+ if (!candidate_map_slot.contains_map_value(
ReadOnlyRoots(this).allocation_memento_map().ptr())) {
return AllocationMemento();
}
@@ -577,18 +585,23 @@ void Heap::UpdateAllocationSite(Map map, HeapObject object,
bool Heap::IsPendingAllocation(HeapObject object) {
// TODO(ulan): Optimize this function to perform 3 loads at most.
Address addr = object.address();
- Address top = new_space_->original_top_acquire();
- Address limit = new_space_->original_limit_relaxed();
- if (top <= addr && addr < limit) return true;
+ Address top, limit;
+
+ if (new_space_) {
+ top = new_space_->original_top_acquire();
+ limit = new_space_->original_limit_relaxed();
+ if (top && top <= addr && addr < limit) return true;
+ }
+
PagedSpaceIterator spaces(this);
for (PagedSpace* space = spaces.Next(); space != nullptr;
space = spaces.Next()) {
top = space->original_top_acquire();
limit = space->original_limit_relaxed();
- if (top <= addr && addr < limit) return true;
+ if (top && top <= addr && addr < limit) return true;
}
if (addr == lo_space_->pending_object()) return true;
- if (addr == new_lo_space_->pending_object()) return true;
+ if (new_lo_space_ && addr == new_lo_space_->pending_object()) return true;
if (addr == code_lo_space_->pending_object()) return true;
return false;
}
@@ -644,8 +657,8 @@ int Heap::NextDebuggingId() {
}
int Heap::GetNextTemplateSerialNumber() {
- int next_serial_number = next_template_serial_number().value() + 1;
- set_next_template_serial_number(Smi::FromInt(next_serial_number));
+ int next_serial_number = next_template_serial_number().value();
+ set_next_template_serial_number(Smi::FromInt(next_serial_number + 1));
return next_serial_number;
}
diff --git a/chromium/v8/src/heap/heap-write-barrier.cc b/chromium/v8/src/heap/heap-write-barrier.cc
index 63949de2433..0030615bab4 100644
--- a/chromium/v8/src/heap/heap-write-barrier.cc
+++ b/chromium/v8/src/heap/heap-write-barrier.cc
@@ -68,7 +68,16 @@ void WriteBarrier::MarkingSlow(Heap* heap, DescriptorArray descriptor_array,
int WriteBarrier::MarkingFromCode(Address raw_host, Address raw_slot) {
HeapObject host = HeapObject::cast(Object(raw_host));
MaybeObjectSlot slot(raw_slot);
- WriteBarrier::Marking(host, slot, *slot);
+ Address value = (*slot).ptr();
+#ifdef V8_MAP_PACKING
+ if (slot.address() == host.address()) {
+ // Clear metadata bits and fix object tag.
+ value = (value & ~Internals::kMapWordMetadataMask &
+ ~Internals::kMapWordXorMask) |
+ (uint64_t)kHeapObjectTag;
+ }
+#endif
+ WriteBarrier::Marking(host, slot, MaybeObject(value));
  // Called by RecordWriteCodeStubAssembler, which doesn't accept void type
return 0;
}
diff --git a/chromium/v8/src/heap/heap.cc b/chromium/v8/src/heap/heap.cc
index 8b8c30a1b0b..9da67b75344 100644
--- a/chromium/v8/src/heap/heap.cc
+++ b/chromium/v8/src/heap/heap.cc
@@ -14,6 +14,7 @@
#include "src/api/api-inl.h"
#include "src/base/bits.h"
#include "src/base/flags.h"
+#include "src/base/logging.h"
#include "src/base/once.h"
#include "src/base/platform/mutex.h"
#include "src/base/utils/random-number-generator.h"
@@ -35,6 +36,7 @@
#include "src/heap/barrier.h"
#include "src/heap/base/stack.h"
#include "src/heap/code-object-registry.h"
+#include "src/heap/code-range.h"
#include "src/heap/code-stats.h"
#include "src/heap/collection-barrier.h"
#include "src/heap/combined-heap.h"
@@ -323,7 +325,9 @@ size_t Heap::SemiSpaceSizeFromYoungGenerationSize(
size_t Heap::Capacity() {
if (!HasBeenSetUp()) return 0;
- return new_space_->Capacity() + OldGenerationCapacity();
+ if (FLAG_enable_third_party_heap) return tp_heap_->Capacity();
+
+ return NewSpaceCapacity() + OldGenerationCapacity();
}
size_t Heap::OldGenerationCapacity() {
@@ -358,7 +362,10 @@ size_t Heap::CommittedMemoryOfUnmapper() {
size_t Heap::CommittedMemory() {
if (!HasBeenSetUp()) return 0;
- return new_space_->CommittedMemory() + new_lo_space_->Size() +
+ size_t new_space_committed = new_space_ ? new_space_->CommittedMemory() : 0;
+ size_t new_lo_space_committed = new_lo_space_ ? new_lo_space_->Size() : 0;
+
+ return new_space_committed + new_lo_space_committed +
CommittedOldGenerationMemory();
}
@@ -421,14 +428,17 @@ bool Heap::CanExpandOldGenerationBackground(LocalHeap* local_heap,
}
bool Heap::CanPromoteYoungAndExpandOldGeneration(size_t size) {
+ size_t new_space_capacity = NewSpaceCapacity();
+ size_t new_lo_space_capacity = new_lo_space_ ? new_lo_space_->Size() : 0;
+
// Over-estimate the new space size using capacity to allow some slack.
- return CanExpandOldGeneration(size + new_space_->Capacity() +
- new_lo_space_->Size());
+ return CanExpandOldGeneration(size + new_space_capacity +
+ new_lo_space_capacity);
}
bool Heap::HasBeenSetUp() const {
- // We will always have a new space when the heap is set up.
- return new_space_ != nullptr;
+ // We will always have an old space when the heap is set up.
+ return old_space_ != nullptr;
}
GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
@@ -440,7 +450,7 @@ GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
return MARK_COMPACTOR;
}
- if (FLAG_gc_global || ShouldStressCompaction()) {
+ if (FLAG_gc_global || ShouldStressCompaction() || FLAG_single_generation) {
*reason = "GC in old space forced by flags";
return MARK_COMPACTOR;
}
@@ -485,7 +495,7 @@ void Heap::PrintShortHeapStatistics() {
"New space, used: %6zu KB"
", available: %6zu KB"
", committed: %6zu KB\n",
- new_space_->Size() / KB, new_space_->Available() / KB,
+ NewSpaceSize() / KB, new_space_->Available() / KB,
new_space_->CommittedMemory() / KB);
PrintIsolate(isolate_,
"New large object space, used: %6zu KB"
@@ -788,6 +798,58 @@ void Heap::PrintRetainingPath(HeapObject target, RetainingPathOption option) {
PrintF("-------------------------------------------------\n");
}
+void UpdateRetainersMapAfterScavenge(
+ std::unordered_map<HeapObject, HeapObject, Object::Hasher>* map) {
+ std::unordered_map<HeapObject, HeapObject, Object::Hasher> updated_map;
+
+ for (auto pair : *map) {
+ HeapObject object = pair.first;
+ HeapObject retainer = pair.second;
+
+ if (Heap::InFromPage(object)) {
+ MapWord map_word = object.map_word(kRelaxedLoad);
+ if (!map_word.IsForwardingAddress()) continue;
+ object = map_word.ToForwardingAddress();
+ }
+
+ if (Heap::InFromPage(retainer)) {
+ MapWord map_word = retainer.map_word(kRelaxedLoad);
+ if (!map_word.IsForwardingAddress()) continue;
+ retainer = map_word.ToForwardingAddress();
+ }
+
+ updated_map[object] = retainer;
+ }
+
+ *map = std::move(updated_map);
+}
+
+void Heap::UpdateRetainersAfterScavenge() {
+ if (!incremental_marking()->IsMarking()) return;
+
+ // This isn't supported for Minor MC.
+ DCHECK(!FLAG_minor_mc);
+
+ UpdateRetainersMapAfterScavenge(&retainer_);
+ UpdateRetainersMapAfterScavenge(&ephemeron_retainer_);
+
+ std::unordered_map<HeapObject, Root, Object::Hasher> updated_retaining_root;
+
+ for (auto pair : retaining_root_) {
+ HeapObject object = pair.first;
+
+ if (Heap::InFromPage(object)) {
+ MapWord map_word = object.map_word(kRelaxedLoad);
+ if (!map_word.IsForwardingAddress()) continue;
+ object = map_word.ToForwardingAddress();
+ }
+
+ updated_retaining_root[object] = pair.second;
+ }
+
+ retaining_root_ = std::move(updated_retaining_root);
+}
+
void Heap::AddRetainer(HeapObject retainer, HeapObject object) {
if (retainer_.count(object)) return;
retainer_[object] = retainer;
@@ -850,7 +912,7 @@ void Heap::GarbageCollectionPrologue() {
if (FLAG_gc_verbose) Print();
#endif // DEBUG
- if (new_space_->IsAtMaximumCapacity()) {
+ if (new_space_ && new_space_->IsAtMaximumCapacity()) {
maximum_size_scavenges_++;
} else {
maximum_size_scavenges_ = 0;
@@ -867,9 +929,20 @@ void Heap::GarbageCollectionPrologueInSafepoint() {
TRACE_GC(tracer(), GCTracer::Scope::HEAP_PROLOGUE_SAFEPOINT);
gc_count_++;
- UpdateNewSpaceAllocationCounter();
- CheckNewSpaceExpansionCriteria();
- new_space_->ResetParkedAllocationBuffers();
+ if (new_space_) {
+ UpdateNewSpaceAllocationCounter();
+ CheckNewSpaceExpansionCriteria();
+ new_space_->ResetParkedAllocationBuffers();
+ }
+}
+
+void Heap::UpdateNewSpaceAllocationCounter() {
+ new_space_allocation_counter_ = NewSpaceAllocationCounter();
+}
+
+size_t Heap::NewSpaceAllocationCounter() {
+ return new_space_allocation_counter_ +
+ (new_space_ ? new_space()->AllocatedSinceLastGC() : 0);
}
size_t Heap::SizeOfObjects() {
@@ -894,7 +967,7 @@ void Heap::MergeAllocationSitePretenuringFeedback(
AllocationSite site;
for (auto& site_and_count : local_pretenuring_feedback) {
site = site_and_count.first;
- MapWord map_word = site_and_count.first.map_word();
+ MapWord map_word = site_and_count.first.map_word(kRelaxedLoad);
if (map_word.IsForwardingAddress()) {
site = AllocationSite::cast(map_word.ToForwardingAddress());
}
@@ -944,14 +1017,15 @@ void Heap::RemoveAllocationObserversFromAllSpaces(
}
void Heap::PublishPendingAllocations() {
- new_space_->MarkLabStartInitialized();
+ if (FLAG_enable_third_party_heap) return;
+ if (new_space_) new_space_->MarkLabStartInitialized();
PagedSpaceIterator spaces(this);
for (PagedSpace* space = spaces.Next(); space != nullptr;
space = spaces.Next()) {
space->MoveOriginalTopForward();
}
lo_space_->ResetPendingObject();
- new_lo_space_->ResetPendingObject();
+ if (new_lo_space_) new_lo_space_->ResetPendingObject();
code_lo_space_->ResetPendingObject();
}
@@ -981,6 +1055,12 @@ inline bool MakePretenureDecision(
return false;
}
+// Clear feedback calculation fields until the next gc.
+inline void ResetPretenuringFeedback(AllocationSite site) {
+ site.set_memento_found_count(0);
+ site.set_memento_create_count(0);
+}
+
inline bool DigestPretenuringFeedback(Isolate* isolate, AllocationSite site,
bool maximum_size_scavenge) {
bool deopt = false;
@@ -1008,11 +1088,34 @@ inline bool DigestPretenuringFeedback(Isolate* isolate, AllocationSite site,
site.PretenureDecisionName(site.pretenure_decision()));
}
- // Clear feedback calculation fields until the next gc.
- site.set_memento_found_count(0);
- site.set_memento_create_count(0);
+ ResetPretenuringFeedback(site);
return deopt;
}
+
+bool PretenureAllocationSiteManually(Isolate* isolate, AllocationSite site) {
+ AllocationSite::PretenureDecision current_decision =
+ site.pretenure_decision();
+ bool deopt = true;
+ if (current_decision == AllocationSite::kUndecided ||
+ current_decision == AllocationSite::kMaybeTenure) {
+ site.set_deopt_dependent_code(true);
+ site.set_pretenure_decision(AllocationSite::kTenure);
+ } else {
+ deopt = false;
+ }
+ if (FLAG_trace_pretenuring_statistics) {
+ PrintIsolate(isolate,
+ "pretenuring manually requested: AllocationSite(%p): "
+ "%s => %s\n",
+ reinterpret_cast<void*>(site.ptr()),
+ site.PretenureDecisionName(current_decision),
+ site.PretenureDecisionName(site.pretenure_decision()));
+ }
+
+ ResetPretenuringFeedback(site);
+ return deopt;
+}
+
} // namespace
void Heap::RemoveAllocationSitePretenuringFeedback(AllocationSite site) {
@@ -1020,7 +1123,8 @@ void Heap::RemoveAllocationSitePretenuringFeedback(AllocationSite site) {
}
bool Heap::DeoptMaybeTenuredAllocationSites() {
- return new_space_->IsAtMaximumCapacity() && maximum_size_scavenges_ == 0;
+ return new_space_ && new_space_->IsAtMaximumCapacity() &&
+ maximum_size_scavenges_ == 0;
}
void Heap::ProcessPretenuringFeedback() {
@@ -1060,7 +1164,18 @@ void Heap::ProcessPretenuringFeedback() {
}
}
- // Step 2: Deopt maybe tenured allocation sites if necessary.
+ // Step 2: Pretenure allocation sites for manual requests.
+ if (allocation_sites_to_pretenure_) {
+ while (!allocation_sites_to_pretenure_->empty()) {
+ auto site = allocation_sites_to_pretenure_->Pop();
+ if (PretenureAllocationSiteManually(isolate_, site)) {
+ trigger_deoptimization = true;
+ }
+ }
+ allocation_sites_to_pretenure_.reset();
+ }
+
+ // Step 3: Deopt maybe tenured allocation sites if necessary.
bool deopt_maybe_tenured = DeoptMaybeTenuredAllocationSites();
if (deopt_maybe_tenured) {
ForeachAllocationSite(
@@ -1096,6 +1211,14 @@ void Heap::ProcessPretenuringFeedback() {
}
}
+void Heap::PretenureAllocationSiteOnNextCollection(AllocationSite site) {
+ if (!allocation_sites_to_pretenure_) {
+ allocation_sites_to_pretenure_.reset(
+ new GlobalHandleVector<AllocationSite>(this));
+ }
+ allocation_sites_to_pretenure_->Push(site);
+}
+
void Heap::InvalidateCodeDeoptimizationData(Code code) {
CodePageMemoryModificationScope modification_scope(code);
code.set_deoptimization_data(ReadOnlyRoots(this).empty_fixed_array());
@@ -1145,7 +1268,10 @@ void Heap::GarbageCollectionEpilogueInSafepoint(GarbageCollector collector) {
UPDATE_COUNTERS_FOR_SPACE(space) \
UPDATE_FRAGMENTATION_FOR_SPACE(space)
- UPDATE_COUNTERS_FOR_SPACE(new_space)
+ if (new_space()) {
+ UPDATE_COUNTERS_FOR_SPACE(new_space)
+ }
+
UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_space)
UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(code_space)
UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(map_space)
@@ -1175,16 +1301,14 @@ void Heap::GarbageCollectionEpilogueInSafepoint(GarbageCollector collector) {
ZapFromSpace();
}
- {
+ if (new_space()) {
TRACE_GC(tracer(), GCTracer::Scope::HEAP_EPILOGUE_REDUCE_NEW_SPACE);
ReduceNewSpaceSize();
}
// Set main thread state back to Running from CollectionRequested.
- LocalHeap* main_thread_local_heap = isolate()->main_thread_local_heap();
-
LocalHeap::ThreadState old_state =
- main_thread_local_heap->state_.exchange(LocalHeap::kRunning);
+ main_thread_local_heap()->state_.exchange(LocalHeap::kRunning);
CHECK(old_state == LocalHeap::kRunning ||
old_state == LocalHeap::kCollectionRequested);
@@ -1404,8 +1528,7 @@ void Heap::CollectAllAvailableGarbage(GarbageCollectionReason gc_reason) {
if (gc_reason == GarbageCollectionReason::kLastResort) {
InvokeNearHeapLimitCallback();
}
- RuntimeCallTimerScope runtime_timer(
- isolate(), RuntimeCallCounterId::kGC_Custom_AllAvailableGarbage);
+ RCS_SCOPE(isolate(), RuntimeCallCounterId::kGC_Custom_AllAvailableGarbage);
// The optimizing compiler may be unnecessarily holding on to memory.
isolate()->AbortConcurrentOptimization(BlockingBehavior::kDontBlock);
@@ -1512,6 +1635,7 @@ void Heap::EnsureFillerObjectAtTop() {
// evacuation of a non-full new space (or if we are on the last page) there
// may be uninitialized memory behind top. We fill the remainder of the page
// with a filler.
+ if (!new_space_) return;
Address to_top = new_space_->top();
Page* page = Page::FromAddress(to_top - kTaggedSize);
if (page->Contains(to_top)) {
@@ -1593,6 +1717,16 @@ bool Heap::CollectGarbage(AllocationSpace space,
if (collector == MARK_COMPACTOR) {
committed_memory_before = CommittedOldGenerationMemory();
+ if (cpp_heap()) {
+ // CppHeap needs a stack marker at the top of all entry points to allow
+ // deterministic passes over the stack. E.g., a verifier that should only
+ // find a subset of references of the marker.
+ //
+ // TODO(chromium:1056170): Consider adding a component that keeps track
+ // of relevant GC stack regions where interesting pointers can be found.
+ static_cast<v8::internal::CppHeap*>(cpp_heap())
+ ->SetStackEndOfCurrentGC(v8::base::Stack::GetCurrentStackPosition());
+ }
}
{
@@ -1800,7 +1934,7 @@ void Heap::StartIncrementalMarkingIfAllocationLimitIsReached(
} else if (reached_limit == IncrementalMarkingLimit::kHardLimit) {
StartIncrementalMarking(
gc_flags,
- OldGenerationSpaceAvailable() <= new_space_->Capacity()
+ OldGenerationSpaceAvailable() <= NewSpaceCapacity()
? GarbageCollectionReason::kAllocationLimit
: GarbageCollectionReason::kGlobalAllocationLimit,
gc_callback_flags);
@@ -1816,7 +1950,7 @@ void Heap::StartIncrementalMarkingIfAllocationLimitIsReachedBackground() {
const size_t old_generation_space_available = OldGenerationSpaceAvailable();
- if (old_generation_space_available < new_space_->Capacity()) {
+ if (old_generation_space_available < NewSpaceCapacity()) {
incremental_marking()->incremental_marking_job()->ScheduleTask(this);
}
}
@@ -1943,6 +2077,7 @@ static void VerifyStringTable(Isolate* isolate) {
#endif // VERIFY_HEAP
void Heap::EnsureFromSpaceIsCommitted() {
+ if (!new_space_) return;
if (new_space_->CommitFromSpaceIfNeeded()) return;
// Committing memory to from space failed.
@@ -2024,6 +2159,9 @@ size_t Heap::PerformGarbageCollection(
SafepointScope safepoint_scope(this);
+ // Shared isolates cannot have any clients when running GC at the moment.
+ DCHECK_IMPLIES(IsShared(), !isolate()->HasClientIsolates());
+
collection_barrier_->StopTimeToCollectionTimer();
#ifdef VERIFY_HEAP
@@ -2039,7 +2177,7 @@ size_t Heap::PerformGarbageCollection(
EnsureFromSpaceIsCommitted();
size_t start_young_generation_size =
- Heap::new_space()->Size() + new_lo_space()->SizeOfObjects();
+ NewSpaceSize() + (new_lo_space() ? new_lo_space()->SizeOfObjects() : 0);
switch (collector) {
case MARK_COMPACTOR:
@@ -2173,7 +2311,7 @@ void Heap::RecomputeLimits(GarbageCollector collector) {
}
size_t old_gen_size = OldGenerationSizeOfObjects();
- size_t new_space_capacity = new_space()->Capacity();
+ size_t new_space_capacity = NewSpaceCapacity();
HeapGrowingMode mode = CurrentHeapGrowingMode();
if (collector == MARK_COMPACTOR) {
@@ -2219,8 +2357,7 @@ void Heap::RecomputeLimits(GarbageCollector collector) {
}
void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
- RuntimeCallTimerScope runtime_timer(
- isolate(), RuntimeCallCounterId::kGCPrologueCallback);
+ RCS_SCOPE(isolate(), RuntimeCallCounterId::kGCPrologueCallback);
for (const GCCallbackTuple& info : gc_prologue_callbacks_) {
if (gc_type & info.gc_type) {
v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
@@ -2230,8 +2367,7 @@ void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
}
void Heap::CallGCEpilogueCallbacks(GCType gc_type, GCCallbackFlags flags) {
- RuntimeCallTimerScope runtime_timer(
- isolate(), RuntimeCallCounterId::kGCEpilogueCallback);
+ RCS_SCOPE(isolate(), RuntimeCallCounterId::kGCEpilogueCallback);
for (const GCCallbackTuple& info : gc_epilogue_callbacks_) {
if (gc_type & info.gc_type) {
v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
@@ -2280,7 +2416,7 @@ void Heap::MarkCompact() {
void Heap::MinorMarkCompact() {
#ifdef ENABLE_MINOR_MC
- DCHECK(FLAG_minor_mc);
+ DCHECK(FLAG_minor_mc && !FLAG_single_generation);
PauseAllocationObserversScope pause_observers(this);
SetGCState(MINOR_MARK_COMPACT);
@@ -2387,6 +2523,8 @@ void Heap::EvacuateYoungGeneration() {
}
void Heap::Scavenge() {
+ DCHECK(!FLAG_single_generation);
+
if (fast_promotion_mode_ && CanPromoteYoungAndExpandOldGeneration(0)) {
tracer()->NotifyYoungGenerationHandling(
YoungGenerationHandling::kFastPromotionDuringScavenge);
@@ -2435,12 +2573,15 @@ void Heap::Scavenge() {
}
void Heap::ComputeFastPromotionMode() {
+ if (!new_space_) return;
+
const size_t survived_in_new_space =
- survived_last_scavenge_ * 100 / new_space_->Capacity();
+ survived_last_scavenge_ * 100 / NewSpaceCapacity();
fast_promotion_mode_ =
!FLAG_optimize_for_size && FLAG_fast_promotion_new_space &&
!ShouldReduceMemory() && new_space_->IsAtMaximumCapacity() &&
survived_in_new_space >= kMinPromotedPercentForFastPromotionMode;
+
if (FLAG_trace_gc_verbose && !FLAG_trace_gc_ignore_scavenger) {
PrintIsolate(isolate(), "Fast promotion mode: %s survival rate: %zu%%\n",
fast_promotion_mode_ ? "true" : "false",
@@ -2488,6 +2629,8 @@ bool Heap::ExternalStringTable::Contains(String string) {
void Heap::UpdateExternalString(String string, size_t old_payload,
size_t new_payload) {
DCHECK(string.IsExternalString());
+ if (FLAG_enable_third_party_heap) return;
+
Page* page = Page::FromHeapObject(string);
if (old_payload > new_payload) {
@@ -2502,7 +2645,7 @@ void Heap::UpdateExternalString(String string, size_t old_payload,
String Heap::UpdateYoungReferenceInExternalStringTableEntry(Heap* heap,
FullObjectSlot p) {
HeapObject obj = HeapObject::cast(*p);
- MapWord first_word = obj.map_word();
+ MapWord first_word = obj.map_word(kRelaxedLoad);
String new_string;
@@ -2863,7 +3006,7 @@ HeapObject Heap::AlignWithFiller(ReadOnlyRoots roots, HeapObject object,
void* Heap::AllocateExternalBackingStore(
const std::function<void*(size_t)>& allocate, size_t byte_length) {
- if (!always_allocate()) {
+ if (!always_allocate() && new_space()) {
size_t new_space_backing_store_bytes =
new_space()->ExternalBackingStoreBytes();
if (new_space_backing_store_bytes >= 2 * kMaxSemiSpaceSize &&
@@ -2874,8 +3017,6 @@ void* Heap::AllocateExternalBackingStore(
GarbageCollectionReason::kExternalMemoryPressure);
}
}
- // TODO(ulan): Perform GCs proactively based on the byte_length and
- // the current external backing store counters.
void* result = allocate(byte_length);
if (result) return result;
if (!always_allocate()) {
@@ -2948,7 +3089,7 @@ HeapObject CreateFillerObjectAtImpl(ReadOnlyRoots roots, Address addr, int size,
DCHECK_GT(size, 2 * kTaggedSize);
filler.set_map_after_allocation(roots.unchecked_free_space_map(),
SKIP_WRITE_BARRIER);
- FreeSpace::cast(filler).relaxed_write_size(size);
+ FreeSpace::cast(filler).set_size(size, kRelaxedStore);
if (clear_memory_mode == ClearFreedMemoryMode::kClearFreedMemory) {
MemsetTagged(ObjectSlot(addr) + 2, Object(kClearedFreeMemoryValue),
(size / kTaggedSize) - 2);
@@ -2957,7 +3098,7 @@ HeapObject CreateFillerObjectAtImpl(ReadOnlyRoots roots, Address addr, int size,
// At this point, we may be deserializing the heap from a snapshot, and
// none of the maps have been created yet and are nullptr.
- DCHECK((filler.map_slot().contains_value(kNullAddress) &&
+ DCHECK((filler.map_slot().contains_map_value(kNullAddress) &&
!Heap::FromWritableHeapObject(filler)->deserialization_complete()) ||
filler.map().IsMap());
@@ -2969,8 +3110,7 @@ void VerifyNoNeedToClearSlots(Address start, Address end) {
BasicMemoryChunk* basic_chunk = BasicMemoryChunk::FromAddress(start);
if (basic_chunk->InReadOnlySpace()) return;
MemoryChunk* chunk = static_cast<MemoryChunk*>(basic_chunk);
- // TODO(ulan): Support verification of large pages.
- if (chunk->InYoungGeneration() || chunk->IsLargePage()) return;
+ if (chunk->InYoungGeneration()) return;
BaseSpace* space = chunk->owner();
space->heap()->VerifySlotRangeHasNoRecordedSlots(start, end);
}
@@ -3046,6 +3186,9 @@ bool Heap::IsImmovable(HeapObject object) {
}
bool Heap::IsLargeObject(HeapObject object) {
+ if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL)
+ return third_party_heap::Heap::InLargeObjectSpace(object.address()) ||
+ third_party_heap::Heap::InSpace(object.address(), CODE_LO_SPACE);
return BasicMemoryChunk::FromHeapObject(object)->IsLargePage();
}
@@ -3109,6 +3252,9 @@ void Heap::OnMoveEvent(HeapObject target, HeapObject source,
if (target.IsSharedFunctionInfo()) {
LOG_CODE_EVENT(isolate_, SharedFunctionInfoMoveEvent(source.address(),
target.address()));
+ } else if (target.IsNativeContext()) {
+ PROFILE(isolate_,
+ NativeContextMoveEvent(source.address(), target.address()));
}
if (FLAG_verify_predictable) {
@@ -3184,7 +3330,8 @@ FixedArrayBase Heap::LeftTrimFixedArray(FixedArrayBase object,
// Initialize header of the trimmed array. Since left trimming is only
// performed on pages which are not concurrently swept creating a filler
// object does not require synchronization.
- RELAXED_WRITE_FIELD(object, bytes_to_trim, map);
+ RELAXED_WRITE_FIELD(object, bytes_to_trim,
+ Object(MapWord::FromMap(map).ptr()));
RELAXED_WRITE_FIELD(object, bytes_to_trim + kTaggedSize,
Smi::FromInt(len - elements_to_trim));
@@ -3298,7 +3445,7 @@ void Heap::CreateFillerForArray(T object, int elements_to_trim,
// Initialize header of the trimmed array. We are storing the new length
// using release store after creating a filler for the left-over space to
// avoid races with the sweeper thread.
- object.synchronized_set_length(object.length() - elements_to_trim);
+ object.set_length(object.length() - elements_to_trim, kReleaseStore);
// Notify the heap object allocation tracker of change in object layout. The
// array may not be moved during GC, and size has to be adjusted nevertheless.
@@ -3451,8 +3598,6 @@ void Heap::ActivateMemoryReducerIfNeeded() {
}
void Heap::ReduceNewSpaceSize() {
- // TODO(ulan): Unify this constant with the similar constant in
- // GCIdleTimeHandler once the change is merged to 4.5.
static const size_t kLowAllocationThroughput = 1000;
const double allocation_throughput =
tracer()->CurrentAllocationThroughputInBytesPerMillisecond();
@@ -3468,6 +3613,12 @@ void Heap::ReduceNewSpaceSize() {
}
}
+size_t Heap::NewSpaceSize() { return new_space() ? new_space()->Size() : 0; }
+
+size_t Heap::NewSpaceCapacity() {
+ return new_space() ? new_space()->Capacity() : 0;
+}
+
void Heap::FinalizeIncrementalMarkingIfComplete(
GarbageCollectionReason gc_reason) {
if (incremental_marking()->IsMarking() &&
@@ -3582,6 +3733,8 @@ class SlotCollectingVisitor final : public ObjectVisitor {
UNREACHABLE();
}
+ void VisitMapPointer(HeapObject object) override {} // do nothing by default
+
int number_of_slots() { return static_cast<int>(slots_.size()); }
MaybeObjectSlot slot(int i) { return slots_[i]; }
@@ -3613,13 +3766,13 @@ void Heap::VerifyObjectLayoutChange(HeapObject object, Map new_map) {
// Check that the set of slots before and after the transition match.
SlotCollectingVisitor old_visitor;
object.IterateFast(&old_visitor);
- MapWord old_map_word = object.map_word();
+ MapWord old_map_word = object.map_word(kRelaxedLoad);
// Temporarily set the new map to iterate new slots.
- object.set_map_word(MapWord::FromMap(new_map));
+ object.set_map_word(MapWord::FromMap(new_map), kRelaxedStore);
SlotCollectingVisitor new_visitor;
object.IterateFast(&new_visitor);
// Restore the old map.
- object.set_map_word(old_map_word);
+ object.set_map_word(old_map_word, kRelaxedStore);
DCHECK_EQ(new_visitor.number_of_slots(), old_visitor.number_of_slots());
for (int i = 0; i < new_visitor.number_of_slots(); i++) {
DCHECK_EQ(new_visitor.slot(i), old_visitor.slot(i));
@@ -3692,12 +3845,15 @@ void Heap::IdleNotificationEpilogue(GCIdleTimeAction action,
}
}
-double Heap::MonotonicallyIncreasingTimeInMs() {
+double Heap::MonotonicallyIncreasingTimeInMs() const {
return V8::GetCurrentPlatform()->MonotonicallyIncreasingTime() *
static_cast<double>(base::Time::kMillisecondsPerSecond);
}
-void Heap::VerifyNewSpaceTop() { new_space()->VerifyTop(); }
+void Heap::VerifyNewSpaceTop() {
+ if (!new_space()) return;
+ new_space()->VerifyTop();
+}
bool Heap::IdleNotification(int idle_time_in_ms) {
return IdleNotification(
@@ -3866,6 +4022,11 @@ void Heap::AppendArrayBufferExtension(JSArrayBuffer object,
array_buffer_sweeper_->Append(object, extension);
}
+void Heap::DetachArrayBufferExtension(JSArrayBuffer object,
+ ArrayBufferExtension* extension) {
+ return array_buffer_sweeper_->Detach(object, extension);
+}
+
void Heap::AutomaticallyRestoreInitialHeapLimit(double threshold_percent) {
initial_max_old_generation_size_threshold_ =
initial_max_old_generation_size_ * threshold_percent;
@@ -4009,13 +4170,23 @@ bool Heap::Contains(HeapObject value) const {
return false;
}
return HasBeenSetUp() &&
- (new_space_->ToSpaceContains(value) || old_space_->Contains(value) ||
- code_space_->Contains(value) || map_space_->Contains(value) ||
- lo_space_->Contains(value) || code_lo_space_->Contains(value) ||
- new_lo_space_->Contains(value));
+ ((new_space_ && new_space_->ToSpaceContains(value)) ||
+ old_space_->Contains(value) || code_space_->Contains(value) ||
+ map_space_->Contains(value) || lo_space_->Contains(value) ||
+ code_lo_space_->Contains(value) ||
+ (new_lo_space_ && new_lo_space_->Contains(value)));
+}
+
+bool Heap::SharedHeapContains(HeapObject value) const {
+ if (shared_old_space_)
+ return shared_old_space_->Contains(value) ||
+ shared_map_space_->Contains(value);
+ return false;
}
bool Heap::InSpace(HeapObject value, AllocationSpace space) const {
+ if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL)
+ return third_party_heap::Heap::InSpace(value.address(), space);
if (memory_allocator()->IsOutsideAllocatedSpace(value.address())) {
return false;
}
@@ -4042,6 +4213,8 @@ bool Heap::InSpace(HeapObject value, AllocationSpace space) const {
UNREACHABLE();
}
+bool Heap::IsShared() { return isolate()->is_shared(); }
+
bool Heap::InSpaceSlow(Address addr, AllocationSpace space) const {
if (memory_allocator()->IsOutsideAllocatedSpace(addr)) {
return false;
@@ -4116,7 +4289,7 @@ void Heap::Verify() {
VerifySmisVisitor smis_visitor;
IterateSmiRoots(&smis_visitor);
- new_space_->Verify(isolate());
+ if (new_space_) new_space_->Verify(isolate());
old_space_->Verify(isolate(), &visitor);
map_space_->Verify(isolate(), &visitor);
@@ -4126,7 +4299,7 @@ void Heap::Verify() {
lo_space_->Verify(isolate());
code_lo_space_->Verify(isolate());
- new_lo_space_->Verify(isolate());
+ if (new_lo_space_) new_lo_space_->Verify(isolate());
VerifyStringTable(isolate());
}
@@ -4147,7 +4320,7 @@ class SlotVerifyingVisitor : public ObjectVisitor {
ObjectSlot end) override {
#ifdef DEBUG
for (ObjectSlot slot = start; slot < end; ++slot) {
- DCHECK(!HasWeakHeapObjectTag(*slot));
+ DCHECK(!MapWord::IsPacked((*slot).ptr()) || !HasWeakHeapObjectTag(*slot));
}
#endif // DEBUG
VisitPointers(host, MaybeObjectSlot(start), MaybeObjectSlot(end));
@@ -4315,7 +4488,7 @@ void Heap::VerifyCountersBeforeConcurrentSweeping() {
#endif
void Heap::ZapFromSpace() {
- if (!new_space_->IsFromSpaceCommitted()) return;
+ if (!new_space_ || !new_space_->IsFromSpaceCommitted()) return;
for (Page* page : PageRange(new_space_->from_space().first_page(), nullptr)) {
memory_allocator()->ZapBlock(page->area_start(),
page->HighWaterMark() - page->area_start(),
@@ -4397,18 +4570,20 @@ class FixStaleLeftTrimmedHandlesVisitor : public RootVisitor {
void VisitRootPointers(Root root, const char* description,
FullObjectSlot start, FullObjectSlot end) override {
- for (FullObjectSlot p = start; p < end; ++p) FixHandle(p);
+ for (FullObjectSlot p = start; p < end; ++p) {
+ FixHandle(p);
+ }
}
private:
inline void FixHandle(FullObjectSlot p) {
if (!(*p).IsHeapObject()) return;
HeapObject current = HeapObject::cast(*p);
- if (!current.map_word().IsForwardingAddress() &&
+ if (!current.map_word(kRelaxedLoad).IsForwardingAddress() &&
current.IsFreeSpaceOrFiller()) {
#ifdef DEBUG
// We need to find a FixedArrayBase map after walking the fillers.
- while (!current.map_word().IsForwardingAddress() &&
+ while (!current.map_word(kRelaxedLoad).IsForwardingAddress() &&
current.IsFreeSpaceOrFiller()) {
Address next = current.ptr();
if (current.map() == ReadOnlyRoots(heap_).one_pointer_filler_map()) {
@@ -4421,7 +4596,7 @@ class FixStaleLeftTrimmedHandlesVisitor : public RootVisitor {
}
current = HeapObject::cast(Object(next));
}
- DCHECK(current.map_word().IsForwardingAddress() ||
+ DCHECK(current.map_word(kRelaxedLoad).IsForwardingAddress() ||
current.IsFixedArrayBase());
#endif // DEBUG
p.store(Smi::zero());
@@ -4502,8 +4677,10 @@ void Heap::IterateRoots(RootVisitor* v, base::EnumSet<SkipRoot> options) {
// Iterate over local handles in handle scopes.
FixStaleLeftTrimmedHandlesVisitor left_trim_visitor(this);
#ifndef V8_ENABLE_CONSERVATIVE_STACK_SCANNING
- isolate_->handle_scope_implementer()->Iterate(&left_trim_visitor);
- isolate_->handle_scope_implementer()->Iterate(v);
+ if (!options.contains(SkipRoot::kMainThreadHandles)) {
+ isolate_->handle_scope_implementer()->Iterate(&left_trim_visitor);
+ isolate_->handle_scope_implementer()->Iterate(v);
+ }
#endif
safepoint_->Iterate(&left_trim_visitor);
@@ -4772,8 +4949,8 @@ void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
*stats->end_marker = HeapStats::kEndMarker;
*stats->ro_space_size = read_only_space_->Size();
*stats->ro_space_capacity = read_only_space_->Capacity();
- *stats->new_space_size = new_space_->Size();
- *stats->new_space_capacity = new_space_->Capacity();
+ *stats->new_space_size = NewSpaceSize();
+ *stats->new_space_capacity = NewSpaceCapacity();
*stats->old_space_size = old_space_->SizeOfObjects();
*stats->old_space_capacity = old_space_->Capacity();
*stats->code_space_size = code_space_->SizeOfObjects();
@@ -5036,9 +5213,9 @@ Heap::IncrementalMarkingLimit Heap::IncrementalMarkingLimitReached() {
const base::Optional<size_t> global_memory_available =
GlobalMemoryAvailable();
- if (old_generation_space_available > new_space_->Capacity() &&
+ if (old_generation_space_available > NewSpaceCapacity() &&
(!global_memory_available ||
- global_memory_available > new_space_->Capacity())) {
+ global_memory_available > NewSpaceCapacity())) {
return IncrementalMarkingLimit::kNoLimit;
}
if (ShouldOptimizeForMemoryUsage()) {
@@ -5065,8 +5242,10 @@ void Heap::EnableInlineAllocation() {
inline_allocation_disabled_ = false;
// Update inline allocation limit for new space.
- new_space()->AdvanceAllocationObservers();
- new_space()->UpdateInlineAllocationLimit(0);
+ if (new_space()) {
+ new_space()->AdvanceAllocationObservers();
+ new_space()->UpdateInlineAllocationLimit(0);
+ }
}
void Heap::DisableInlineAllocation() {
@@ -5074,7 +5253,9 @@ void Heap::DisableInlineAllocation() {
inline_allocation_disabled_ = true;
// Update inline allocation limit for new space.
- new_space()->UpdateInlineAllocationLimit(0);
+ if (new_space()) {
+ new_space()->UpdateInlineAllocationLimit(0);
+ }
// Update inline allocation limit for old spaces.
PagedSpaceIterator spaces(this);
@@ -5138,6 +5319,10 @@ HeapObject Heap::AllocateRawWithRetryOrFailSlowPath(
return HeapObject();
}
+namespace {
+V8_DECLARE_ONCE(initialize_shared_code_range_once);
+} // namespace
+
void Heap::SetUp() {
#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
allocation_timeout_ = NextAllocationTimeout();
@@ -5159,9 +5344,45 @@ void Heap::SetUp() {
reinterpret_cast<uintptr_t>(v8::internal::GetRandomMmapAddr()) &
~kMmapRegionMask;
+ v8::PageAllocator* code_page_allocator;
+ if (isolate_->RequiresCodeRange() || code_range_size_ != 0) {
+ const size_t requested_size =
+ code_range_size_ == 0 ? kMaximalCodeRangeSize : code_range_size_;
+ // When a target requires the code range feature, we put all code objects in
+ // a contiguous range of virtual address space, so that they can call each
+ // other with near calls.
+ if (COMPRESS_POINTERS_IN_SHARED_CAGE_BOOL) {
+ // When sharing a pointer cage among Isolates, also share the
+ // CodeRange. isolate_->page_allocator() is the process-wide pointer
+ // compression cage's PageAllocator.
+ base::CallOnce(&initialize_shared_code_range_once,
+ &CodeRange::InitializeProcessWideCodeRangeOnce,
+ isolate_->page_allocator(), requested_size);
+ code_range_ = CodeRange::GetProcessWideCodeRange();
+ } else {
+ code_range_ = std::make_shared<CodeRange>();
+ if (!code_range_->InitReservation(isolate_->page_allocator(),
+ requested_size)) {
+ V8::FatalProcessOutOfMemory(
+ isolate_, "Failed to reserve virtual memory for CodeRange");
+ }
+ }
+
+ LOG(isolate_,
+ NewEvent("CodeRange",
+ reinterpret_cast<void*>(code_range_->reservation()->address()),
+ code_range_size_));
+
+ isolate_->AddCodeRange(code_range_->reservation()->region().begin(),
+ code_range_->reservation()->region().size());
+ code_page_allocator = code_range_->page_allocator();
+ } else {
+ code_page_allocator = isolate_->page_allocator();
+ }
+
// Set up memory allocator.
memory_allocator_.reset(
- new MemoryAllocator(isolate_, MaxReserved(), code_range_size_));
+ new MemoryAllocator(isolate_, code_page_allocator, MaxReserved()));
mark_compact_collector_.reset(new MarkCompactCollector(this));
@@ -5203,49 +5424,6 @@ void Heap::ReplaceReadOnlySpace(SharedReadOnlySpace* space) {
read_only_space_ = space;
}
-uint8_t* Heap::RemapEmbeddedBuiltinsIntoCodeRange(
- const uint8_t* embedded_blob_code, size_t embedded_blob_code_size) {
- const base::AddressRegion& code_range = memory_allocator()->code_range();
-
- CHECK_NE(code_range.begin(), kNullAddress);
- CHECK(!code_range.is_empty());
-
- v8::PageAllocator* code_page_allocator =
- memory_allocator()->code_page_allocator();
-
- const size_t kAllocatePageSize = code_page_allocator->AllocatePageSize();
- size_t allocate_code_size =
- RoundUp(embedded_blob_code_size, kAllocatePageSize);
-
- // Allocate the re-embedded code blob in the end.
- void* hint = reinterpret_cast<void*>(code_range.end() - allocate_code_size);
-
- void* embedded_blob_copy = code_page_allocator->AllocatePages(
- hint, allocate_code_size, kAllocatePageSize, PageAllocator::kNoAccess);
-
- if (!embedded_blob_copy) {
- V8::FatalProcessOutOfMemory(
- isolate(), "Can't allocate space for re-embedded builtins");
- }
-
- size_t code_size =
- RoundUp(embedded_blob_code_size, code_page_allocator->CommitPageSize());
-
- if (!code_page_allocator->SetPermissions(embedded_blob_copy, code_size,
- PageAllocator::kReadWrite)) {
- V8::FatalProcessOutOfMemory(isolate(),
- "Re-embedded builtins: set permissions");
- }
- memcpy(embedded_blob_copy, embedded_blob_code, embedded_blob_code_size);
-
- if (!code_page_allocator->SetPermissions(embedded_blob_copy, code_size,
- PageAllocator::kReadExecute)) {
- V8::FatalProcessOutOfMemory(isolate(),
- "Re-embedded builtins: set permissions");
- }
- return reinterpret_cast<uint8_t*>(embedded_blob_copy);
-}
-
class StressConcurrentAllocationObserver : public AllocationObserver {
public:
explicit StressConcurrentAllocationObserver(Heap* heap)
@@ -5269,15 +5447,19 @@ class StressConcurrentAllocationObserver : public AllocationObserver {
void Heap::SetUpSpaces() {
  // Ensure SetUpFromReadOnlySpace has been run.
DCHECK_NOT_NULL(read_only_space_);
- space_[NEW_SPACE] = new_space_ =
- new NewSpace(this, memory_allocator_->data_page_allocator(),
- initial_semispace_size_, max_semi_space_size_);
+ if (!FLAG_single_generation) {
+ space_[NEW_SPACE] = new_space_ =
+ new NewSpace(this, memory_allocator_->data_page_allocator(),
+ initial_semispace_size_, max_semi_space_size_);
+ }
space_[OLD_SPACE] = old_space_ = new OldSpace(this);
space_[CODE_SPACE] = code_space_ = new CodeSpace(this);
space_[MAP_SPACE] = map_space_ = new MapSpace(this);
space_[LO_SPACE] = lo_space_ = new OldLargeObjectSpace(this);
- space_[NEW_LO_SPACE] = new_lo_space_ =
- new NewLargeObjectSpace(this, new_space_->Capacity());
+ if (!FLAG_single_generation) {
+ space_[NEW_LO_SPACE] = new_lo_space_ =
+ new NewLargeObjectSpace(this, NewSpaceCapacity());
+ }
space_[CODE_LO_SPACE] = code_lo_space_ = new CodeLargeObjectSpace(this);
for (int i = 0; i < static_cast<int>(v8::Isolate::kUseCounterFeatureCount);
@@ -5300,6 +5482,8 @@ void Heap::SetUpSpaces() {
dead_object_stats_.reset(new ObjectStats(this));
}
local_embedder_heap_tracer_.reset(new LocalEmbedderHeapTracer(isolate()));
+ embedder_roots_handler_ =
+ &local_embedder_heap_tracer()->default_embedder_roots_handler();
LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
LOG(isolate_, IntPtrTEvent("heap-available", Available()));
@@ -5311,10 +5495,12 @@ void Heap::SetUpSpaces() {
}
#endif // ENABLE_MINOR_MC
- scavenge_job_.reset(new ScavengeJob());
- scavenge_task_observer_.reset(new ScavengeTaskObserver(
- this, ScavengeJob::YoungGenerationTaskTriggerSize(this)));
- new_space()->AddAllocationObserver(scavenge_task_observer_.get());
+ if (!FLAG_single_generation) {
+ scavenge_job_.reset(new ScavengeJob());
+ scavenge_task_observer_.reset(new ScavengeTaskObserver(
+ this, ScavengeJob::YoungGenerationTaskTriggerSize(this)));
+ new_space()->AddAllocationObserver(scavenge_task_observer_.get());
+ }
SetGetExternallyAllocatedMemoryInBytesCallback(
DefaultGetExternallyAllocatedMemoryInBytesCallback);
@@ -5325,7 +5511,7 @@ void Heap::SetUpSpaces() {
AddAllocationObserversToAllSpaces(stress_marking_observer_,
stress_marking_observer_);
}
- if (FLAG_stress_scavenge > 0) {
+ if (FLAG_stress_scavenge > 0 && new_space()) {
stress_scavenge_observer_ = new StressScavengeObserver(this);
new_space()->AddAllocationObserver(stress_scavenge_observer_);
}
@@ -5333,6 +5519,11 @@ void Heap::SetUpSpaces() {
write_protect_code_memory_ = FLAG_write_protect_code_memory;
}
+void Heap::InitializeMainThreadLocalHeap(LocalHeap* main_thread_local_heap) {
+ DCHECK_NULL(main_thread_local_heap_);
+ main_thread_local_heap_ = main_thread_local_heap;
+}
+
void Heap::InitializeHashSeed() {
DCHECK(!deserialization_complete_);
uint64_t new_hash_seed;
@@ -5438,6 +5629,14 @@ void Heap::SetEmbedderHeapTracer(EmbedderHeapTracer* tracer) {
local_embedder_heap_tracer()->SetRemoteTracer(tracer);
}
+void Heap::SetEmbedderRootsHandler(EmbedderRootsHandler* handler) {
+ embedder_roots_handler_ = handler;
+}
+
+EmbedderRootsHandler* Heap::GetEmbedderRootsHandler() const {
+ return embedder_roots_handler_;
+}
+
EmbedderHeapTracer* Heap::GetEmbedderHeapTracer() const {
return local_embedder_heap_tracer()->remote_tracer();
}
@@ -5461,6 +5660,11 @@ EmbedderHeapTracer::TraceFlags Heap::flags_for_embedder_tracer() const {
return EmbedderHeapTracer::TraceFlags::kNoFlags;
}
+const cppgc::EmbedderStackState* Heap::overriden_stack_state() const {
+ const auto* cpp_heap = CppHeap::From(cpp_heap_);
+ return cpp_heap ? cpp_heap->override_stack_state() : nullptr;
+}
+
void Heap::RegisterExternallyReferencedObject(Address* location) {
GlobalHandles::MarkTraced(location);
Object object(*location);
@@ -5524,7 +5728,10 @@ void Heap::TearDown() {
}
}
- new_space()->RemoveAllocationObserver(scavenge_task_observer_.get());
+ if (new_space()) {
+ new_space()->RemoveAllocationObserver(scavenge_task_observer_.get());
+ }
+
scavenge_task_observer_.reset();
scavenge_job_.reset();
@@ -5541,7 +5748,7 @@ void Heap::TearDown() {
delete stress_marking_observer_;
stress_marking_observer_ = nullptr;
}
- if (FLAG_stress_scavenge > 0) {
+ if (FLAG_stress_scavenge > 0 && new_space()) {
new_space()->RemoveAllocationObserver(stress_scavenge_observer_);
delete stress_scavenge_observer_;
stress_scavenge_observer_ = nullptr;
@@ -5578,6 +5785,8 @@ void Heap::TearDown() {
dead_object_stats_.reset();
local_embedder_heap_tracer_.reset();
+ embedder_roots_handler_ = nullptr;
+
if (cpp_heap_) {
CppHeap::From(cpp_heap_)->DetachIsolate();
cpp_heap_ = nullptr;
@@ -5587,6 +5796,8 @@ void Heap::TearDown() {
tracer_.reset();
+ allocation_sites_to_pretenure_.reset();
+
for (int i = FIRST_MUTABLE_SPACE; i <= LAST_MUTABLE_SPACE; i++) {
delete space_[i];
space_[i] = nullptr;
@@ -5608,6 +5819,24 @@ void Heap::TearDown() {
memory_allocator_.reset();
}
+void Heap::InitSharedSpaces() {
+ shared_old_space_ = isolate()->shared_isolate()->heap()->old_space();
+ shared_old_allocator_.reset(
+ new ConcurrentAllocator(main_thread_local_heap(), shared_old_space_));
+
+ shared_map_space_ = isolate()->shared_isolate()->heap()->map_space();
+ shared_map_allocator_.reset(
+ new ConcurrentAllocator(main_thread_local_heap(), shared_map_space_));
+}
+
+void Heap::DeinitSharedSpaces() {
+ shared_old_space_ = nullptr;
+ shared_old_allocator_.reset();
+
+ shared_map_space_ = nullptr;
+ shared_map_allocator_.reset();
+}
+
void Heap::AddGCPrologueCallback(v8::Isolate::GCCallbackWithData callback,
GCType gc_type, void* data) {
DCHECK_NOT_NULL(callback);
@@ -5685,7 +5914,7 @@ Handle<WeakArrayList> CompactWeakArrayList(Heap* heap,
} // anonymous namespace
-void Heap::CompactWeakArrayLists(AllocationType allocation) {
+void Heap::CompactWeakArrayLists() {
// Find known PrototypeUsers and compact them.
std::vector<Handle<PrototypeInfo>> prototype_infos;
{
@@ -5702,20 +5931,18 @@ void Heap::CompactWeakArrayLists(AllocationType allocation) {
for (auto& prototype_info : prototype_infos) {
Handle<WeakArrayList> array(
WeakArrayList::cast(prototype_info->prototype_users()), isolate());
- DCHECK_IMPLIES(allocation == AllocationType::kOld,
- InOldSpace(*array) ||
- *array == ReadOnlyRoots(this).empty_weak_array_list());
+ DCHECK(InOldSpace(*array) ||
+ *array == ReadOnlyRoots(this).empty_weak_array_list());
WeakArrayList new_array = PrototypeUsers::Compact(
- array, this, JSObject::PrototypeRegistryCompactionCallback, allocation);
+ array, this, JSObject::PrototypeRegistryCompactionCallback,
+ AllocationType::kOld);
prototype_info->set_prototype_users(new_array);
}
// Find known WeakArrayLists and compact them.
Handle<WeakArrayList> scripts(script_list(), isolate());
- DCHECK_IMPLIES(
- !V8_ENABLE_THIRD_PARTY_HEAP_BOOL && allocation == AllocationType::kOld,
- InOldSpace(*scripts));
- scripts = CompactWeakArrayList(this, scripts, allocation);
+ DCHECK(InOldSpace(*scripts));
+ scripts = CompactWeakArrayList(this, scripts, AllocationType::kOld);
set_script_list(*scripts);
}
@@ -5847,7 +6074,6 @@ void Heap::VerifyClearedSlot(HeapObject object, ObjectSlot slot) {
void Heap::VerifySlotRangeHasNoRecordedSlots(Address start, Address end) {
#ifndef V8_DISABLE_WRITE_BARRIERS
Page* page = Page::FromAddress(start);
- DCHECK(!page->IsLargePage());
DCHECK(!page->InYoungGeneration());
RememberedSet<OLD_TO_NEW>::CheckNoneInRange(page, start, end);
#endif
@@ -5887,18 +6113,26 @@ PagedSpace* PagedSpaceIterator::Next() {
}
SpaceIterator::SpaceIterator(Heap* heap)
- : heap_(heap), current_space_(FIRST_MUTABLE_SPACE - 1) {}
+ : heap_(heap), current_space_(FIRST_MUTABLE_SPACE) {}
SpaceIterator::~SpaceIterator() = default;
bool SpaceIterator::HasNext() {
- // Iterate until no more spaces.
- return current_space_ != LAST_SPACE;
+ while (current_space_ <= LAST_MUTABLE_SPACE) {
+ Space* space = heap_->space(current_space_);
+ if (space) return true;
+ ++current_space_;
+ }
+
+ // No more spaces left.
+ return false;
}
Space* SpaceIterator::Next() {
DCHECK(HasNext());
- return heap_->space(++current_space_);
+ Space* space = heap_->space(current_space_++);
+ DCHECK_NOT_NULL(space);
+ return space;
}
class HeapObjectsFilter {
@@ -5922,14 +6156,14 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
bool SkipObject(HeapObject object) override {
if (object.IsFreeSpaceOrFiller()) return true;
- BasicMemoryChunk* chunk = BasicMemoryChunk::FromHeapObject(object);
+ Address chunk = object.ptr() & ~kLogicalChunkAlignmentMask;
if (reachable_.count(chunk) == 0) return true;
return reachable_[chunk]->count(object) == 0;
}
private:
bool MarkAsReachable(HeapObject object) {
- BasicMemoryChunk* chunk = BasicMemoryChunk::FromHeapObject(object);
+ Address chunk = object.ptr() & ~kLogicalChunkAlignmentMask;
if (reachable_.count(chunk) == 0) {
reachable_[chunk] = new std::unordered_set<HeapObject, Object::Hasher>();
}
@@ -5938,11 +6172,20 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
return true;
}
+ static constexpr intptr_t kLogicalChunkAlignment =
+ (static_cast<uintptr_t>(1) << kPageSizeBits);
+
+ static constexpr intptr_t kLogicalChunkAlignmentMask =
+ kLogicalChunkAlignment - 1;
+
class MarkingVisitor : public ObjectVisitor, public RootVisitor {
public:
explicit MarkingVisitor(UnreachableObjectsFilter* filter)
: filter_(filter) {}
+ void VisitMapPointer(HeapObject object) override {
+ MarkHeapObject(Map::unchecked_cast(object.map()));
+ }
void VisitPointers(HeapObject host, ObjectSlot start,
ObjectSlot end) override {
MarkPointers(MaybeObjectSlot(start), MaybeObjectSlot(end));
@@ -6017,8 +6260,7 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
Heap* heap_;
DISALLOW_GARBAGE_COLLECTION(no_gc_)
- std::unordered_map<BasicMemoryChunk*,
- std::unordered_set<HeapObject, Object::Hasher>*>
+ std::unordered_map<Address, std::unordered_set<HeapObject, Object::Hasher>*>
reachable_;
};
@@ -6041,13 +6283,14 @@ HeapObjectIterator::HeapObjectIterator(
break;
}
object_iterator_ = space_iterator_->Next()->GetObjectIterator(heap_);
+ if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) heap_->tp_heap_->ResetIterator();
}
HeapObjectIterator::~HeapObjectIterator() {
#ifdef DEBUG
// Assert that in filtering mode we have iterated through all
// objects. Otherwise, heap will be left in an inconsistent state.
- if (filtering_ != kNoFiltering) {
+ if (!V8_ENABLE_THIRD_PARTY_HEAP_BOOL && filtering_ != kNoFiltering) {
DCHECK_NULL(object_iterator_);
}
#endif
@@ -6064,6 +6307,7 @@ HeapObject HeapObjectIterator::Next() {
}
HeapObject HeapObjectIterator::NextObject() {
+ if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) return heap_->tp_heap_->NextObject();
// No iterator means we are done.
if (object_iterator_.get() == nullptr) return HeapObject();
@@ -6288,8 +6532,6 @@ MaybeHandle<JSFinalizationRegistry> Heap::DequeueDirtyJSFinalizationRegistry() {
}
void Heap::RemoveDirtyFinalizationRegistriesOnContext(NativeContext context) {
- if (!FLAG_harmony_weak_refs) return;
-
DisallowGarbageCollection no_gc;
Isolate* isolate = this->isolate();
@@ -6319,7 +6561,6 @@ void Heap::RemoveDirtyFinalizationRegistriesOnContext(NativeContext context) {
}
void Heap::KeepDuringJob(Handle<JSReceiver> target) {
- DCHECK(FLAG_harmony_weak_refs);
DCHECK(weak_refs_keep_during_job().IsUndefined() ||
weak_refs_keep_during_job().IsOrderedHashSet());
Handle<OrderedHashSet> table;
@@ -6454,7 +6695,8 @@ void VerifyPointersVisitor::VerifyPointersImpl(TSlot start, TSlot end) {
if (object.GetHeapObject(&heap_object)) {
VerifyHeapObjectImpl(heap_object);
} else {
- CHECK(object.IsSmi() || object.IsCleared());
+ CHECK(object.IsSmi() || object.IsCleared() ||
+ MapWord::IsPacked(object.ptr()));
}
}
}
@@ -6538,7 +6780,7 @@ void Heap::CreateObjectStats() {
}
Map Heap::GcSafeMapOfCodeSpaceObject(HeapObject object) {
- MapWord map_word = object.map_word();
+ MapWord map_word = object.map_word(kRelaxedLoad);
return map_word.IsForwardingAddress() ? map_word.ToForwardingAddress().map()
: map_word.ToMap();
}
@@ -6701,6 +6943,7 @@ template void Heap::WriteBarrierForRange<MaybeObjectSlot>(
template <typename TSlot>
void Heap::WriteBarrierForRange(HeapObject object, TSlot start_slot,
TSlot end_slot) {
+ if (FLAG_disable_write_barriers) return;
MemoryChunk* source_page = MemoryChunk::FromHeapObject(object);
base::Flags<RangeWriteBarrierMode> mode;
diff --git a/chromium/v8/src/heap/heap.h b/chromium/v8/src/heap/heap.h
index 429f8864be7..4be4d8f7325 100644
--- a/chromium/v8/src/heap/heap.h
+++ b/chromium/v8/src/heap/heap.h
@@ -66,12 +66,16 @@ class ArrayBufferCollector;
class ArrayBufferSweeper;
class BasicMemoryChunk;
class CodeLargeObjectSpace;
+class CodeRange;
class CollectionBarrier;
+class ConcurrentAllocator;
class ConcurrentMarking;
class CppHeap;
class GCIdleTimeHandler;
class GCIdleTimeHeapState;
class GCTracer;
+template <typename T>
+class GlobalHandleVector;
class GlobalSafepoint;
class HeapObjectAllocationTracker;
class HeapObjectsFilter;
@@ -176,6 +180,7 @@ enum class SkipRoot {
kGlobalHandles,
kOldGeneration,
kStack,
+ kMainThreadHandles,
kUnserializable,
kWeak
};
@@ -194,7 +199,7 @@ class StrongRootsEntry {
class AllocationResult {
public:
- static inline AllocationResult Retry(AllocationSpace space = NEW_SPACE) {
+ static inline AllocationResult Retry(AllocationSpace space) {
return AllocationResult(space);
}
@@ -513,6 +518,9 @@ class Heap {
inline Address* OldSpaceAllocationTopAddress();
inline Address* OldSpaceAllocationLimitAddress();
+ size_t NewSpaceSize();
+ size_t NewSpaceCapacity();
+
// Move len non-weak tagged elements from src_slot to dst_slot of dst_object.
// The source and destination memory ranges can overlap.
V8_EXPORT_PRIVATE void MoveRange(HeapObject dst_object, ObjectSlot dst_slot,
@@ -566,9 +574,12 @@ class Heap {
V8_EXPORT_PRIVATE int NotifyContextDisposed(bool dependant_context);
void set_native_contexts_list(Object object) {
- native_contexts_list_ = object;
+ native_contexts_list_.store(object.ptr(), std::memory_order_release);
+ }
+
+ Object native_contexts_list() const {
+ return Object(native_contexts_list_.load(std::memory_order_acquire));
}
- Object native_contexts_list() const { return native_contexts_list_; }
void set_allocation_sites_list(Object object) {
allocation_sites_list_ = object;
@@ -693,10 +704,12 @@ class Heap {
void AppendArrayBufferExtension(JSArrayBuffer object,
ArrayBufferExtension* extension);
+ void DetachArrayBufferExtension(JSArrayBuffer object,
+ ArrayBufferExtension* extension);
GlobalSafepoint* safepoint() { return safepoint_.get(); }
- V8_EXPORT_PRIVATE double MonotonicallyIncreasingTimeInMs();
+ V8_EXPORT_PRIVATE double MonotonicallyIncreasingTimeInMs() const;
void VerifyNewSpaceTop();
@@ -743,7 +756,7 @@ class Heap {
size_t backing_store_bytes() const { return backing_store_bytes_; }
- void CompactWeakArrayLists(AllocationType allocation);
+ void CompactWeakArrayLists();
V8_EXPORT_PRIVATE void AddRetainedMap(Handle<NativeContext> context,
Handle<Map> map);
@@ -760,6 +773,11 @@ class Heap {
inline bool CanAllocateInReadOnlySpace();
bool deserialization_complete() const { return deserialization_complete_; }
+ // We can only invoke Safepoint() on the main thread local heap after
+ // deserialization is complete. Before that, main_thread_local_heap_ might be
+ // null.
+ V8_INLINE bool CanSafepoint() const { return deserialization_complete(); }
+
bool HasLowAllocationRate();
bool HasHighFragmentation();
bool HasHighFragmentation(size_t used, size_t committed);
@@ -802,6 +820,9 @@ class Heap {
// Sets up the heap memory without creating any objects.
void SetUpSpaces();
+ // Prepares the heap, setting up for deserialization.
+ void InitializeMainThreadLocalHeap(LocalHeap* main_thread_local_heap);
+
// (Re-)Initialize hash seed from flag or RNG.
void InitializeHashSeed();
@@ -812,12 +833,6 @@ class Heap {
// Create ObjectStats if live_object_stats_ or dead_object_stats_ are nullptr.
void CreateObjectStats();
- // If the code range exists, allocates executable pages in the code range and
- // copies the embedded builtins code blob there. Returns address of the copy.
- // The builtins code region will be freed with the code range at tear down.
- uint8_t* RemapEmbeddedBuiltinsIntoCodeRange(const uint8_t* embedded_blob_code,
- size_t embedded_blob_code_size);
-
// Sets the TearDown state, so no new GC tasks get posted.
void StartTearDown();
@@ -827,6 +842,12 @@ class Heap {
// Returns whether SetUp has been called.
bool HasBeenSetUp() const;
+ // Initializes shared spaces.
+ void InitSharedSpaces();
+
+ // Removes shared spaces again.
+ void DeinitSharedSpaces();
+
// ===========================================================================
// Getters for spaces. =======================================================
// ===========================================================================
@@ -857,6 +878,7 @@ class Heap {
}
inline Isolate* isolate();
+ inline const Isolate* isolate() const;
MarkCompactCollector* mark_compact_collector() {
return mark_compact_collector_.get();
@@ -870,7 +892,11 @@ class Heap {
return array_buffer_sweeper_.get();
}
- const base::AddressRegion& code_range();
+ const base::AddressRegion& code_region();
+
+ CodeRange* code_range() { return code_range_.get(); }
+
+ LocalHeap* main_thread_local_heap() { return main_thread_local_heap_; }
// ===========================================================================
// Root set access. ==========================================================
@@ -1076,17 +1102,19 @@ class Heap {
void EnsureSweepingCompleted();
- IncrementalMarking* incremental_marking() {
+ IncrementalMarking* incremental_marking() const {
return incremental_marking_.get();
}
- MarkingBarrier* marking_barrier() { return marking_barrier_.get(); }
+ MarkingBarrier* marking_barrier() const { return marking_barrier_.get(); }
// ===========================================================================
// Concurrent marking API. ===================================================
// ===========================================================================
- ConcurrentMarking* concurrent_marking() { return concurrent_marking_.get(); }
+ ConcurrentMarking* concurrent_marking() const {
+ return concurrent_marking_.get();
+ }
// The runtime uses this function to notify potentially unsafe object layout
// changes that require special synchronization with the concurrent marker.
@@ -1150,6 +1178,16 @@ class Heap {
v8::CppHeap* cpp_heap() const { return cpp_heap_; }
+ const cppgc::EmbedderStackState* overriden_stack_state() const;
+
+ // ===========================================================================
+ // Embedder roots optimizations. =============================================
+ // ===========================================================================
+
+ V8_EXPORT_PRIVATE void SetEmbedderRootsHandler(EmbedderRootsHandler* handler);
+
+ EmbedderRootsHandler* GetEmbedderRootsHandler() const;
+
// ===========================================================================
// External string table API. ================================================
// ===========================================================================
@@ -1192,10 +1230,18 @@ class Heap {
// heaps is required.
V8_EXPORT_PRIVATE bool Contains(HeapObject value) const;
+ // Checks whether an address/object is in the shared heap (including
+ // auxiliary area and unused area). Use IsValidHeapObject if checking both
+ // heaps is required.
+ V8_EXPORT_PRIVATE bool SharedHeapContains(HeapObject value) const;
+
// Checks whether an address/object is in a space.
// Currently used by tests, serialization and heap verification only.
V8_EXPORT_PRIVATE bool InSpace(HeapObject value, AllocationSpace space) const;
+ // Returns true when this heap is shared.
+ V8_EXPORT_PRIVATE bool IsShared();
+
// Slow methods that can be used for verification as they can also be used
// with off-heap Addresses.
V8_EXPORT_PRIVATE bool InSpaceSlow(Address addr, AllocationSpace space) const;
@@ -1335,9 +1381,9 @@ class Heap {
survived_since_last_expansion_ += survived;
}
- inline void UpdateNewSpaceAllocationCounter();
+ void UpdateNewSpaceAllocationCounter();
- inline size_t NewSpaceAllocationCounter();
+ V8_EXPORT_PRIVATE size_t NewSpaceAllocationCounter();
// This should be used only for testing.
void set_new_space_allocation_counter(size_t new_value) {
@@ -1451,6 +1497,12 @@ class Heap {
void MergeAllocationSitePretenuringFeedback(
const PretenuringFeedbackMap& local_pretenuring_feedback);
+ // Adds an allocation site to the list of sites to be pretenured during the
+ // next collection. Added allocation sites are pretenured independent of
+ // their feedback.
+ V8_EXPORT_PRIVATE void PretenureAllocationSiteOnNextCollection(
+ AllocationSite site);
+
// ===========================================================================
// Allocation tracking. ======================================================
// ===========================================================================
@@ -2020,7 +2072,7 @@ class Heap {
AllocationAlignment alignment = kWordAligned);
// Allocates a heap object based on the map.
- V8_WARN_UNUSED_RESULT AllocationResult Allocate(Map map,
+ V8_WARN_UNUSED_RESULT AllocationResult Allocate(Handle<Map> map,
AllocationType allocation);
// Allocates a partial map for bootstrapping.
@@ -2045,6 +2097,7 @@ class Heap {
// Stores the option corresponding to the object in the provided *option.
bool IsRetainingPathTarget(HeapObject object, RetainingPathOption* option);
void PrintRetainingPath(HeapObject object, RetainingPathOption option);
+ void UpdateRetainersAfterScavenge();
#ifdef DEBUG
V8_EXPORT_PRIVATE void IncrementObjectCounters();
@@ -2117,9 +2170,18 @@ class Heap {
CodeLargeObjectSpace* code_lo_space_ = nullptr;
NewLargeObjectSpace* new_lo_space_ = nullptr;
ReadOnlySpace* read_only_space_ = nullptr;
+
+ OldSpace* shared_old_space_ = nullptr;
+ MapSpace* shared_map_space_ = nullptr;
+
+ std::unique_ptr<ConcurrentAllocator> shared_old_allocator_;
+ std::unique_ptr<ConcurrentAllocator> shared_map_allocator_;
+
// Map from the space id to the space.
Space* space_[LAST_SPACE + 1];
+ LocalHeap* main_thread_local_heap_ = nullptr;
+
// List for tracking ArrayBufferExtensions
ArrayBufferExtension* old_array_buffer_extensions_ = nullptr;
ArrayBufferExtension* young_array_buffer_extensions_ = nullptr;
@@ -2189,7 +2251,9 @@ class Heap {
// Weak list heads, threaded through the objects.
// List heads are initialized lazily and contain the undefined_value at start.
- Object native_contexts_list_;
+ // {native_contexts_list_} is an Address instead of an Object to allow the use
+ // of atomic accessors.
+ std::atomic<Address> native_contexts_list_;
Object allocation_sites_list_;
Object dirty_js_finalization_registries_list_;
// Weak list tails.
@@ -2247,9 +2311,18 @@ class Heap {
std::unique_ptr<LocalEmbedderHeapTracer> local_embedder_heap_tracer_;
std::unique_ptr<MarkingBarrier> marking_barrier_;
+ // This object controls virtual space reserved for code on the V8 heap. This
+ // is only valid for 64-bit architectures where kRequiresCodeRange is true.
+ //
+ // Owned by the heap when !V8_COMPRESS_POINTERS_IN_SHARED_CAGE; otherwise it
+ // is process-wide.
+ std::shared_ptr<CodeRange> code_range_;
+
// The embedder owns the C++ heap.
v8::CppHeap* cpp_heap_ = nullptr;
+ EmbedderRootsHandler* embedder_roots_handler_ = nullptr;
+
StrongRootsEntry* strong_roots_head_ = nullptr;
base::Mutex strong_roots_mutex_;
@@ -2278,6 +2351,9 @@ class Heap {
// forwarding pointers.
PretenuringFeedbackMap global_pretenuring_feedback_;
+ std::unique_ptr<GlobalHandleVector<AllocationSite>>
+ allocation_sites_to_pretenure_;
+
char trace_ring_buffer_[kTraceRingBufferSize];
// Used as boolean.
@@ -2335,14 +2411,15 @@ class Heap {
int allocation_timeout_ = 0;
#endif // V8_ENABLE_ALLOCATION_TIMEOUT
- std::map<HeapObject, HeapObject, Object::Comparer> retainer_;
- std::map<HeapObject, Root, Object::Comparer> retaining_root_;
+ std::unordered_map<HeapObject, HeapObject, Object::Hasher> retainer_;
+ std::unordered_map<HeapObject, Root, Object::Hasher> retaining_root_;
// If an object is retained by an ephemeron, then the retaining key of the
// ephemeron is stored in this map.
- std::map<HeapObject, HeapObject, Object::Comparer> ephemeron_retainer_;
+ std::unordered_map<HeapObject, HeapObject, Object::Hasher>
+ ephemeron_retainer_;
// For each index in the retaining_path_targets_ array this map
// stores the option of the corresponding target.
- std::map<int, RetainingPathOption> retaining_path_target_option_;
+ std::unordered_map<int, RetainingPathOption> retaining_path_target_option_;
std::vector<HeapObjectAllocationTracker*> allocation_trackers_;
@@ -2367,6 +2444,7 @@ class Heap {
friend class IncrementalMarking;
friend class IncrementalMarkingJob;
friend class LocalHeap;
+ friend class MarkingBarrier;
friend class OldLargeObjectSpace;
template <typename ConcreteVisitor, typename MarkingState>
friend class MarkingVisitorBase;
@@ -2630,14 +2708,13 @@ class HeapObjectAllocationTracker {
template <typename T>
T ForwardingAddress(T heap_obj) {
- MapWord map_word = heap_obj.map_word();
+ MapWord map_word = heap_obj.map_word(kRelaxedLoad);
if (map_word.IsForwardingAddress()) {
return T::cast(map_word.ToForwardingAddress());
} else if (Heap::InFromPage(heap_obj)) {
return T();
} else {
- // TODO(ulan): Support minor mark-compactor here.
return heap_obj;
}
}
diff --git a/chromium/v8/src/heap/incremental-marking.cc b/chromium/v8/src/heap/incremental-marking.cc
index a0938359817..0c405c40bf8 100644
--- a/chromium/v8/src/heap/incremental-marking.cc
+++ b/chromium/v8/src/heap/incremental-marking.cc
@@ -39,9 +39,8 @@ void IncrementalMarking::Observer::Step(int bytes_allocated, Address addr,
size_t size) {
Heap* heap = incremental_marking_->heap();
VMState<GC> state(heap->isolate());
- RuntimeCallTimerScope runtime_timer(
- heap->isolate(),
- RuntimeCallCounterId::kGC_Custom_IncrementalMarkingObserver);
+ RCS_SCOPE(heap->isolate(),
+ RuntimeCallCounterId::kGC_Custom_IncrementalMarkingObserver);
incremental_marking_->AdvanceOnAllocation();
// AdvanceIncrementalMarkingOnAllocation can start incremental marking.
incremental_marking_->EnsureBlackAllocated(addr, size);
@@ -108,20 +107,28 @@ class IncrementalMarkingRootMarkingVisitor : public RootVisitor {
void VisitRootPointer(Root root, const char* description,
FullObjectSlot p) override {
+ DCHECK(!MapWord::IsPacked((*p).ptr()));
MarkObjectByPointer(p);
}
void VisitRootPointers(Root root, const char* description,
FullObjectSlot start, FullObjectSlot end) override {
- for (FullObjectSlot p = start; p < end; ++p) MarkObjectByPointer(p);
+ for (FullObjectSlot p = start; p < end; ++p) {
+ DCHECK(!MapWord::IsPacked((*p).ptr()));
+ MarkObjectByPointer(p);
+ }
}
private:
void MarkObjectByPointer(FullObjectSlot p) {
- Object obj = *p;
- if (!obj.IsHeapObject()) return;
-
- heap_->incremental_marking()->WhiteToGreyAndPush(HeapObject::cast(obj));
+ Object object = *p;
+ if (!object.IsHeapObject()) return;
+ DCHECK(!MapWord::IsPacked(object.ptr()));
+ HeapObject heap_object = HeapObject::cast(object);
+ BasicMemoryChunk* target_page =
+ BasicMemoryChunk::FromHeapObject(heap_object);
+ if (target_page->InSharedHeap()) return;
+ heap_->incremental_marking()->WhiteToGreyAndPush(heap_object);
}
Heap* heap_;
@@ -132,12 +139,14 @@ bool IncrementalMarking::WasActivated() { return was_activated_; }
bool IncrementalMarking::CanBeActivated() {
- // Only start incremental marking in a safe state: 1) when incremental
- // marking is turned on, 2) when we are currently not in a GC, and
- // 3) when we are currently not serializing or deserializing the heap.
+ // Only start incremental marking in a safe state:
+ // 1) when incremental marking is turned on,
+ // 2) when we are currently not in a GC, and
+ // 3) when we are currently not serializing or deserializing the heap, and
+ // 4) when the heap is not a shared heap.
return FLAG_incremental_marking && heap_->gc_state() == Heap::NOT_IN_GC &&
heap_->deserialization_complete() &&
- !heap_->isolate()->serializer_enabled();
+ !heap_->isolate()->serializer_enabled() && !heap_->IsShared();
}
bool IncrementalMarking::IsBelowActivationThresholds() const {
@@ -147,6 +156,7 @@ bool IncrementalMarking::IsBelowActivationThresholds() const {
void IncrementalMarking::Start(GarbageCollectionReason gc_reason) {
DCHECK(!collector_->sweeping_in_progress());
+ DCHECK(!heap_->IsShared());
if (FLAG_trace_incremental_marking) {
const size_t old_generation_size_mb =
@@ -314,7 +324,9 @@ void IncrementalMarking::MarkRoots() {
IncrementalMarkingRootMarkingVisitor visitor(this);
heap_->IterateRoots(
- &visitor, base::EnumSet<SkipRoot>{SkipRoot::kStack, SkipRoot::kWeak});
+ &visitor,
+ base::EnumSet<SkipRoot>{SkipRoot::kStack, SkipRoot::kMainThreadHandles,
+ SkipRoot::kWeak});
}
bool IncrementalMarking::ShouldRetainMap(Map map, int age) {
@@ -356,6 +368,9 @@ void IncrementalMarking::RetainMaps() {
if (!map_retaining_is_disabled && marking_state()->IsWhite(map)) {
if (ShouldRetainMap(map, age)) {
WhiteToGreyAndPush(map);
+ if (V8_UNLIKELY(FLAG_track_retaining_path)) {
+ heap_->AddRetainingRoot(Root::kRetainMaps, map);
+ }
}
Object prototype = map.prototype();
if (age > 0 && prototype.IsHeapObject() &&
@@ -432,7 +447,7 @@ void IncrementalMarking::UpdateMarkingWorklistAfterScavenge() {
DCHECK(obj.IsHeapObject());
// Only pointers to from space have to be updated.
if (Heap::InFromPage(obj)) {
- MapWord map_word = obj.map_word();
+ MapWord map_word = obj.map_word(kRelaxedLoad);
if (!map_word.IsForwardingAddress()) {
// There may be objects on the marking deque that do not exist
// anymore, e.g. left trimmed objects or objects from the root set
diff --git a/chromium/v8/src/heap/large-spaces.cc b/chromium/v8/src/heap/large-spaces.cc
index 5cbcc8620fb..4bb989fe9a8 100644
--- a/chromium/v8/src/heap/large-spaces.cc
+++ b/chromium/v8/src/heap/large-spaces.cc
@@ -5,6 +5,7 @@
#include "src/heap/large-spaces.h"
#include "src/base/platform/mutex.h"
+#include "src/base/sanitizer/msan.h"
#include "src/common/globals.h"
#include "src/execution/isolate.h"
#include "src/heap/combined-heap.h"
@@ -18,7 +19,6 @@
#include "src/heap/spaces-inl.h"
#include "src/logging/log.h"
#include "src/objects/objects-inl.h"
-#include "src/sanitizer/msan.h"
#include "src/utils/ostreams.h"
namespace v8 {
@@ -130,6 +130,7 @@ AllocationResult OldLargeObjectSpace::AllocateRaw(int object_size) {
AllocationResult OldLargeObjectSpace::AllocateRaw(int object_size,
Executability executable) {
+ DCHECK(!FLAG_enable_third_party_heap);
// Check if we want to force a GC before growing the old space further.
// If so, fail the allocation.
if (!heap()->CanExpandOldGeneration(object_size) ||
@@ -160,6 +161,7 @@ AllocationResult OldLargeObjectSpace::AllocateRaw(int object_size,
AllocationResult OldLargeObjectSpace::AllocateRawBackground(
LocalHeap* local_heap, int object_size) {
+ DCHECK(!FLAG_enable_third_party_heap);
// Check if we want to force a GC before growing the old space further.
// If so, fail the allocation.
if (!heap()->CanExpandOldGenerationBackground(local_heap, object_size) ||
@@ -446,6 +448,7 @@ NewLargeObjectSpace::NewLargeObjectSpace(Heap* heap, size_t capacity)
capacity_(capacity) {}
AllocationResult NewLargeObjectSpace::AllocateRaw(int object_size) {
+ DCHECK(!FLAG_enable_third_party_heap);
// Do not allocate more objects if promoting the existing object would exceed
// the old generation capacity.
if (!heap()->CanExpandOldGeneration(SizeOfObjects())) {
@@ -532,6 +535,7 @@ CodeLargeObjectSpace::CodeLargeObjectSpace(Heap* heap)
chunk_map_(kInitialChunkMapCapacity) {}
AllocationResult CodeLargeObjectSpace::AllocateRaw(int object_size) {
+ DCHECK(!FLAG_enable_third_party_heap);
return OldLargeObjectSpace::AllocateRaw(object_size, EXECUTABLE);
}
diff --git a/chromium/v8/src/heap/large-spaces.h b/chromium/v8/src/heap/large-spaces.h
index 8761b9949c4..1e53671a9b1 100644
--- a/chromium/v8/src/heap/large-spaces.h
+++ b/chromium/v8/src/heap/large-spaces.h
@@ -32,6 +32,7 @@ class LargePage : public MemoryChunk {
static const int kMaxCodePageSize = 512 * MB;
static LargePage* FromHeapObject(HeapObject o) {
+ DCHECK(!V8_ENABLE_THIRD_PARTY_HEAP_BOOL);
return static_cast<LargePage*>(MemoryChunk::FromHeapObject(o));
}
diff --git a/chromium/v8/src/heap/local-allocator.h b/chromium/v8/src/heap/local-allocator.h
index 9efbf3cf563..e64932b9fe9 100644
--- a/chromium/v8/src/heap/local-allocator.h
+++ b/chromium/v8/src/heap/local-allocator.h
@@ -36,7 +36,7 @@ class EvacuationAllocator {
// Give back remaining LAB space if this EvacuationAllocator's new space LAB
// sits right next to new space allocation top.
const LinearAllocationArea info = new_space_lab_.CloseAndMakeIterable();
- new_space_->MaybeFreeUnusedLab(info);
+ if (new_space_) new_space_->MaybeFreeUnusedLab(info);
}
inline AllocationResult Allocate(AllocationSpace space, int object_size,
diff --git a/chromium/v8/src/heap/local-heap-inl.h b/chromium/v8/src/heap/local-heap-inl.h
index fd0ec5a4499..e1333773dd3 100644
--- a/chromium/v8/src/heap/local-heap-inl.h
+++ b/chromium/v8/src/heap/local-heap-inl.h
@@ -18,6 +18,7 @@ namespace internal {
AllocationResult LocalHeap::AllocateRaw(int size_in_bytes, AllocationType type,
AllocationOrigin origin,
AllocationAlignment alignment) {
+ DCHECK(!FLAG_enable_third_party_heap);
#if DEBUG
VerifyCurrent();
DCHECK(AllowHandleAllocation::IsAllowed());
@@ -45,6 +46,7 @@ AllocationResult LocalHeap::AllocateRaw(int size_in_bytes, AllocationType type,
Address LocalHeap::AllocateRawOrFail(int object_size, AllocationType type,
AllocationOrigin origin,
AllocationAlignment alignment) {
+ DCHECK(!FLAG_enable_third_party_heap);
AllocationResult result = AllocateRaw(object_size, type, origin, alignment);
if (!result.IsRetry()) return result.ToObject().address();
return PerformCollectionAndAllocateAgain(object_size, type, origin,
diff --git a/chromium/v8/src/heap/local-heap.cc b/chromium/v8/src/heap/local-heap.cc
index 70cdbcc0d74..2c6724ba3a3 100644
--- a/chromium/v8/src/heap/local-heap.cc
+++ b/chromium/v8/src/heap/local-heap.cc
@@ -221,7 +221,7 @@ bool LocalHeap::TryPerformCollection() {
heap_->CollectGarbageForBackground(this);
return true;
} else {
- LocalHeap* main_thread = heap_->isolate()->main_thread_local_heap();
+ LocalHeap* main_thread = heap_->main_thread_local_heap();
ThreadState current = main_thread->state_relaxed();
while (true) {
diff --git a/chromium/v8/src/heap/mark-compact-inl.h b/chromium/v8/src/heap/mark-compact-inl.h
index a9db17f2aac..f28b9b5e849 100644
--- a/chromium/v8/src/heap/mark-compact-inl.h
+++ b/chromium/v8/src/heap/mark-compact-inl.h
@@ -205,9 +205,10 @@ void LiveObjectRange<mode>::iterator::AdvanceToNextValidObject() {
// make sure that we skip all set bits in the black area until the
// object ends.
HeapObject black_object = HeapObject::FromAddress(addr);
- Object map_object = ObjectSlot(addr).Acquire_Load();
+ Object map_object = black_object.map(kAcquireLoad);
CHECK(map_object.IsMap());
map = Map::cast(map_object);
+ DCHECK(map.IsMap());
size = black_object.SizeFromMap(map);
CHECK_LE(addr + size, chunk_->area_end());
Address end = addr + size - kTaggedSize;
@@ -235,10 +236,11 @@ void LiveObjectRange<mode>::iterator::AdvanceToNextValidObject() {
object = black_object;
}
} else if ((mode == kGreyObjects || mode == kAllLiveObjects)) {
- Object map_object = ObjectSlot(addr).Acquire_Load();
+ object = HeapObject::FromAddress(addr);
+ Object map_object = object.map(kAcquireLoad);
CHECK(map_object.IsMap());
map = Map::cast(map_object);
- object = HeapObject::FromAddress(addr);
+ DCHECK(map.IsMap());
size = object.SizeFromMap(map);
CHECK_LE(addr + size, chunk_->area_end());
}
diff --git a/chromium/v8/src/heap/mark-compact.cc b/chromium/v8/src/heap/mark-compact.cc
index 951b49507ca..73eab9e2038 100644
--- a/chromium/v8/src/heap/mark-compact.cc
+++ b/chromium/v8/src/heap/mark-compact.cc
@@ -42,6 +42,7 @@
#include "src/heap/worklist.h"
#include "src/ic/stub-cache.h"
#include "src/init/v8.h"
+#include "src/logging/tracing-flags.h"
#include "src/objects/embedder-data-array-inl.h"
#include "src/objects/foreign.h"
#include "src/objects/hash-table-inl.h"
@@ -51,6 +52,7 @@
#include "src/objects/slots-inl.h"
#include "src/objects/transitions-inl.h"
#include "src/tasks/cancelable-task.h"
+#include "src/tracing/tracing-category-observer.h"
#include "src/utils/utils-inl.h"
namespace v8 {
@@ -82,6 +84,7 @@ class MarkingVerifier : public ObjectVisitor, public RootVisitor {
virtual ConcurrentBitmap<AccessMode::NON_ATOMIC>* bitmap(
const MemoryChunk* chunk) = 0;
+ virtual void VerifyMap(Map map) = 0;
virtual void VerifyPointers(ObjectSlot start, ObjectSlot end) = 0;
virtual void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) = 0;
virtual void VerifyRootPointers(FullObjectSlot start, FullObjectSlot end) = 0;
@@ -105,6 +108,8 @@ class MarkingVerifier : public ObjectVisitor, public RootVisitor {
VerifyRootPointers(start, end);
}
+ void VisitMapPointer(HeapObject object) override { VerifyMap(object.map()); }
+
void VerifyRoots();
void VerifyMarkingOnPage(const Page* page, Address start, Address end);
void VerifyMarking(NewSpace* new_space);
@@ -146,6 +151,7 @@ void MarkingVerifier::VerifyMarkingOnPage(const Page* page, Address start,
}
void MarkingVerifier::VerifyMarking(NewSpace* space) {
+ if (!space) return;
Address end = space->top();
// The bottom position is at the start of its page. Allows us to use
// page->area_start() as start of range on all pages.
@@ -168,6 +174,7 @@ void MarkingVerifier::VerifyMarking(PagedSpace* space) {
}
void MarkingVerifier::VerifyMarking(LargeObjectSpace* lo_space) {
+ if (!lo_space) return;
LargeObjectSpaceObjectIterator it(lo_space);
for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
if (IsBlackOrGrey(obj)) {
@@ -208,6 +215,8 @@ class FullMarkingVerifier : public MarkingVerifier {
return marking_state_->IsBlackOrGrey(object);
}
+ void VerifyMap(Map map) override { VerifyHeapObjectImpl(map); }
+
void VerifyPointers(ObjectSlot start, ObjectSlot end) override {
VerifyPointersImpl(start, end);
}
@@ -235,6 +244,9 @@ class FullMarkingVerifier : public MarkingVerifier {
private:
V8_INLINE void VerifyHeapObjectImpl(HeapObject heap_object) {
+ if (!heap_->IsShared() &&
+ BasicMemoryChunk::FromHeapObject(heap_object)->InSharedHeap())
+ return;
CHECK(marking_state_->IsBlackOrGrey(heap_object));
}
@@ -271,11 +283,14 @@ class EvacuationVerifier : public ObjectVisitor, public RootVisitor {
VerifyRootPointers(start, end);
}
+ void VisitMapPointer(HeapObject object) override { VerifyMap(object.map()); }
+
protected:
explicit EvacuationVerifier(Heap* heap) : heap_(heap) {}
inline Heap* heap() { return heap_; }
+ virtual void VerifyMap(Map map) = 0;
virtual void VerifyPointers(ObjectSlot start, ObjectSlot end) = 0;
virtual void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) = 0;
virtual void VerifyRootPointers(FullObjectSlot start, FullObjectSlot end) = 0;
@@ -302,6 +317,7 @@ void EvacuationVerifier::VerifyEvacuationOnPage(Address start, Address end) {
}
void EvacuationVerifier::VerifyEvacuation(NewSpace* space) {
+ if (!space) return;
PageRange range(space->first_allocatable_address(), space->top());
for (auto it = range.begin(); it != range.end();) {
Page* page = *(it++);
@@ -354,7 +370,7 @@ class FullEvacuationVerifier : public EvacuationVerifier {
}
}
}
-
+ void VerifyMap(Map map) override { VerifyHeapObjectImpl(map); }
void VerifyPointers(ObjectSlot start, ObjectSlot end) override {
VerifyPointersImpl(start, end);
}
@@ -408,6 +424,7 @@ MarkCompactCollector::MarkCompactCollector(Heap* heap)
#ifdef DEBUG
state_(IDLE),
#endif
+ is_shared_heap_(heap->IsShared()),
was_marked_incrementally_(false),
evacuation_(false),
compacting_(false),
@@ -548,6 +565,7 @@ void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
}
void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
+ if (!space) return;
for (Page* p : PageRange(space->first_allocatable_address(), space->top())) {
CHECK(non_atomic_marking_state()->bitmap(p)->IsClean());
CHECK_EQ(0, non_atomic_marking_state()->live_bytes(p));
@@ -555,6 +573,7 @@ void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
}
void MarkCompactCollector::VerifyMarkbitsAreClean(LargeObjectSpace* space) {
+ if (!space) return;
LargeObjectSpaceObjectIterator it(space);
for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
CHECK(non_atomic_marking_state()->IsWhite(obj));
@@ -863,9 +882,14 @@ void MarkCompactCollector::Prepare() {
[](LocalHeap* local_heap) { local_heap->FreeLinearAllocationArea(); });
// All objects are guaranteed to be initialized in atomic pause
- heap()->new_lo_space()->ResetPendingObject();
- DCHECK_EQ(heap()->new_space()->top(),
- heap()->new_space()->original_top_acquire());
+ if (heap()->new_lo_space()) {
+ heap()->new_lo_space()->ResetPendingObject();
+ }
+
+ if (heap()->new_space()) {
+ DCHECK_EQ(heap()->new_space()->top(),
+ heap()->new_space()->original_top_acquire());
+ }
}
void MarkCompactCollector::FinishConcurrentMarking() {
@@ -950,26 +974,34 @@ void MarkCompactCollector::SweepArrayBufferExtensions() {
class MarkCompactCollector::RootMarkingVisitor final : public RootVisitor {
public:
explicit RootMarkingVisitor(MarkCompactCollector* collector)
- : collector_(collector) {}
+ : collector_(collector), is_shared_heap_(collector->is_shared_heap()) {}
void VisitRootPointer(Root root, const char* description,
FullObjectSlot p) final {
+ DCHECK(!MapWord::IsPacked(p.Relaxed_Load().ptr()));
MarkObjectByPointer(root, p);
}
void VisitRootPointers(Root root, const char* description,
FullObjectSlot start, FullObjectSlot end) final {
- for (FullObjectSlot p = start; p < end; ++p) MarkObjectByPointer(root, p);
+ for (FullObjectSlot p = start; p < end; ++p) {
+ MarkObjectByPointer(root, p);
+ }
}
private:
V8_INLINE void MarkObjectByPointer(Root root, FullObjectSlot p) {
- if (!(*p).IsHeapObject()) return;
-
- collector_->MarkRootObject(root, HeapObject::cast(*p));
+ Object object = *p;
+ if (!object.IsHeapObject()) return;
+ HeapObject heap_object = HeapObject::cast(object);
+ BasicMemoryChunk* target_page =
+ BasicMemoryChunk::FromHeapObject(heap_object);
+ if (!is_shared_heap_ && target_page->InSharedHeap()) return;
+ collector_->MarkRootObject(root, heap_object);
}
MarkCompactCollector* const collector_;
+ const bool is_shared_heap_;
};
// This visitor is used to visit the body of special objects held alive by
@@ -991,8 +1023,12 @@ class MarkCompactCollector::CustomRootBodyMarkingVisitor final
MarkObject(host, *p);
}
+ void VisitMapPointer(HeapObject host) final { MarkObject(host, host.map()); }
+
void VisitPointers(HeapObject host, ObjectSlot start, ObjectSlot end) final {
for (ObjectSlot p = start; p < end; ++p) {
+ // The map slot should be handled in VisitMapPointer.
+ DCHECK_NE(host.map_slot(), p);
DCHECK(!HasWeakHeapObjectTag(*p));
MarkObject(host, *p);
}
@@ -1145,6 +1181,7 @@ class RecordMigratedSlotVisitor : public ObjectVisitor {
}
inline void VisitPointer(HeapObject host, MaybeObjectSlot p) final {
+ DCHECK(!MapWord::IsPacked(p.Relaxed_Load().ptr()));
RecordMigratedSlot(host, *p, p.address());
}
@@ -1310,7 +1347,7 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
if (mode != MigrationMode::kFast)
base->ExecuteMigrationObservers(dest, src, dst, size);
}
- src.set_map_word(MapWord::FromForwardingAddress(dst));
+ src.set_map_word(MapWord::FromForwardingAddress(dst), kRelaxedStore);
}
EvacuateVisitorBase(Heap* heap, EvacuationAllocator* local_allocator,
@@ -1439,7 +1476,8 @@ class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
if (map.visitor_id() == kVisitThinString) {
HeapObject actual = ThinString::cast(object).unchecked_actual();
if (MarkCompactCollector::IsOnEvacuationCandidate(actual)) return false;
- object.set_map_word(MapWord::FromForwardingAddress(actual));
+ object.set_map_word(MapWord::FromForwardingAddress(actual),
+ kRelaxedStore);
return true;
}
// TODO(mlippautz): Handle ConsString.
@@ -1543,7 +1581,7 @@ class EvacuateOldSpaceVisitor final : public EvacuateVisitorBase {
HeapObject target_object;
if (TryEvacuateObject(Page::FromHeapObject(object)->owner_identity(),
object, size, &target_object)) {
- DCHECK(object.map_word().IsForwardingAddress());
+ DCHECK(object.map_word(kRelaxedLoad).IsForwardingAddress());
return true;
}
return false;
@@ -2483,9 +2521,6 @@ void MarkCompactCollector::ClearWeakReferences() {
}
void MarkCompactCollector::ClearJSWeakRefs() {
- if (!FLAG_harmony_weak_refs) {
- return;
- }
JSWeakRef weak_ref;
while (weak_objects_.js_weak_refs.Pop(kMainThreadTask, &weak_ref)) {
HeapObject target = HeapObject::cast(weak_ref.target());
@@ -2680,7 +2715,7 @@ static inline SlotCallbackResult UpdateSlot(TSlot slot,
std::is_same<TSlot, OffHeapObjectSlot>::value,
"Only [Full|OffHeap]ObjectSlot and [Full]MaybeObjectSlot are "
"expected here");
- MapWord map_word = heap_obj.map_word();
+ MapWord map_word = heap_obj.map_word(kRelaxedLoad);
if (map_word.IsForwardingAddress()) {
DCHECK_IMPLIES(!Heap::InFromPage(heap_obj),
MarkCompactCollector::IsOnEvacuationCandidate(heap_obj) ||
@@ -2762,6 +2797,7 @@ class PointersUpdatingVisitor : public ObjectVisitor, public RootVisitor {
void VisitRootPointer(Root root, const char* description,
FullObjectSlot p) override {
+ DCHECK(!MapWord::IsPacked(p.Relaxed_Load().ptr()));
UpdateRootSlotInternal(cage_base_, p);
}
@@ -2821,7 +2857,7 @@ class PointersUpdatingVisitor : public ObjectVisitor, public RootVisitor {
static String UpdateReferenceInExternalStringTableEntry(Heap* heap,
FullObjectSlot p) {
- MapWord map_word = HeapObject::cast(*p).map_word();
+ MapWord map_word = HeapObject::cast(*p).map_word(kRelaxedLoad);
if (map_word.IsForwardingAddress()) {
String new_string = String::cast(map_word.ToForwardingAddress());
@@ -2841,18 +2877,23 @@ static String UpdateReferenceInExternalStringTableEntry(Heap* heap,
void MarkCompactCollector::EvacuatePrologue() {
// New space.
NewSpace* new_space = heap()->new_space();
- // Append the list of new space pages to be processed.
- for (Page* p :
- PageRange(new_space->first_allocatable_address(), new_space->top())) {
- new_space_evacuation_pages_.push_back(p);
- }
- new_space->Flip();
- new_space->ResetLinearAllocationArea();
- DCHECK_EQ(new_space->Size(), 0);
+ if (new_space) {
+ // Append the list of new space pages to be processed.
+ for (Page* p :
+ PageRange(new_space->first_allocatable_address(), new_space->top())) {
+ new_space_evacuation_pages_.push_back(p);
+ }
+ new_space->Flip();
+ new_space->ResetLinearAllocationArea();
+
+ DCHECK_EQ(new_space->Size(), 0);
+ }
- heap()->new_lo_space()->Flip();
- heap()->new_lo_space()->ResetPendingObject();
+ if (heap()->new_lo_space()) {
+ heap()->new_lo_space()->Flip();
+ heap()->new_lo_space()->ResetPendingObject();
+ }
// Old space.
DCHECK(old_space_evacuation_pages_.empty());
@@ -2863,18 +2904,27 @@ void MarkCompactCollector::EvacuatePrologue() {
void MarkCompactCollector::EvacuateEpilogue() {
aborted_evacuation_candidates_.clear();
+
// New space.
- heap()->new_space()->set_age_mark(heap()->new_space()->top());
- DCHECK_IMPLIES(FLAG_always_promote_young_mc,
- heap()->new_space()->Size() == 0);
+ if (heap()->new_space()) {
+ heap()->new_space()->set_age_mark(heap()->new_space()->top());
+ DCHECK_IMPLIES(FLAG_always_promote_young_mc,
+ heap()->new_space()->Size() == 0);
+ }
+
// Deallocate unmarked large objects.
heap()->lo_space()->FreeUnmarkedObjects();
heap()->code_lo_space()->FreeUnmarkedObjects();
- heap()->new_lo_space()->FreeUnmarkedObjects();
+ if (heap()->new_lo_space()) {
+ heap()->new_lo_space()->FreeUnmarkedObjects();
+ }
+
// Old space. Deallocate evacuated candidate pages.
ReleaseEvacuationCandidates();
+
// Give pages that are queued to be freed back to the OS.
heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
+
#ifdef DEBUG
// Old-to-old slot sets must be empty after evacuation.
for (Page* p : *heap()->old_space()) {
@@ -3274,19 +3324,21 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
}
// Promote young generation large objects.
- IncrementalMarking::NonAtomicMarkingState* marking_state =
- heap()->incremental_marking()->non_atomic_marking_state();
-
- for (auto it = heap()->new_lo_space()->begin();
- it != heap()->new_lo_space()->end();) {
- LargePage* current = *it;
- it++;
- HeapObject object = current->GetObject();
- DCHECK(!marking_state->IsGrey(object));
- if (marking_state->IsBlack(object)) {
- heap_->lo_space()->PromoteNewLargeObject(current);
- current->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
- evacuation_items.emplace_back(ParallelWorkItem{}, current);
+ if (heap()->new_lo_space()) {
+ IncrementalMarking::NonAtomicMarkingState* marking_state =
+ heap()->incremental_marking()->non_atomic_marking_state();
+
+ for (auto it = heap()->new_lo_space()->begin();
+ it != heap()->new_lo_space()->end();) {
+ LargePage* current = *it;
+ it++;
+ HeapObject object = current->GetObject();
+ DCHECK(!marking_state->IsGrey(object));
+ if (marking_state->IsBlack(object)) {
+ heap_->lo_space()->PromoteNewLargeObject(current);
+ current->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
+ evacuation_items.emplace_back(ParallelWorkItem{}, current);
+ }
}
}
@@ -3314,7 +3366,7 @@ class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
Object RetainAs(Object object) override {
if (object.IsHeapObject()) {
HeapObject heap_object = HeapObject::cast(object);
- MapWord map_word = heap_object.map_word();
+ MapWord map_word = heap_object.map_word(kRelaxedLoad);
if (map_word.IsForwardingAddress()) {
return map_word.ToForwardingAddress();
}
@@ -3443,7 +3495,7 @@ void MarkCompactCollector::Evacuate() {
UpdatePointersAfterEvacuation();
- {
+ if (heap()->new_space()) {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_REBALANCE);
if (!heap()->new_space()->Rebalance()) {
heap()->FatalProcessOutOfMemory("NewSpace::Rebalance");
@@ -3645,7 +3697,7 @@ class RememberedSetUpdatingItem : public UpdatingItem {
return REMOVE_SLOT;
}
if (Heap::InFromPage(heap_object)) {
- MapWord map_word = heap_object.map_word();
+ MapWord map_word = heap_object.map_word(kRelaxedLoad);
if (map_word.IsForwardingAddress()) {
HeapObjectReference::Update(THeapObjectSlot(slot),
map_word.ToForwardingAddress());
@@ -3815,6 +3867,8 @@ MarkCompactCollector::CreateRememberedSetUpdatingItem(
int MarkCompactCollectorBase::CollectToSpaceUpdatingItems(
std::vector<std::unique_ptr<UpdatingItem>>* items) {
+ if (!heap()->new_space()) return 0;
+
// Seed to space pages.
const Address space_start = heap()->new_space()->first_allocatable_address();
const Address space_end = heap()->new_space()->top();
@@ -3877,7 +3931,7 @@ class EphemeronTableUpdatingItem : public UpdatingItem {
it != heap_->ephemeron_remembered_set_.end();) {
EphemeronHashTable table = it->first;
auto& indices = it->second;
- if (table.map_word().IsForwardingAddress()) {
+ if (table.map_word(kRelaxedLoad).IsForwardingAddress()) {
// The table has moved, and RecordMigratedSlotVisitor::VisitEphemeron
// inserts entries for the moved table into ephemeron_remembered_set_.
it = heap_->ephemeron_remembered_set_.erase(it);
@@ -3890,7 +3944,7 @@ class EphemeronTableUpdatingItem : public UpdatingItem {
HeapObjectSlot key_slot(table.RawFieldOfElementAt(
EphemeronHashTable::EntryToIndex(InternalIndex(*iti))));
HeapObject key = key_slot.ToHeapObject();
- MapWord map_word = key.map_word();
+ MapWord map_word = key.map_word(kRelaxedLoad);
if (map_word.IsForwardingAddress()) {
key = map_word.ToForwardingAddress();
key_slot.StoreHeapObject(key);
@@ -4145,6 +4199,8 @@ class YoungGenerationMarkingVerifier : public MarkingVerifier {
}
protected:
+ void VerifyMap(Map map) override { VerifyHeapObjectImpl(map); }
+
void VerifyPointers(ObjectSlot start, ObjectSlot end) override {
VerifyPointersImpl(start, end);
}
@@ -4213,7 +4269,7 @@ class YoungGenerationEvacuationVerifier : public EvacuationVerifier {
}
}
}
-
+ void VerifyMap(Map map) override { VerifyHeapObjectImpl(map); }
void VerifyPointers(ObjectSlot start, ObjectSlot end) override {
VerifyPointersImpl(start, end);
}
@@ -4482,6 +4538,7 @@ class MinorMarkCompactCollector::RootMarkingVisitor : public RootVisitor {
void VisitRootPointers(Root root, const char* description,
FullObjectSlot start, FullObjectSlot end) final {
for (FullObjectSlot p = start; p < end; ++p) {
+ DCHECK(!MapWord::IsPacked((*p).ptr()));
MarkObjectByPointer(p);
}
}
@@ -4539,7 +4596,8 @@ void MinorMarkCompactCollector::CollectGarbage() {
}
// Since we promote all surviving large objects immediately, all remaining
// large objects must be dead.
- // TODO(ulan): Don't free all as soon as we have an intermediate generation.
+ // TODO(v8:11685): Don't free all as soon as we have an intermediate
+ // generation.
heap()->new_lo_space()->FreeDeadObjects([](HeapObject) { return true; });
}
@@ -4572,7 +4630,7 @@ void MinorMarkCompactCollector::MakeIterable(
p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
ClearRecordedSlots::kNo);
}
- Map map = object.synchronized_map();
+ Map map = object.map(kAcquireLoad);
int size = object.SizeFromMap(map);
free_start = free_end + size;
}
@@ -5027,7 +5085,7 @@ void MinorMarkCompactCollector::TraceFragmentation() {
free_bytes_index++;
}
}
- Map map = object.synchronized_map();
+ Map map = object.map(kAcquireLoad);
int size = object.SizeFromMap(map);
live_bytes += size;
free_start = free_end + size;
diff --git a/chromium/v8/src/heap/mark-compact.h b/chromium/v8/src/heap/mark-compact.h
index 733588ae80a..035fb37064a 100644
--- a/chromium/v8/src/heap/mark-compact.h
+++ b/chromium/v8/src/heap/mark-compact.h
@@ -506,6 +506,7 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
void RecordLiveSlotsOnPage(Page* page);
bool is_compacting() const { return compacting_; }
+ bool is_shared_heap() const { return is_shared_heap_; }
// Ensures that sweeping is finished.
//
@@ -605,11 +606,11 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
void MarkLiveObjects() override;
- // Marks the object black and adds it to the marking work list.
+ // Marks the object grey and adds it to the marking work list.
// This is for non-incremental marking only.
V8_INLINE void MarkObject(HeapObject host, HeapObject obj);
- // Marks the object black and adds it to the marking work list.
+ // Marks the object grey and adds it to the marking work list.
// This is for non-incremental marking only.
V8_INLINE void MarkRootObject(Root root, HeapObject obj);
@@ -743,6 +744,8 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
CollectorState state_;
#endif
+ const bool is_shared_heap_;
+
bool was_marked_incrementally_;
bool evacuation_;
diff --git a/chromium/v8/src/heap/marking-barrier-inl.h b/chromium/v8/src/heap/marking-barrier-inl.h
index 56bd7efda23..d03bdcb0f7a 100644
--- a/chromium/v8/src/heap/marking-barrier-inl.h
+++ b/chromium/v8/src/heap/marking-barrier-inl.h
@@ -28,8 +28,14 @@ bool MarkingBarrier::MarkValue(HeapObject host, HeapObject value) {
// visits the host object.
return false;
}
- if (WhiteToGreyAndPush(value) && is_main_thread_barrier_) {
- incremental_marking_->RestartIfNotMarking();
+ if (WhiteToGreyAndPush(value)) {
+ if (is_main_thread_barrier_) {
+ incremental_marking_->RestartIfNotMarking();
+ }
+
+ if (V8_UNLIKELY(FLAG_track_retaining_path)) {
+ heap_->AddRetainingRoot(Root::kWriteBarrier, value);
+ }
}
return true;
}
diff --git a/chromium/v8/src/heap/marking-visitor-inl.h b/chromium/v8/src/heap/marking-visitor-inl.h
index 55c37e535bd..14e8a4d3552 100644
--- a/chromium/v8/src/heap/marking-visitor-inl.h
+++ b/chromium/v8/src/heap/marking-visitor-inl.h
@@ -22,6 +22,7 @@ namespace internal {
template <typename ConcreteVisitor, typename MarkingState>
void MarkingVisitorBase<ConcreteVisitor, MarkingState>::MarkObject(
HeapObject host, HeapObject object) {
+ DCHECK(ReadOnlyHeap::Contains(object) || heap_->Contains(object));
concrete_visitor()->SynchronizePageAccess(object);
if (concrete_visitor()->marking_state()->WhiteToGrey(object)) {
local_marking_worklists_->Push(object);
@@ -38,6 +39,9 @@ template <typename ConcreteVisitor, typename MarkingState>
template <typename THeapObjectSlot>
void MarkingVisitorBase<ConcreteVisitor, MarkingState>::ProcessStrongHeapObject(
HeapObject host, THeapObjectSlot slot, HeapObject heap_object) {
+ concrete_visitor()->SynchronizePageAccess(heap_object);
+ BasicMemoryChunk* target_page = BasicMemoryChunk::FromHeapObject(heap_object);
+ if (!is_shared_heap_ && target_page->InSharedHeap()) return;
MarkObject(host, heap_object);
concrete_visitor()->RecordSlot(host, slot, heap_object);
}
@@ -355,7 +359,7 @@ int MarkingVisitorBase<ConcreteVisitor, MarkingState>::MarkDescriptorArrayBlack(
DescriptorArray descriptors) {
concrete_visitor()->marking_state()->WhiteToGrey(descriptors);
if (concrete_visitor()->marking_state()->GreyToBlack(descriptors)) {
- VisitPointer(descriptors, descriptors.map_slot());
+ VisitMapPointer(descriptors);
VisitPointers(descriptors, descriptors.GetFirstPointerSlot(),
descriptors.GetDescriptorSlot(0));
return DescriptorArray::BodyDescriptor::SizeOf(descriptors.map(),
diff --git a/chromium/v8/src/heap/marking-visitor.h b/chromium/v8/src/heap/marking-visitor.h
index 45dda338d01..f8795aadfd5 100644
--- a/chromium/v8/src/heap/marking-visitor.h
+++ b/chromium/v8/src/heap/marking-visitor.h
@@ -114,7 +114,8 @@ class MarkingVisitorBase : public HeapVisitor<int, ConcreteVisitor> {
mark_compact_epoch_(mark_compact_epoch),
bytecode_flush_mode_(bytecode_flush_mode),
is_embedder_tracing_enabled_(is_embedder_tracing_enabled),
- is_forced_gc_(is_forced_gc) {}
+ is_forced_gc_(is_forced_gc),
+ is_shared_heap_(heap->IsShared()) {}
V8_INLINE int VisitBytecodeArray(Map map, BytecodeArray object);
V8_INLINE int VisitDescriptorArray(Map map, DescriptorArray object);
@@ -133,6 +134,11 @@ class MarkingVisitorBase : public HeapVisitor<int, ConcreteVisitor> {
V8_INLINE int VisitWeakCell(Map map, WeakCell object);
// ObjectVisitor overrides.
+ void VisitMapPointer(HeapObject host) final {
+ // Note that we are skipping recording the slot because map objects
+ // can't move, so this is safe (see ProcessStrongHeapObject for comparison).
+ MarkObject(host, HeapObject::cast(host.map()));
+ }
V8_INLINE void VisitPointer(HeapObject host, ObjectSlot p) final {
VisitPointersImpl(host, p, p + 1);
}
@@ -196,6 +202,7 @@ class MarkingVisitorBase : public HeapVisitor<int, ConcreteVisitor> {
const BytecodeFlushMode bytecode_flush_mode_;
const bool is_embedder_tracing_enabled_;
const bool is_forced_gc_;
+ const bool is_shared_heap_;
};
} // namespace internal
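The marking-visitor changes above cache heap->IsShared() in the new is_shared_heap_ member and make ProcessStrongHeapObject ignore references that point into shared-heap pages unless the visitor itself belongs to the shared heap. A minimal sketch of that pattern, with made-up types rather than V8's:

#include <iostream>

struct Page { bool in_shared_heap; };
struct HeapObjectSketch { const Page* page; int id; };

class MarkingVisitorSketch {
 public:
  explicit MarkingVisitorSketch(bool is_shared_heap)
      : is_shared_heap_(is_shared_heap) {}

  void ProcessStrongReference(const HeapObjectSketch& target) {
    // A visitor that is not itself part of the shared heap must not mark
    // objects on shared-heap pages; the shared heap's own GC handles those.
    if (!is_shared_heap_ && target.page->in_shared_heap) return;
    std::cout << "marking object " << target.id << "\n";
  }

 private:
  const bool is_shared_heap_;
};

int main() {
  Page shared_page{true}, local_page{false};
  HeapObjectSketch shared_object{&shared_page, 1}, local_object{&local_page, 2};
  MarkingVisitorSketch visitor(/*is_shared_heap=*/false);
  visitor.ProcessStrongReference(shared_object);  // Skipped.
  visitor.ProcessStrongReference(local_object);   // Marked.
  return 0;
}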
diff --git a/chromium/v8/src/heap/memory-allocator.cc b/chromium/v8/src/heap/memory-allocator.cc
index b5bccb879d0..5783f2d04b9 100644
--- a/chromium/v8/src/heap/memory-allocator.cc
+++ b/chromium/v8/src/heap/memory-allocator.cc
@@ -20,118 +20,23 @@
namespace v8 {
namespace internal {
-static base::LazyInstance<CodeRangeAddressHint>::type code_range_address_hint =
- LAZY_INSTANCE_INITIALIZER;
-
-namespace {
-void FunctionInStaticBinaryForAddressHint() {}
-} // namespace
-
-Address CodeRangeAddressHint::GetAddressHint(size_t code_range_size) {
- base::MutexGuard guard(&mutex_);
- auto it = recently_freed_.find(code_range_size);
- if (it == recently_freed_.end() || it->second.empty()) {
- return FUNCTION_ADDR(&FunctionInStaticBinaryForAddressHint);
- }
- Address result = it->second.back();
- it->second.pop_back();
- return result;
-}
-
-void CodeRangeAddressHint::NotifyFreedCodeRange(Address code_range_start,
- size_t code_range_size) {
- base::MutexGuard guard(&mutex_);
- recently_freed_[code_range_size].push_back(code_range_start);
-}
-
// -----------------------------------------------------------------------------
// MemoryAllocator
//
-MemoryAllocator::MemoryAllocator(Isolate* isolate, size_t capacity,
- size_t code_range_size)
+MemoryAllocator::MemoryAllocator(Isolate* isolate,
+ v8::PageAllocator* code_page_allocator,
+ size_t capacity)
: isolate_(isolate),
data_page_allocator_(isolate->page_allocator()),
- code_page_allocator_(nullptr),
+ code_page_allocator_(code_page_allocator),
capacity_(RoundUp(capacity, Page::kPageSize)),
size_(0),
size_executable_(0),
lowest_ever_allocated_(static_cast<Address>(-1ll)),
highest_ever_allocated_(kNullAddress),
unmapper_(isolate->heap(), this) {
- InitializeCodePageAllocator(data_page_allocator_, code_range_size);
-}
-
-void MemoryAllocator::InitializeCodePageAllocator(
- v8::PageAllocator* page_allocator, size_t requested) {
- DCHECK_NULL(code_page_allocator_instance_.get());
-
- code_page_allocator_ = page_allocator;
-
- if (requested == 0) {
- if (!isolate_->RequiresCodeRange()) return;
- // When a target requires the code range feature, we put all code objects
- // in a kMaximalCodeRangeSize range of virtual address space, so that
- // they can call each other with near calls.
- requested = kMaximalCodeRangeSize;
- } else if (requested <= kMinimumCodeRangeSize) {
- requested = kMinimumCodeRangeSize;
- }
-
- const size_t reserved_area =
- kReservedCodeRangePages * MemoryAllocator::GetCommitPageSize();
- if (requested < (kMaximalCodeRangeSize - reserved_area)) {
- requested += RoundUp(reserved_area, MemoryChunk::kPageSize);
- // Fullfilling both reserved pages requirement and huge code area
- // alignments is not supported (requires re-implementation).
- DCHECK_LE(kMinExpectedOSPageSize, page_allocator->AllocatePageSize());
- }
- DCHECK(!isolate_->RequiresCodeRange() || requested <= kMaximalCodeRangeSize);
-
- Address hint =
- RoundDown(code_range_address_hint.Pointer()->GetAddressHint(requested),
- page_allocator->AllocatePageSize());
- VirtualMemory reservation(
- page_allocator, requested, reinterpret_cast<void*>(hint),
- std::max(kMinExpectedOSPageSize, page_allocator->AllocatePageSize()));
- if (!reservation.IsReserved()) {
- V8::FatalProcessOutOfMemory(isolate_,
- "CodeRange setup: allocate virtual memory");
- }
- code_range_ = reservation.region();
- isolate_->AddCodeRange(code_range_.begin(), code_range_.size());
-
- // We are sure that we have mapped a block of requested addresses.
- DCHECK_GE(reservation.size(), requested);
- Address base = reservation.address();
-
- // On some platforms, specifically Win64, we need to reserve some pages at
- // the beginning of an executable space. See
- // https://cs.chromium.org/chromium/src/components/crash/content/
- // app/crashpad_win.cc?rcl=fd680447881449fba2edcf0589320e7253719212&l=204
- // for details.
- if (reserved_area > 0) {
- if (!reservation.SetPermissions(base, reserved_area,
- PageAllocator::kReadWrite))
- V8::FatalProcessOutOfMemory(isolate_, "CodeRange setup: set permissions");
-
- base += reserved_area;
- }
- Address aligned_base = RoundUp(base, MemoryChunk::kAlignment);
- size_t size =
- RoundDown(reservation.size() - (aligned_base - base) - reserved_area,
- MemoryChunk::kPageSize);
- DCHECK(IsAligned(aligned_base, kMinExpectedOSPageSize));
-
- LOG(isolate_,
- NewEvent("CodeRange", reinterpret_cast<void*>(reservation.address()),
- requested));
-
- code_reservation_ = std::move(reservation);
- code_page_allocator_instance_ = std::make_unique<base::BoundedPageAllocator>(
- page_allocator, aligned_base, size,
- static_cast<size_t>(MemoryChunk::kAlignment));
- code_page_allocator_ = code_page_allocator_instance_.get();
+ DCHECK_NOT_NULL(code_page_allocator);
}
void MemoryAllocator::TearDown() {
@@ -147,13 +52,6 @@ void MemoryAllocator::TearDown() {
last_chunk_.Free();
}
- if (code_page_allocator_instance_.get()) {
- DCHECK(!code_range_.is_empty());
- code_range_address_hint.Pointer()->NotifyFreedCodeRange(code_range_.begin(),
- code_range_.size());
- code_range_ = base::AddressRegion();
- code_page_allocator_instance_.reset();
- }
code_page_allocator_ = nullptr;
data_page_allocator_ = nullptr;
}
diff --git a/chromium/v8/src/heap/memory-allocator.h b/chromium/v8/src/heap/memory-allocator.h
index 179877e753a..d405aefa53b 100644
--- a/chromium/v8/src/heap/memory-allocator.h
+++ b/chromium/v8/src/heap/memory-allocator.h
@@ -17,6 +17,7 @@
#include "src/base/macros.h"
#include "src/base/platform/mutex.h"
#include "src/base/platform/semaphore.h"
+#include "src/heap/code-range.h"
#include "src/heap/heap.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/spaces.h"
@@ -30,27 +31,6 @@ class Heap;
class Isolate;
class ReadOnlyPage;
-// The process-wide singleton that keeps track of code range regions with the
-// intention to reuse free code range regions as a workaround for CFG memory
-// leaks (see crbug.com/870054).
-class CodeRangeAddressHint {
- public:
- // Returns the most recently freed code range start address for the given
- // size. If there is no such entry, then a random address is returned.
- V8_EXPORT_PRIVATE Address GetAddressHint(size_t code_range_size);
-
- V8_EXPORT_PRIVATE void NotifyFreedCodeRange(Address code_range_start,
- size_t code_range_size);
-
- private:
- base::Mutex mutex_;
- // A map from code range size to an array of recently freed code range
- // addresses. There should be O(1) different code range sizes.
- // The length of each array is limited by the peak number of code ranges,
- // which should be also O(1).
- std::unordered_map<size_t, std::vector<Address>> recently_freed_;
-};
-
// ----------------------------------------------------------------------------
// A space acquires chunks of memory from the operating system. The memory
// allocator allocates and deallocates pages for the paged heap spaces and large
@@ -172,8 +152,9 @@ class MemoryAllocator {
V8_EXPORT_PRIVATE static base::AddressRegion ComputeDiscardMemoryArea(
Address addr, size_t size);
- V8_EXPORT_PRIVATE MemoryAllocator(Isolate* isolate, size_t max_capacity,
- size_t code_range_size);
+ V8_EXPORT_PRIVATE MemoryAllocator(Isolate* isolate,
+ v8::PageAllocator* code_page_allocator,
+ size_t max_capacity);
V8_EXPORT_PRIVATE void TearDown();
@@ -283,17 +264,6 @@ class MemoryAllocator {
: data_page_allocator_;
}
- // A region of memory that may contain executable code including reserved
- // OS page with read-write access in the beginning.
- const base::AddressRegion& code_range() const {
- // |code_range_| >= |optional RW pages| + |code_page_allocator_instance_|
- DCHECK_IMPLIES(!code_range_.is_empty(), code_page_allocator_instance_);
- DCHECK_IMPLIES(!code_range_.is_empty(),
- code_range_.contains(code_page_allocator_instance_->begin(),
- code_page_allocator_instance_->size()));
- return code_range_;
- }
-
Unmapper* unmapper() { return &unmapper_; }
// Performs all necessary bookkeeping to free the memory, but does not free
@@ -306,9 +276,6 @@ class MemoryAllocator {
void RegisterReadOnlyMemory(ReadOnlyPage* page);
private:
- void InitializeCodePageAllocator(v8::PageAllocator* page_allocator,
- size_t requested);
-
// PreFreeMemory logically frees the object, i.e., it unregisters the
// memory, logs a delete event and adds the chunk to remembered unmapped
// pages.
@@ -360,10 +327,6 @@ class MemoryAllocator {
Isolate* isolate_;
- // This object controls virtual space reserved for code on the V8 heap. This
- // is only valid for 64-bit architectures where kRequiresCodeRange.
- VirtualMemory code_reservation_;
-
// Page allocator used for allocating data pages. Depending on the
// configuration it may be a page allocator instance provided by
// v8::Platform or a BoundedPageAllocator (when pointer compression is
@@ -371,29 +334,12 @@ class MemoryAllocator {
v8::PageAllocator* data_page_allocator_;
// Page allocator used for allocating code pages. Depending on the
- // configuration it may be a page allocator instance provided by
- // v8::Platform or a BoundedPageAllocator (when pointer compression is
- // enabled or on those 64-bit architectures where pc-relative 32-bit
+ // configuration it may be a page allocator instance provided by v8::Platform
+ // or a BoundedPageAllocator from Heap::code_range_ (when pointer compression
+ // is enabled or on those 64-bit architectures where pc-relative 32-bit
// displacement can be used for call and jump instructions).
v8::PageAllocator* code_page_allocator_;
- // A part of the |code_reservation_| that may contain executable code
- // including reserved page with read-write access in the beginning.
- // See details below.
- base::AddressRegion code_range_;
-
- // This unique pointer owns the instance of bounded code allocator
- // that controls executable pages allocation. It does not control the
- // optionally existing page in the beginning of the |code_range_|.
- // So, summarizing all above, the following conditions hold:
- // 1) |code_reservation_| >= |code_range_|
- // 2) |code_range_| >= |optional RW pages| +
- // |code_page_allocator_instance_|. 3) |code_reservation_| is
- // AllocatePageSize()-aligned 4) |code_page_allocator_instance_| is
- // MemoryChunk::kAlignment-aligned 5) |code_range_| is
- // CommitPageSize()-aligned
- std::unique_ptr<base::BoundedPageAllocator> code_page_allocator_instance_;
-
// Maximum space size in bytes.
size_t capacity_;
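Taken together, the memory-allocator changes above move code-range reservation out of MemoryAllocator: the constructor now accepts an already-configured code page allocator (judging by the new src/heap/code-range.h include, presumably owned by a separate CodeRange component) and only asserts that it was given one. A hypothetical sketch of that dependency-injection shape:

#include <cassert>
#include <cstddef>

class PageAllocatorSketch {};

class MemoryAllocatorSketch {
 public:
  MemoryAllocatorSketch(PageAllocatorSketch* data_page_allocator,
                        PageAllocatorSketch* code_page_allocator,
                        std::size_t capacity)
      : data_page_allocator_(data_page_allocator),
        code_page_allocator_(code_page_allocator),
        capacity_(capacity) {
    // The caller now owns code-range setup; this class only consumes it.
    assert(code_page_allocator_ != nullptr);
  }

  PageAllocatorSketch* code_page_allocator() const {
    return code_page_allocator_;
  }

 private:
  PageAllocatorSketch* data_page_allocator_;
  PageAllocatorSketch* code_page_allocator_;
  std::size_t capacity_;
};

int main() {
  PageAllocatorSketch data_allocator, code_allocator;
  MemoryAllocatorSketch allocator(&data_allocator, &code_allocator,
                                  /*capacity=*/1 << 20);
  (void)allocator;
  return 0;
}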
diff --git a/chromium/v8/src/heap/memory-chunk.cc b/chromium/v8/src/heap/memory-chunk.cc
index c2355c6b84b..4d16da707f9 100644
--- a/chromium/v8/src/heap/memory-chunk.cc
+++ b/chromium/v8/src/heap/memory-chunk.cc
@@ -161,6 +161,9 @@ MemoryChunk* MemoryChunk::Initialize(BasicMemoryChunk* basic_chunk, Heap* heap,
chunk->possibly_empty_buckets_.Initialize();
+ // All pages of a shared heap need to be marked with this flag.
+ if (heap->IsShared()) chunk->SetFlag(IN_SHARED_HEAP);
+
#ifdef V8_ENABLE_CONSERVATIVE_STACK_SCANNING
chunk->object_start_bitmap_ = ObjectStartBitmap(chunk->area_start());
#endif
diff --git a/chromium/v8/src/heap/memory-measurement.cc b/chromium/v8/src/heap/memory-measurement.cc
index ab491e19a6e..491fe3102ab 100644
--- a/chromium/v8/src/heap/memory-measurement.cc
+++ b/chromium/v8/src/heap/memory-measurement.cc
@@ -336,7 +336,7 @@ std::unique_ptr<v8::MeasureMemoryDelegate> MemoryMeasurement::DefaultDelegate(
bool NativeContextInferrer::InferForContext(Isolate* isolate, Context context,
Address* native_context) {
- Map context_map = context.synchronized_map();
+ Map context_map = context.map(kAcquireLoad);
Object maybe_native_context =
TaggedField<Object, Map::kConstructorOrBackPointerOrNativeContextOffset>::
Acquire_Load(isolate, context_map);
diff --git a/chromium/v8/src/heap/new-spaces-inl.h b/chromium/v8/src/heap/new-spaces-inl.h
index ffd5d8cfd7e..98626260e11 100644
--- a/chromium/v8/src/heap/new-spaces-inl.h
+++ b/chromium/v8/src/heap/new-spaces-inl.h
@@ -5,12 +5,12 @@
#ifndef V8_HEAP_NEW_SPACES_INL_H_
#define V8_HEAP_NEW_SPACES_INL_H_
+#include "src/base/sanitizer/msan.h"
#include "src/common/globals.h"
#include "src/heap/heap.h"
#include "src/heap/new-spaces.h"
#include "src/heap/spaces-inl.h"
#include "src/objects/tagged-impl.h"
-#include "src/sanitizer/msan.h"
namespace v8 {
namespace internal {
@@ -87,6 +87,8 @@ HeapObject SemiSpaceObjectIterator::Next() {
AllocationResult NewSpace::AllocateRaw(int size_in_bytes,
AllocationAlignment alignment,
AllocationOrigin origin) {
+ DCHECK(!FLAG_single_generation);
+ DCHECK(!FLAG_enable_third_party_heap);
#if DEBUG
VerifyTop();
#endif
@@ -110,7 +112,7 @@ AllocationResult NewSpace::AllocateFastUnaligned(int size_in_bytes,
AllocationOrigin origin) {
Address top = allocation_info_.top();
if (allocation_info_.limit() < top + size_in_bytes) {
- return AllocationResult::Retry();
+ return AllocationResult::Retry(NEW_SPACE);
}
HeapObject obj = HeapObject::FromAddress(top);
@@ -135,7 +137,7 @@ AllocationResult NewSpace::AllocateFastAligned(
if (allocation_info_.limit() - top <
static_cast<uintptr_t>(aligned_size_in_bytes)) {
- return AllocationResult::Retry();
+ return AllocationResult::Retry(NEW_SPACE);
}
HeapObject obj = HeapObject::FromAddress(top);
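The Retry() call sites above now pass NEW_SPACE, so a failed young-generation allocation reports which space needs a GC before the allocation is retried. A toy AllocationResult with that shape (names are illustrative only):

#include <iostream>

enum AllocationSpace { NEW_SPACE, OLD_SPACE, CODE_SPACE };

class AllocationResultSketch {
 public:
  static AllocationResultSketch Retry(AllocationSpace space_to_collect) {
    return AllocationResultSketch(space_to_collect, nullptr);
  }
  static AllocationResultSketch Success(void* address) {
    return AllocationResultSketch(NEW_SPACE, address);
  }

  bool IsRetry() const { return address_ == nullptr; }
  // The space a caller should garbage-collect before trying again.
  AllocationSpace RetrySpace() const { return space_; }

 private:
  AllocationResultSketch(AllocationSpace space, void* address)
      : space_(space), address_(address) {}
  AllocationSpace space_;
  void* address_;
};

int main() {
  AllocationResultSketch result = AllocationResultSketch::Retry(NEW_SPACE);
  if (result.IsRetry()) {
    std::cout << "collect space " << result.RetrySpace() << " and retry\n";
  }
  return 0;
}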
diff --git a/chromium/v8/src/heap/new-spaces.cc b/chromium/v8/src/heap/new-spaces.cc
index 029b77beb4d..8486c7bcc44 100644
--- a/chromium/v8/src/heap/new-spaces.cc
+++ b/chromium/v8/src/heap/new-spaces.cc
@@ -4,6 +4,7 @@
#include "src/heap/new-spaces.h"
+#include "src/common/globals.h"
#include "src/heap/array-buffer-sweeper.h"
#include "src/heap/heap-inl.h"
#include "src/heap/incremental-marking.h"
@@ -628,8 +629,9 @@ AllocationResult NewSpace::AllocateRawSlow(int size_in_bytes,
AllocationResult NewSpace::AllocateRawUnaligned(int size_in_bytes,
AllocationOrigin origin) {
+ DCHECK(!FLAG_enable_third_party_heap);
if (!EnsureAllocation(size_in_bytes, kWordAligned)) {
- return AllocationResult::Retry();
+ return AllocationResult::Retry(NEW_SPACE);
}
DCHECK_EQ(allocation_info_.start(), allocation_info_.top());
@@ -646,8 +648,9 @@ AllocationResult NewSpace::AllocateRawUnaligned(int size_in_bytes,
AllocationResult NewSpace::AllocateRawAligned(int size_in_bytes,
AllocationAlignment alignment,
AllocationOrigin origin) {
+ DCHECK(!FLAG_enable_third_party_heap);
if (!EnsureAllocation(size_in_bytes, alignment)) {
- return AllocationResult::Retry();
+ return AllocationResult::Retry(NEW_SPACE);
}
DCHECK_EQ(allocation_info_.start(), allocation_info_.top());
@@ -741,9 +744,11 @@ void NewSpace::Verify(Isolate* isolate) {
CHECK_EQ(external_space_bytes[t], ExternalBackingStoreBytes(t));
}
+ if (!FLAG_concurrent_array_buffer_sweeping) {
size_t bytes = heap()->array_buffer_sweeper()->young().BytesSlow();
CHECK_EQ(bytes,
ExternalBackingStoreBytes(ExternalBackingStoreType::kArrayBuffer));
+ }
// Check semi-spaces.
CHECK_EQ(from_space_.id(), kFromSpace);
diff --git a/chromium/v8/src/heap/object-stats.cc b/chromium/v8/src/heap/object-stats.cc
index 86b2e6a2c40..5e0074e47f7 100644
--- a/chromium/v8/src/heap/object-stats.cc
+++ b/chromium/v8/src/heap/object-stats.cc
@@ -74,7 +74,7 @@ class FieldStatsCollector : public ObjectVisitor {
raw_fields_count_in_object -= kDoubleSize / kTaggedSize;
*boxed_double_fields_count_ += 1;
} else if (host.IsSeqString()) {
- int string_data = SeqString::cast(host).synchronized_length() *
+ int string_data = SeqString::cast(host).length(kAcquireLoad) *
(String::cast(host).IsOneByteRepresentation() ? 1 : 2) /
kTaggedSize;
DCHECK_LE(string_data, raw_fields_count_in_object);
diff --git a/chromium/v8/src/heap/objects-visiting-inl.h b/chromium/v8/src/heap/objects-visiting-inl.h
index 37ecd50c8df..8aac430ddbd 100644
--- a/chromium/v8/src/heap/objects-visiting-inl.h
+++ b/chromium/v8/src/heap/objects-visiting-inl.h
@@ -76,8 +76,9 @@ ResultType HeapVisitor<ResultType, ConcreteVisitor>::Visit(Map map,
template <typename ResultType, typename ConcreteVisitor>
void HeapVisitor<ResultType, ConcreteVisitor>::VisitMapPointer(
HeapObject host) {
- DCHECK(!host.map_word().IsForwardingAddress());
- static_cast<ConcreteVisitor*>(this)->VisitPointer(host, host.map_slot());
+ DCHECK(!host.map_word(kRelaxedLoad).IsForwardingAddress());
+ if (!static_cast<ConcreteVisitor*>(this)->ShouldVisitMapPointer()) return;
+ static_cast<ConcreteVisitor*>(this)->VisitMapPointer(host);
}
#define VISIT(TypeName) \
@@ -167,7 +168,7 @@ ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitFreeSpace(
if (visitor->ShouldVisitMapPointer()) {
visitor->VisitMapPointer(object);
}
- return static_cast<ResultType>(object.size());
+ return static_cast<ResultType>(object.size(kRelaxedLoad));
}
template <typename ConcreteVisitor>
diff --git a/chromium/v8/src/heap/objects-visiting.h b/chromium/v8/src/heap/objects-visiting.h
index 9f133d6cfab..7ea322dfb99 100644
--- a/chromium/v8/src/heap/objects-visiting.h
+++ b/chromium/v8/src/heap/objects-visiting.h
@@ -52,8 +52,11 @@ namespace internal {
V(SyntheticModule) \
V(TransitionArray) \
IF_WASM(V, WasmArray) \
+ IF_WASM(V, WasmExportedFunctionData) \
+ IF_WASM(V, WasmFunctionData) \
IF_WASM(V, WasmIndirectFunctionTable) \
IF_WASM(V, WasmInstanceObject) \
+ IF_WASM(V, WasmJSFunctionData) \
IF_WASM(V, WasmStruct) \
IF_WASM(V, WasmTypeInfo)
@@ -78,6 +81,8 @@ class HeapVisitor : public ObjectVisitor {
public:
V8_INLINE ResultType Visit(HeapObject object);
V8_INLINE ResultType Visit(Map map, HeapObject object);
+ // A callback for visiting the map pointer in the object header.
+ V8_INLINE void VisitMapPointer(HeapObject host);
protected:
// A guard predicate for visiting the object.
@@ -86,8 +91,6 @@ class HeapVisitor : public ObjectVisitor {
V8_INLINE bool ShouldVisit(HeapObject object) { return true; }
// Guard predicate for visiting the objects map pointer separately.
V8_INLINE bool ShouldVisitMapPointer() { return true; }
- // A callback for visiting the map pointer in the object header.
- V8_INLINE void VisitMapPointer(HeapObject host);
// If this predicate returns false, then the heap visitor will fail
// in default Visit implemention for subclasses of JSObject.
V8_INLINE bool AllowDefaultJSObjectVisit() { return true; }
diff --git a/chromium/v8/src/heap/paged-spaces-inl.h b/chromium/v8/src/heap/paged-spaces-inl.h
index e135e30efc3..8c77186583d 100644
--- a/chromium/v8/src/heap/paged-spaces-inl.h
+++ b/chromium/v8/src/heap/paged-spaces-inl.h
@@ -131,6 +131,7 @@ AllocationResult PagedSpace::AllocateFastAligned(
AllocationResult PagedSpace::AllocateRawUnaligned(int size_in_bytes,
AllocationOrigin origin) {
+ DCHECK(!FLAG_enable_third_party_heap);
if (!EnsureLabMain(size_in_bytes, origin)) {
return AllocationResult::Retry(identity());
}
@@ -153,6 +154,7 @@ AllocationResult PagedSpace::AllocateRawUnaligned(int size_in_bytes,
AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
AllocationAlignment alignment,
AllocationOrigin origin) {
+ DCHECK(!FLAG_enable_third_party_heap);
DCHECK_EQ(identity(), OLD_SPACE);
int allocation_size = size_in_bytes;
// We don't know exactly how much filler we need to align until space is
@@ -182,6 +184,7 @@ AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
AllocationResult PagedSpace::AllocateRaw(int size_in_bytes,
AllocationAlignment alignment,
AllocationOrigin origin) {
+ DCHECK(!FLAG_enable_third_party_heap);
AllocationResult result;
if (alignment != kWordAligned) {
diff --git a/chromium/v8/src/heap/paged-spaces.cc b/chromium/v8/src/heap/paged-spaces.cc
index f541974a506..8543f109ede 100644
--- a/chromium/v8/src/heap/paged-spaces.cc
+++ b/chromium/v8/src/heap/paged-spaces.cc
@@ -731,7 +731,7 @@ void PagedSpace::Verify(Isolate* isolate, ObjectVisitor* visitor) {
}
CHECK(allocation_pointer_found_in_space);
- if (identity() == OLD_SPACE) {
+ if (identity() == OLD_SPACE && !FLAG_concurrent_array_buffer_sweeping) {
size_t bytes = heap()->array_buffer_sweeper()->old().BytesSlow();
CHECK_EQ(bytes,
ExternalBackingStoreBytes(ExternalBackingStoreType::kArrayBuffer));
@@ -832,8 +832,8 @@ void PagedSpace::PrepareForMarkCompact() {
bool PagedSpace::RefillLabMain(int size_in_bytes, AllocationOrigin origin) {
VMState<GC> state(heap()->isolate());
- RuntimeCallTimerScope runtime_timer(
- heap()->isolate(), RuntimeCallCounterId::kGC_Custom_SlowAllocateRaw);
+ RCS_SCOPE(heap()->isolate(),
+ RuntimeCallCounterId::kGC_Custom_SlowAllocateRaw);
return RawRefillLabMain(size_in_bytes, origin);
}
diff --git a/chromium/v8/src/heap/read-only-heap.cc b/chromium/v8/src/heap/read-only-heap.cc
index d5f7e843efe..05ca965e082 100644
--- a/chromium/v8/src/heap/read-only-heap.cc
+++ b/chromium/v8/src/heap/read-only-heap.cc
@@ -249,6 +249,10 @@ bool ReadOnlyHeap::read_only_object_cache_is_initialized() const {
return read_only_object_cache_.size() > 0;
}
+size_t ReadOnlyHeap::read_only_object_cache_size() const {
+ return read_only_object_cache_.size();
+}
+
ReadOnlyHeapObjectIterator::ReadOnlyHeapObjectIterator(ReadOnlyHeap* ro_heap)
: ReadOnlyHeapObjectIterator(ro_heap->read_only_space()) {}
diff --git a/chromium/v8/src/heap/read-only-heap.h b/chromium/v8/src/heap/read-only-heap.h
index f947832c5f9..558a694c944 100644
--- a/chromium/v8/src/heap/read-only-heap.h
+++ b/chromium/v8/src/heap/read-only-heap.h
@@ -81,14 +81,15 @@ class ReadOnlyHeap {
// Returns a read-only cache entry at a particular index.
Object cached_read_only_object(size_t i) const;
bool read_only_object_cache_is_initialized() const;
+ size_t read_only_object_cache_size() const;
ReadOnlySpace* read_only_space() const { return read_only_space_; }
// Returns whether the ReadOnlySpace will actually be shared taking into
// account whether shared memory is available with pointer compression.
static bool IsReadOnlySpaceShared() {
- return V8_SHARED_RO_HEAP_BOOL && (!COMPRESS_POINTERS_IN_ISOLATE_CAGE_BOOL ||
- IsSharedMemoryAvailable());
+ return V8_SHARED_RO_HEAP_BOOL &&
+ (!COMPRESS_POINTERS_BOOL || COMPRESS_POINTERS_IN_SHARED_CAGE_BOOL);
}
virtual void InitializeIsolateRoots(Isolate* isolate) {}
diff --git a/chromium/v8/src/heap/read-only-spaces.cc b/chromium/v8/src/heap/read-only-spaces.cc
index 5adac66afe0..248b5c22e6f 100644
--- a/chromium/v8/src/heap/read-only-spaces.cc
+++ b/chromium/v8/src/heap/read-only-spaces.cc
@@ -56,7 +56,18 @@ void ReadOnlyArtifacts::VerifyChecksum(SnapshotData* read_only_snapshot_data,
CHECK_WITH_MSG(snapshot_checksum,
"Attempt to create the read-only heap after already "
"creating from a snapshot.");
- CHECK_EQ(read_only_blob_checksum_, snapshot_checksum);
+ if (!FLAG_stress_snapshot) {
+ // --stress-snapshot is only intended to check how well the
+ // serializer/deserializer copes with unexpected objects, and is not
+ // intended to test whether the newly deserialized Isolate would actually
+ // work since it serializes a currently running Isolate, which is not
+ // supported. As a result, it's possible that it will create a new
+ // read-only snapshot that is not compatible with the original one (for
+ // instance due to the string table being re-ordered). Since we won't
+ // actually use that new Isolate, we're ok with any potential corruption.
+ // See crbug.com/1043058.
+ CHECK_EQ(read_only_blob_checksum_, snapshot_checksum);
+ }
} else {
// If there's no checksum, then that means the read-only heap objects are
// being created.
@@ -70,11 +81,10 @@ SingleCopyReadOnlyArtifacts::~SingleCopyReadOnlyArtifacts() {
// TearDown requires MemoryAllocator which itself is tied to an Isolate.
shared_read_only_space_->pages_.resize(0);
- v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
for (ReadOnlyPage* chunk : pages_) {
void* chunk_address = reinterpret_cast<void*>(chunk->address());
- size_t size = RoundUp(chunk->size(), page_allocator->AllocatePageSize());
- CHECK(page_allocator->FreePages(chunk_address, size));
+ size_t size = RoundUp(chunk->size(), page_allocator_->AllocatePageSize());
+ CHECK(page_allocator_->FreePages(chunk_address, size));
}
}
@@ -86,6 +96,12 @@ ReadOnlyHeap* SingleCopyReadOnlyArtifacts::GetReadOnlyHeapForIsolate(
void SingleCopyReadOnlyArtifacts::Initialize(Isolate* isolate,
std::vector<ReadOnlyPage*>&& pages,
const AllocationStats& stats) {
+ // Do not use the platform page allocator when sharing a pointer compression
+ // cage, as the Isolate's page allocator is a BoundedPageAllocator tied to the
+ // shared cage.
+ page_allocator_ = COMPRESS_POINTERS_IN_SHARED_CAGE_BOOL
+ ? isolate->page_allocator()
+ : GetPlatformPageAllocator();
pages_ = std::move(pages);
set_accounting_stats(stats);
set_shared_read_only_space(
@@ -304,11 +320,12 @@ void ReadOnlySpace::DetachPagesAndAddToArtifacts(
DCHECK(ReadOnlyHeap::IsReadOnlySpaceShared());
Heap* heap = ReadOnlySpace::heap();
- // Without pointer compression, ReadOnlySpace pages are directly shared
- // between all heaps and so must be unregistered from their originating
- // allocator.
- Seal(COMPRESS_POINTERS_BOOL ? SealMode::kDetachFromHeap
- : SealMode::kDetachFromHeapAndUnregisterMemory);
+ // Without pointer compression in a per-Isolate cage, ReadOnlySpace pages are
+ // directly shared between all heaps and so must be unregistered from their
+ // originating allocator.
+ Seal(COMPRESS_POINTERS_IN_ISOLATE_CAGE_BOOL
+ ? SealMode::kDetachFromHeap
+ : SealMode::kDetachFromHeapAndUnregisterMemory);
artifacts->Initialize(heap->isolate(), std::move(pages_), accounting_stats_);
}
@@ -635,6 +652,7 @@ HeapObject ReadOnlySpace::TryAllocateLinearlyAligned(
AllocationResult ReadOnlySpace::AllocateRawAligned(
int size_in_bytes, AllocationAlignment alignment) {
+ DCHECK(!FLAG_enable_third_party_heap);
DCHECK(!IsDetached());
int allocation_size = size_in_bytes;
@@ -789,9 +807,9 @@ SharedReadOnlySpace::SharedReadOnlySpace(Heap* heap,
SingleCopyReadOnlyArtifacts* artifacts)
: SharedReadOnlySpace(heap) {
// This constructor should only be used when RO_SPACE is shared without
- // pointer compression.
+ // pointer compression in a per-Isolate cage.
DCHECK(V8_SHARED_RO_HEAP_BOOL);
- DCHECK(!COMPRESS_POINTERS_BOOL);
+ DCHECK(!COMPRESS_POINTERS_IN_ISOLATE_CAGE_BOOL);
accounting_stats_ = artifacts->accounting_stats();
pages_ = artifacts->pages();
}
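In the read-only-space changes above, SingleCopyReadOnlyArtifacts now remembers which page allocator its pages came from (the isolate's allocator when a shared pointer-compression cage is used, the platform allocator otherwise), and the destructor frees the pages through that same allocator. A toy version of the pattern, with hypothetical names:

#include <cstddef>
#include <cstdlib>
#include <vector>

// Stand-in for a page allocator; real code would reserve OS pages.
class PageAllocatorSketch {
 public:
  void* Allocate(std::size_t size) { return std::malloc(size); }
  void Free(void* page) { std::free(page); }
};

class ReadOnlyArtifactsSketch {
 public:
  explicit ReadOnlyArtifactsSketch(PageAllocatorSketch* page_allocator)
      : page_allocator_(page_allocator) {}
  ~ReadOnlyArtifactsSketch() {
    // Free through the allocator the pages came from, not a global default.
    for (void* page : pages_) page_allocator_->Free(page);
  }
  void AddPage(std::size_t size) {
    pages_.push_back(page_allocator_->Allocate(size));
  }

 private:
  PageAllocatorSketch* page_allocator_;
  std::vector<void*> pages_;
};

int main() {
  PageAllocatorSketch cage_allocator;
  ReadOnlyArtifactsSketch artifacts(&cage_allocator);
  artifacts.AddPage(4096);
  return 0;
}  // Pages are released via cage_allocator, the allocator that reserved them.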
diff --git a/chromium/v8/src/heap/read-only-spaces.h b/chromium/v8/src/heap/read-only-spaces.h
index ee4b2a82234..0ca05d8d4c8 100644
--- a/chromium/v8/src/heap/read-only-spaces.h
+++ b/chromium/v8/src/heap/read-only-spaces.h
@@ -132,6 +132,9 @@ class SingleCopyReadOnlyArtifacts : public ReadOnlyArtifacts {
const AllocationStats& stats) override;
void ReinstallReadOnlySpace(Isolate* isolate) override;
void VerifyHeapAndSpaceRelationships(Isolate* isolate) override;
+
+ private:
+ v8::PageAllocator* page_allocator_ = nullptr;
};
// -----------------------------------------------------------------------------
diff --git a/chromium/v8/src/heap/scavenger-inl.h b/chromium/v8/src/heap/scavenger-inl.h
index 8560b5b62ba..193565d34de 100644
--- a/chromium/v8/src/heap/scavenger-inl.h
+++ b/chromium/v8/src/heap/scavenger-inl.h
@@ -119,7 +119,7 @@ void Scavenger::PageMemoryFence(MaybeObject object) {
bool Scavenger::MigrateObject(Map map, HeapObject source, HeapObject target,
int size) {
// Copy the content of source to target.
- target.set_map_word(MapWord::FromMap(map));
+ target.set_map_word(MapWord::FromMap(map), kRelaxedStore);
heap()->CopyBlock(target.address() + kTaggedSize,
source.address() + kTaggedSize, size - kTaggedSize);
@@ -159,7 +159,7 @@ CopyAndForwardResult Scavenger::SemiSpaceCopyObject(
const bool self_success = MigrateObject(map, object, target, object_size);
if (!self_success) {
allocator_.FreeLast(NEW_SPACE, target, object_size);
- MapWord map_word = object.synchronized_map_word();
+ MapWord map_word = object.map_word(kAcquireLoad);
HeapObjectReference::Update(slot, map_word.ToForwardingAddress());
DCHECK(!Heap::InFromPage(*slot));
return Heap::InToPage(*slot)
@@ -184,6 +184,7 @@ CopyAndForwardResult Scavenger::PromoteObject(Map map, THeapObjectSlot slot,
static_assert(std::is_same<THeapObjectSlot, FullHeapObjectSlot>::value ||
std::is_same<THeapObjectSlot, HeapObjectSlot>::value,
"Only FullHeapObjectSlot and HeapObjectSlot are expected here");
+ DCHECK_GE(object_size, Heap::kMinObjectSizeInTaggedWords * kTaggedSize);
AllocationAlignment alignment = HeapObject::RequiredAlignment(map);
AllocationResult allocation = allocator_.Allocate(
OLD_SPACE, object_size, AllocationOrigin::kGC, alignment);
@@ -195,7 +196,7 @@ CopyAndForwardResult Scavenger::PromoteObject(Map map, THeapObjectSlot slot,
const bool self_success = MigrateObject(map, object, target, object_size);
if (!self_success) {
allocator_.FreeLast(OLD_SPACE, target, object_size);
- MapWord map_word = object.synchronized_map_word();
+ MapWord map_word = object.map_word(kAcquireLoad);
HeapObjectReference::Update(slot, map_word.ToForwardingAddress());
DCHECK(!Heap::InFromPage(*slot));
return Heap::InToPage(*slot)
@@ -326,24 +327,25 @@ SlotCallbackResult Scavenger::EvacuateShortcutCandidate(Map map,
HeapObjectReference::Update(slot, first);
if (!Heap::InYoungGeneration(first)) {
- object.synchronized_set_map_word(MapWord::FromForwardingAddress(first));
+ object.set_map_word(MapWord::FromForwardingAddress(first), kReleaseStore);
return REMOVE_SLOT;
}
- MapWord first_word = first.synchronized_map_word();
+ MapWord first_word = first.map_word(kAcquireLoad);
if (first_word.IsForwardingAddress()) {
HeapObject target = first_word.ToForwardingAddress();
HeapObjectReference::Update(slot, target);
- object.synchronized_set_map_word(MapWord::FromForwardingAddress(target));
+ object.set_map_word(MapWord::FromForwardingAddress(target),
+ kReleaseStore);
return Heap::InYoungGeneration(target) ? KEEP_SLOT : REMOVE_SLOT;
}
Map map = first_word.ToMap();
SlotCallbackResult result =
EvacuateObjectDefault(map, slot, first, first.SizeFromMap(map),
Map::ObjectFieldsFrom(map.visitor_id()));
- object.synchronized_set_map_word(
- MapWord::FromForwardingAddress(slot.ToHeapObject()));
+ object.set_map_word(MapWord::FromForwardingAddress(slot.ToHeapObject()),
+ kReleaseStore);
return result;
}
DCHECK_EQ(ObjectFields::kMaybePointers,
@@ -390,7 +392,7 @@ SlotCallbackResult Scavenger::ScavengeObject(THeapObjectSlot p,
DCHECK(Heap::InFromPage(object));
// Synchronized load that consumes the publishing CAS of MigrateObject.
- MapWord first_word = object.synchronized_map_word();
+ MapWord first_word = object.map_word(kAcquireLoad);
// If the first word is a forwarding address, the object has already been
// copied.
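The scavenger changes above replace the synchronized_map_word()/synchronized_set_map_word() accessors with map_word(kAcquireLoad) and set_map_word(..., kReleaseStore) (or kRelaxedStore where no publication is needed), making the memory ordering explicit at each call site. The standalone sketch below mimics that tag-dispatch style with std::atomic; ObjectHeaderSketch and the tag enums are illustrative, not the V8 definitions:

#include <atomic>
#include <cassert>
#include <cstdint>

enum RelaxedLoadTag { kRelaxedLoad };
enum AcquireLoadTag { kAcquireLoad };
enum ReleaseStoreTag { kReleaseStore };

class ObjectHeaderSketch {
 public:
  std::uintptr_t map_word(RelaxedLoadTag) const {
    return word_.load(std::memory_order_relaxed);
  }
  std::uintptr_t map_word(AcquireLoadTag) const {
    // Pairs with the releasing store below: everything written before the
    // forwarding address was published is visible after this load.
    return word_.load(std::memory_order_acquire);
  }
  void set_map_word(std::uintptr_t value, ReleaseStoreTag) {
    word_.store(value, std::memory_order_release);
  }

 private:
  std::atomic<std::uintptr_t> word_{0};
};

int main() {
  ObjectHeaderSketch header;
  header.set_map_word(0x1234, kReleaseStore);
  assert(header.map_word(kAcquireLoad) == 0x1234);
  return 0;
}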
diff --git a/chromium/v8/src/heap/scavenger.cc b/chromium/v8/src/heap/scavenger.cc
index be9971e7c68..efa3ed2f614 100644
--- a/chromium/v8/src/heap/scavenger.cc
+++ b/chromium/v8/src/heap/scavenger.cc
@@ -130,13 +130,13 @@ namespace {
V8_INLINE bool IsUnscavengedHeapObject(Heap* heap, Object object) {
return Heap::InFromPage(object) &&
- !HeapObject::cast(object).map_word().IsForwardingAddress();
+ !HeapObject::cast(object).map_word(kRelaxedLoad).IsForwardingAddress();
}
// Same as IsUnscavengedHeapObject() above but specialized for HeapObjects.
V8_INLINE bool IsUnscavengedHeapObject(Heap* heap, HeapObject heap_object) {
return Heap::InFromPage(heap_object) &&
- !heap_object.map_word().IsForwardingAddress();
+ !heap_object.map_word(kRelaxedLoad).IsForwardingAddress();
}
bool IsUnscavengedHeapObjectSlot(Heap* heap, FullObjectSlot p) {
@@ -152,7 +152,7 @@ class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
return object;
}
- MapWord map_word = HeapObject::cast(object).map_word();
+ MapWord map_word = HeapObject::cast(object).map_word(kRelaxedLoad);
if (map_word.IsForwardingAddress()) {
return map_word.ToForwardingAddress();
}
@@ -384,6 +384,10 @@ void ScavengerCollector::CollectGarbage() {
&Heap::UpdateYoungReferenceInExternalStringTableEntry);
heap_->incremental_marking()->UpdateMarkingWorklistAfterScavenge();
+
+ if (V8_UNLIKELY(FLAG_track_retaining_path)) {
+ heap_->UpdateRetainersAfterScavenge();
+ }
}
if (FLAG_concurrent_marking) {
@@ -481,7 +485,7 @@ void ScavengerCollector::HandleSurvivingNewLargeObjects() {
Map map = update_info.second;
// Order is important here. We have to re-install the map to have access
// to meta-data like size during page promotion.
- object.set_map_word(MapWord::FromMap(map));
+ object.set_map_word(MapWord::FromMap(map), kRelaxedStore);
LargePage* page = LargePage::FromHeapObject(object);
heap_->lo_space()->PromoteNewLargeObject(page);
}
@@ -734,6 +738,7 @@ void Scavenger::AddEphemeronHashTable(EphemeronHashTable table) {
void RootScavengeVisitor::VisitRootPointer(Root root, const char* description,
FullObjectSlot p) {
DCHECK(!HasWeakHeapObjectTag(*p));
+ DCHECK(!MapWord::IsPacked((*p).ptr()));
ScavengePointer(p);
}
@@ -741,12 +746,15 @@ void RootScavengeVisitor::VisitRootPointers(Root root, const char* description,
FullObjectSlot start,
FullObjectSlot end) {
// Copy all HeapObject pointers in [start, end)
- for (FullObjectSlot p = start; p < end; ++p) ScavengePointer(p);
+ for (FullObjectSlot p = start; p < end; ++p) {
+ ScavengePointer(p);
+ }
}
void RootScavengeVisitor::ScavengePointer(FullObjectSlot p) {
Object object = *p;
DCHECK(!HasWeakHeapObjectTag(object));
+ DCHECK(!MapWord::IsPacked(object.ptr()));
if (Heap::InYoungGeneration(object)) {
scavenger_->ScavengeObject(FullHeapObjectSlot(p), HeapObject::cast(object));
}
diff --git a/chromium/v8/src/heap/setup-heap-internal.cc b/chromium/v8/src/heap/setup-heap-internal.cc
index 8a3e1fda121..886c89aeae0 100644
--- a/chromium/v8/src/heap/setup-heap-internal.cc
+++ b/chromium/v8/src/heap/setup-heap-internal.cc
@@ -195,9 +195,10 @@ void Heap::FinalizePartialMap(Map map) {
map.set_constructor_or_back_pointer(roots.null_value());
}
-AllocationResult Heap::Allocate(Map map, AllocationType allocation_type) {
- DCHECK(map.instance_type() != MAP_TYPE);
- int size = map.instance_size();
+AllocationResult Heap::Allocate(Handle<Map> map,
+ AllocationType allocation_type) {
+ DCHECK(map->instance_type() != MAP_TYPE);
+ int size = map->instance_size();
HeapObject result;
AllocationResult allocation = AllocateRaw(size, allocation_type);
if (!allocation.To(&result)) return allocation;
@@ -205,7 +206,7 @@ AllocationResult Heap::Allocate(Map map, AllocationType allocation_type) {
WriteBarrierMode write_barrier_mode =
allocation_type == AllocationType::kYoung ? SKIP_WRITE_BARRIER
: UPDATE_WRITE_BARRIER;
- result.set_map_after_allocation(map, write_barrier_mode);
+ result.set_map_after_allocation(*map, write_barrier_mode);
return result;
}
@@ -281,7 +282,7 @@ bool Heap::CreateInitialMaps() {
{
AllocationResult allocation =
- Allocate(roots.null_map(), AllocationType::kReadOnly);
+ Allocate(roots.null_map_handle(), AllocationType::kReadOnly);
if (!allocation.To(&obj)) return false;
}
set_null_value(Oddball::cast(obj));
@@ -289,7 +290,7 @@ bool Heap::CreateInitialMaps() {
{
AllocationResult allocation =
- Allocate(roots.undefined_map(), AllocationType::kReadOnly);
+ Allocate(roots.undefined_map_handle(), AllocationType::kReadOnly);
if (!allocation.To(&obj)) return false;
}
set_undefined_value(Oddball::cast(obj));
@@ -297,7 +298,7 @@ bool Heap::CreateInitialMaps() {
DCHECK(!InYoungGeneration(roots.undefined_value()));
{
AllocationResult allocation =
- Allocate(roots.the_hole_map(), AllocationType::kReadOnly);
+ Allocate(roots.the_hole_map_handle(), AllocationType::kReadOnly);
if (!allocation.To(&obj)) return false;
}
set_the_hole_value(Oddball::cast(obj));
@@ -317,7 +318,7 @@ bool Heap::CreateInitialMaps() {
// Allocate the empty enum cache.
{
AllocationResult allocation =
- Allocate(roots.enum_cache_map(), AllocationType::kReadOnly);
+ Allocate(roots.enum_cache_map_handle(), AllocationType::kReadOnly);
if (!allocation.To(&obj)) return false;
}
set_empty_enum_cache(EnumCache::cast(obj));
@@ -381,6 +382,7 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_PRIMITIVE_MAP(SYMBOL_TYPE, Symbol::kSize, symbol,
Context::SYMBOL_FUNCTION_INDEX)
ALLOCATE_MAP(FOREIGN_TYPE, Foreign::kSize, foreign)
+ ALLOCATE_MAP(MEGA_DOM_HANDLER_TYPE, MegaDomHandler::kSize, mega_dom_handler)
ALLOCATE_PRIMITIVE_MAP(ODDBALL_TYPE, Oddball::kSize, boolean,
Context::BOOLEAN_FUNCTION_INDEX);
@@ -500,6 +502,10 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_MAP(CODE_DATA_CONTAINER_TYPE, CodeDataContainer::kSize,
code_data_container)
+ IF_WASM(ALLOCATE_MAP, WASM_EXPORTED_FUNCTION_DATA_TYPE,
+ WasmExportedFunctionData::kSize, wasm_exported_function_data)
+ IF_WASM(ALLOCATE_MAP, WASM_JS_FUNCTION_DATA_TYPE, WasmJSFunctionData::kSize,
+ wasm_js_function_data)
IF_WASM(ALLOCATE_MAP, WASM_TYPE_INFO_TYPE, WasmTypeInfo::kSize,
wasm_type_info)
@@ -550,8 +556,9 @@ bool Heap::CreateInitialMaps() {
{
// Empty array boilerplate description
- AllocationResult alloc = Allocate(roots.array_boilerplate_description_map(),
- AllocationType::kReadOnly);
+ AllocationResult alloc =
+ Allocate(roots.array_boilerplate_description_map_handle(),
+ AllocationType::kReadOnly);
if (!alloc.To(&obj)) return false;
ArrayBoilerplateDescription::cast(obj).set_constant_elements(
@@ -564,7 +571,7 @@ bool Heap::CreateInitialMaps() {
{
AllocationResult allocation =
- Allocate(roots.boolean_map(), AllocationType::kReadOnly);
+ Allocate(roots.boolean_map_handle(), AllocationType::kReadOnly);
if (!allocation.To(&obj)) return false;
}
set_true_value(Oddball::cast(obj));
@@ -572,7 +579,7 @@ bool Heap::CreateInitialMaps() {
{
AllocationResult allocation =
- Allocate(roots.boolean_map(), AllocationType::kReadOnly);
+ Allocate(roots.boolean_map_handle(), AllocationType::kReadOnly);
if (!allocation.To(&obj)) return false;
}
set_false_value(Oddball::cast(obj));
@@ -864,6 +871,7 @@ void Heap::CreateInitialObjects() {
set_is_concat_spreadable_protector(*factory->NewProtector());
set_map_iterator_protector(*factory->NewProtector());
set_no_elements_protector(*factory->NewProtector());
+ set_mega_dom_protector(*factory->NewProtector());
set_promise_hook_protector(*factory->NewProtector());
set_promise_resolve_protector(*factory->NewProtector());
set_promise_species_protector(*factory->NewProtector());
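Heap::Allocate above now takes a Handle<Map> instead of a raw Map. A handle is an extra indirection through a slot that the garbage collector keeps up to date, so the reference stays valid across any collection the allocation might trigger, whereas a raw tagged pointer copied into a local could go stale. A toy illustration of that indirection (ToyHeap and its methods are hypothetical):

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

// Toy heap whose "handles" are indices into a table of slots that the
// collector rewrites when it moves an object.
struct ToyHeap {
  std::vector<std::uintptr_t> handle_slots;

  std::size_t NewHandle(std::uintptr_t address) {
    handle_slots.push_back(address);
    return handle_slots.size() - 1;
  }
  // Dereferencing re-reads the slot, so it always sees the current address.
  std::uintptr_t Deref(std::size_t handle) const {
    return handle_slots[handle];
  }
  // A moving collection updates the slot; raw copies of the old address are
  // now stale, but handle holders are unaffected.
  void MoveObject(std::size_t handle, std::uintptr_t new_address) {
    handle_slots[handle] = new_address;
  }
};

int main() {
  ToyHeap heap;
  std::uintptr_t raw_address = 0x1000;
  std::size_t handle = heap.NewHandle(raw_address);
  heap.MoveObject(handle, 0x2000);  // Simulated GC move.
  std::cout << std::hex << raw_address << "\n";         // Stale: 1000.
  std::cout << std::hex << heap.Deref(handle) << "\n";  // Current: 2000.
  return 0;
}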
diff --git a/chromium/v8/src/heap/spaces-inl.h b/chromium/v8/src/heap/spaces-inl.h
index ccdf050e681..9b9a02af437 100644
--- a/chromium/v8/src/heap/spaces-inl.h
+++ b/chromium/v8/src/heap/spaces-inl.h
@@ -141,7 +141,8 @@ AllocationResult LocalAllocationBuffer::AllocateRawAligned(
int filler_size = Heap::GetFillToAlign(current_top, alignment);
Address new_top = current_top + filler_size + size_in_bytes;
- if (new_top > allocation_info_.limit()) return AllocationResult::Retry();
+ if (new_top > allocation_info_.limit())
+ return AllocationResult::Retry(NEW_SPACE);
allocation_info_.set_top(new_top);
if (filler_size > 0) {
diff --git a/chromium/v8/src/heap/spaces.cc b/chromium/v8/src/heap/spaces.cc
index b6c1e0bcc2a..63346786d51 100644
--- a/chromium/v8/src/heap/spaces.cc
+++ b/chromium/v8/src/heap/spaces.cc
@@ -11,6 +11,7 @@
#include "src/base/bits.h"
#include "src/base/bounded-page-allocator.h"
#include "src/base/macros.h"
+#include "src/base/sanitizer/msan.h"
#include "src/common/globals.h"
#include "src/heap/combined-heap.h"
#include "src/heap/concurrent-marking.h"
@@ -31,7 +32,6 @@
#include "src/objects/heap-object.h"
#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/objects-inl.h"
-#include "src/sanitizer/msan.h"
#include "src/snapshot/snapshot.h"
#include "src/utils/ostreams.h"
diff --git a/chromium/v8/src/heap/spaces.h b/chromium/v8/src/heap/spaces.h
index 0a0a884dc02..4afada00ceb 100644
--- a/chromium/v8/src/heap/spaces.h
+++ b/chromium/v8/src/heap/spaces.h
@@ -221,9 +221,11 @@ class Page : public MemoryChunk {
// from [page_addr .. page_addr + kPageSize[. This only works if the object
// is in fact in a page.
static Page* FromAddress(Address addr) {
+ DCHECK(!V8_ENABLE_THIRD_PARTY_HEAP_BOOL);
return reinterpret_cast<Page*>(addr & ~kPageAlignmentMask);
}
static Page* FromHeapObject(HeapObject o) {
+ DCHECK(!V8_ENABLE_THIRD_PARTY_HEAP_BOOL);
return reinterpret_cast<Page*>(o.ptr() & ~kAlignmentMask);
}
@@ -232,6 +234,7 @@ class Page : public MemoryChunk {
// we subtract a hole word. The valid address ranges from
// [page_addr + area_start_ .. page_addr + kPageSize + kTaggedSize].
static Page* FromAllocationAreaAddress(Address address) {
+ DCHECK(!V8_ENABLE_THIRD_PARTY_HEAP_BOOL);
return Page::FromAddress(address - kTaggedSize);
}
diff --git a/chromium/v8/src/heap/sweeper.cc b/chromium/v8/src/heap/sweeper.cc
index c4e60be7072..0e35a3cea2b 100644
--- a/chromium/v8/src/heap/sweeper.cc
+++ b/chromium/v8/src/heap/sweeper.cc
@@ -351,9 +351,6 @@ int Sweeper::RawSweep(
size_t live_bytes = 0;
size_t max_freed_bytes = 0;
- // TODO(ulan): we don't have to clear type old-to-old slots in code space
- // because the concurrent marker doesn't mark code objects. This requires
- // the write barrier for code objects to check the color of the code object.
bool non_empty_typed_slots = p->typed_slot_set<OLD_TO_NEW>() != nullptr ||
p->typed_slot_set<OLD_TO_OLD>() != nullptr;
@@ -393,7 +390,8 @@ int Sweeper::RawSweep(
free_start, free_end, p, non_empty_typed_slots, &free_ranges_map,
&old_to_new_cleanup);
}
- Map map = object.synchronized_map();
+ Map map = object.map(kAcquireLoad);
+ DCHECK(map.IsMap());
int size = object.SizeFromMap(map);
live_bytes += size;
free_start = free_end + size;
diff --git a/chromium/v8/src/heap/third-party/heap-api-stub.cc b/chromium/v8/src/heap/third-party/heap-api-stub.cc
index 6d31479bec6..f7ccb50810a 100644
--- a/chromium/v8/src/heap/third-party/heap-api-stub.cc
+++ b/chromium/v8/src/heap/third-party/heap-api-stub.cc
@@ -38,6 +38,12 @@ const base::AddressRegion& Heap::GetCodeRange() {
}
// static
+bool Heap::InSpace(Address, AllocationSpace) { return false; }
+
+// static
+bool Heap::InOldSpace(Address) { return false; }
+
+// static
bool Heap::InCodeSpace(Address) { return false; }
// static
diff --git a/chromium/v8/src/heap/third-party/heap-api.h b/chromium/v8/src/heap/third-party/heap-api.h
index c4712b988e9..16f2fde8842 100644
--- a/chromium/v8/src/heap/third-party/heap-api.h
+++ b/chromium/v8/src/heap/third-party/heap-api.h
@@ -26,6 +26,10 @@ class Heap {
const base::AddressRegion& GetCodeRange();
+ static bool InSpace(Address address, AllocationSpace space);
+
+ static bool InOldSpace(Address address);
+
static bool InCodeSpace(Address address);
static bool InReadOnlySpace(Address address);
@@ -38,6 +42,8 @@ class Heap {
HeapObject NextObject();
bool CollectGarbage();
+
+ size_t Capacity();
};
} // namespace third_party_heap
diff --git a/chromium/v8/src/heap/weak-object-worklists.cc b/chromium/v8/src/heap/weak-object-worklists.cc
index 532739000fe..84df473076f 100644
--- a/chromium/v8/src/heap/weak-object-worklists.cc
+++ b/chromium/v8/src/heap/weak-object-worklists.cc
@@ -115,19 +115,17 @@ void WeakObjects::UpdateWeakObjectsInCode(
void WeakObjects::UpdateJSWeakRefs(
WeakObjectWorklist<JSWeakRef>& js_weak_refs) {
- if (FLAG_harmony_weak_refs) {
- js_weak_refs.Update(
- [](JSWeakRef js_weak_ref_in, JSWeakRef* js_weak_ref_out) -> bool {
- JSWeakRef forwarded = ForwardingAddress(js_weak_ref_in);
-
- if (!forwarded.is_null()) {
- *js_weak_ref_out = forwarded;
- return true;
- }
-
- return false;
- });
- }
+ js_weak_refs.Update(
+ [](JSWeakRef js_weak_ref_in, JSWeakRef* js_weak_ref_out) -> bool {
+ JSWeakRef forwarded = ForwardingAddress(js_weak_ref_in);
+
+ if (!forwarded.is_null()) {
+ *js_weak_ref_out = forwarded;
+ return true;
+ }
+
+ return false;
+ });
}
void WeakObjects::UpdateWeakCells(WeakObjectWorklist<WeakCell>& weak_cells) {
diff --git a/chromium/v8/src/ic/accessor-assembler.cc b/chromium/v8/src/ic/accessor-assembler.cc
index 35d1da5cd92..0ff67d030af 100644
--- a/chromium/v8/src/ic/accessor-assembler.cc
+++ b/chromium/v8/src/ic/accessor-assembler.cc
@@ -8,6 +8,7 @@
#include "src/base/optional.h"
#include "src/builtins/builtins-constructor-gen.h"
#include "src/codegen/code-factory.h"
+#include "src/codegen/interface-descriptors-inl.h"
#include "src/ic/handler-configuration.h"
#include "src/ic/ic.h"
#include "src/ic/keyed-store-generic.h"
@@ -16,6 +17,7 @@
#include "src/objects/cell.h"
#include "src/objects/foreign.h"
#include "src/objects/heap-number.h"
+#include "src/objects/megadom-handler.h"
#include "src/objects/module.h"
#include "src/objects/objects-inl.h"
#include "src/objects/property-details.h"
@@ -134,6 +136,55 @@ void AccessorAssembler::HandlePolymorphicCase(
}
}
+void AccessorAssembler::TryMegaDOMCase(TNode<Object> lookup_start_object,
+ TNode<Map> lookup_start_object_map,
+ TVariable<MaybeObject>* var_handler,
+ TNode<Object> vector,
+ TNode<TaggedIndex> slot, Label* miss,
+ ExitPoint* exit_point) {
+ // Check if the receiver is a JS_API_OBJECT
+ GotoIfNot(IsJSApiObjectMap(lookup_start_object_map), miss);
+
+ // Check if receiver requires access check
+ GotoIf(IsSetWord32<Map::Bits1::IsAccessCheckNeededBit>(
+ LoadMapBitField(lookup_start_object_map)),
+ miss);
+
+ CSA_ASSERT(this, TaggedEqual(LoadFeedbackVectorSlot(CAST(vector), slot),
+ MegaDOMSymbolConstant()));
+
+ // In some cases, the handler has already been loaded into |var_handler| by
+ // the caller; otherwise load it from the next feedback vector slot.
+ TNode<MegaDomHandler> handler;
+ if (var_handler->IsBound()) {
+ handler = CAST(var_handler->value());
+ } else {
+ TNode<MaybeObject> maybe_handler =
+ LoadFeedbackVectorSlot(CAST(vector), slot, kTaggedSize);
+ CSA_ASSERT(this, IsStrong(maybe_handler));
+ handler = CAST(maybe_handler);
+ }
+
+ // Check if dom protector cell is still valid
+ GotoIf(IsMegaDOMProtectorCellInvalid(), miss);
+
+ // Load the getter
+ TNode<MaybeObject> maybe_getter = LoadMegaDomHandlerAccessor(handler);
+ CSA_ASSERT(this, IsWeakOrCleared(maybe_getter));
+ TNode<FunctionTemplateInfo> getter =
+ CAST(GetHeapObjectAssumeWeak(maybe_getter, miss));
+
+ // Load the accessor context
+ TNode<MaybeObject> maybe_context = LoadMegaDomHandlerContext(handler);
+ CSA_ASSERT(this, IsWeakOrCleared(maybe_context));
+ TNode<Context> context = CAST(GetHeapObjectAssumeWeak(maybe_context, miss));
+
+ // TODO(gsathya): This builtin throws an exception on interface check
+ // failure, but we should miss to the runtime instead.
+ exit_point->Return(
+ CallBuiltin(Builtins::kCallFunctionTemplate_CheckCompatibleReceiver,
+ context, getter, IntPtrConstant(0), lookup_start_object));
+}
+
void AccessorAssembler::HandleLoadICHandlerCase(
const LazyLoadICParameters* p, TNode<Object> handler, Label* miss,
ExitPoint* exit_point, ICMode ic_mode, OnNonExistent on_nonexistent,
@@ -543,7 +594,7 @@ void AccessorAssembler::HandleLoadICSmiHandlerLoadNamedCase(
properties, var_name_index.value(), &var_details, &var_value);
TNode<Object> value = CallGetterIfAccessor(
var_value.value(), CAST(holder), var_details.value(), p->context(),
- p->receiver(), miss);
+ p->receiver(), p->name(), miss);
exit_point->Return(value);
}
}
@@ -563,11 +614,17 @@ void AccessorAssembler::HandleLoadICSmiHandlerLoadNamedCase(
}
BIND(&native_data_property);
- HandleLoadCallbackProperty(p, CAST(holder), handler_word, exit_point);
+ {
+ GotoIf(IsSideEffectFreeDebuggingActive(), &slow);
+ HandleLoadCallbackProperty(p, CAST(holder), handler_word, exit_point);
+ }
BIND(&api_getter);
- HandleLoadAccessor(p, CAST(holder), handler_word, CAST(handler), handler_kind,
- exit_point);
+ {
+ GotoIf(IsSideEffectFreeDebuggingActive(), &slow);
+ HandleLoadAccessor(p, CAST(holder), handler_word, CAST(handler),
+ handler_kind, exit_point);
+ }
BIND(&proxy);
{
@@ -627,7 +684,8 @@ void AccessorAssembler::HandleLoadICSmiHandlerLoadNamedCase(
GotoIf(IsTheHole(value), miss);
exit_point->Return(CallGetterIfAccessor(value, CAST(holder), details,
- p->context(), p->receiver(), miss));
+ p->context(), p->receiver(),
+ p->name(), miss));
}
BIND(&interceptor);
@@ -645,8 +703,9 @@ void AccessorAssembler::HandleLoadICSmiHandlerLoadNamedCase(
p->name(), p->slot(), p->vector());
} else {
- exit_point->ReturnCallRuntime(Runtime::kGetProperty, p->context(), holder,
- p->name(), p->receiver());
+ exit_point->ReturnCallRuntime(Runtime::kGetProperty, p->context(),
+ p->lookup_start_object(), p->name(),
+ p->receiver());
}
}
@@ -912,7 +971,7 @@ void AccessorAssembler::HandleLoadICProtoHandler(
properties, name_index, &var_details, &var_value);
TNode<Object> value = CallGetterIfAccessor(
var_value.value(), CAST(var_holder->value()), var_details.value(),
- p->context(), p->receiver(), miss);
+ p->context(), p->receiver(), p->name(), miss);
exit_point->Return(value);
}
},
@@ -1507,7 +1566,6 @@ void AccessorAssembler::HandleStoreAccessor(const StoreICParameters* p,
LoadObjectField(accessor_pair, AccessorPair::kSetterOffset);
CSA_ASSERT(this, Word32BinaryNot(IsTheHole(setter)));
- Callable callable = CodeFactory::Call(isolate());
Return(Call(p->context(), setter, p->receiver(), p->value()));
}
@@ -1686,9 +1744,12 @@ void AccessorAssembler::HandleStoreICProtoHandler(
Goto(&store);
BIND(&store);
- TNode<IntPtrT> argc = IntPtrConstant(1);
- Return(CallApiCallback(context, callback, argc, data, api_holder.value(),
- p->receiver(), p->value()));
+ {
+ GotoIf(IsSideEffectFreeDebuggingActive(), &if_slow);
+ TNode<IntPtrT> argc = IntPtrConstant(1);
+ Return(CallApiCallback(context, callback, argc, data,
+ api_holder.value(), p->receiver(), p->value()));
+ }
}
BIND(&if_store_global_proxy);
@@ -2036,9 +2097,9 @@ void AccessorAssembler::EmitElementLoad(
Label* if_hole, Label* rebox_double, TVariable<Float64T>* var_double_value,
Label* unimplemented_elements_kind, Label* out_of_bounds, Label* miss,
ExitPoint* exit_point, LoadAccessMode access_mode) {
- Label if_typed_array(this), if_fast(this), if_fast_packed(this),
- if_fast_holey(this), if_fast_double(this), if_fast_holey_double(this),
- if_nonfast(this), if_dictionary(this);
+ Label if_rab_gsab_typed_array(this), if_typed_array(this), if_fast(this),
+ if_fast_packed(this), if_fast_holey(this), if_fast_double(this),
+ if_fast_holey_double(this), if_nonfast(this), if_dictionary(this);
Branch(Int32GreaterThan(elements_kind,
Int32Constant(LAST_ANY_NONEXTENSIBLE_ELEMENTS_KIND)),
&if_nonfast, &if_fast);
@@ -2119,7 +2180,16 @@ void AccessorAssembler::EmitElementLoad(
BIND(&if_nonfast);
{
- STATIC_ASSERT(LAST_ELEMENTS_KIND == LAST_FIXED_TYPED_ARRAY_ELEMENTS_KIND);
+ Label uint8_elements(this), int8_elements(this), uint16_elements(this),
+ int16_elements(this), uint32_elements(this), int32_elements(this),
+ float32_elements(this), float64_elements(this), bigint64_elements(this),
+ biguint64_elements(this);
+ STATIC_ASSERT(LAST_ELEMENTS_KIND ==
+ LAST_RAB_GSAB_FIXED_TYPED_ARRAY_ELEMENTS_KIND);
+ GotoIf(Int32GreaterThanOrEqual(
+ elements_kind,
+ Int32Constant(FIRST_RAB_GSAB_FIXED_TYPED_ARRAY_ELEMENTS_KIND)),
+ &if_rab_gsab_typed_array);
GotoIf(Int32GreaterThanOrEqual(
elements_kind,
Int32Constant(FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND)),
@@ -2145,88 +2215,129 @@ void AccessorAssembler::EmitElementLoad(
exit_point->Return(access_mode == LoadAccessMode::kHas ? TrueConstant()
: value);
}
-
- BIND(&if_typed_array);
{
- Comment("typed elements");
- // Check if buffer has been detached.
- TNode<JSArrayBuffer> buffer = LoadJSArrayBufferViewBuffer(CAST(object));
- GotoIf(IsDetachedBuffer(buffer), miss);
-
- // Bounds check.
- TNode<UintPtrT> length = LoadJSTypedArrayLength(CAST(object));
- GotoIfNot(UintPtrLessThan(intptr_index, length), out_of_bounds);
- if (access_mode == LoadAccessMode::kHas) {
- exit_point->Return(TrueConstant());
- } else {
- TNode<RawPtrT> data_ptr = LoadJSTypedArrayDataPtr(CAST(object));
-
- Label uint8_elements(this), int8_elements(this), uint16_elements(this),
- int16_elements(this), uint32_elements(this), int32_elements(this),
- float32_elements(this), float64_elements(this),
- bigint64_elements(this), biguint64_elements(this);
- Label* elements_kind_labels[] = {
- &uint8_elements, &uint8_elements, &int8_elements,
- &uint16_elements, &int16_elements, &uint32_elements,
- &int32_elements, &float32_elements, &float64_elements,
- &bigint64_elements, &biguint64_elements};
- int32_t elements_kinds[] = {
- UINT8_ELEMENTS, UINT8_CLAMPED_ELEMENTS, INT8_ELEMENTS,
- UINT16_ELEMENTS, INT16_ELEMENTS, UINT32_ELEMENTS,
- INT32_ELEMENTS, FLOAT32_ELEMENTS, FLOAT64_ELEMENTS,
- BIGINT64_ELEMENTS, BIGUINT64_ELEMENTS};
- const size_t kTypedElementsKindCount =
- LAST_FIXED_TYPED_ARRAY_ELEMENTS_KIND -
- FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND + 1;
- DCHECK_EQ(kTypedElementsKindCount, arraysize(elements_kinds));
- DCHECK_EQ(kTypedElementsKindCount, arraysize(elements_kind_labels));
- Switch(elements_kind, miss, elements_kinds, elements_kind_labels,
- kTypedElementsKindCount);
+ TVARIABLE(RawPtrT, data_ptr);
+ BIND(&if_rab_gsab_typed_array);
+ {
+ Comment("rab gsab typed elements");
+ Label variable_length(this), normal(this), length_check_ok(this);
+
+ TNode<JSTypedArray> array = CAST(object);
+ TNode<JSArrayBuffer> buffer = LoadJSArrayBufferViewBuffer(array);
+
+ // Bounds check (incl. detachedness check).
+ TNode<UintPtrT> length =
+ LoadVariableLengthJSTypedArrayLength(array, buffer, miss);
+ Branch(UintPtrLessThan(intptr_index, length), &length_check_ok,
+ out_of_bounds);
+ BIND(&length_check_ok);
+ {
+ if (access_mode == LoadAccessMode::kHas) {
+ exit_point->Return(TrueConstant());
+ } else {
+ data_ptr = LoadJSTypedArrayDataPtr(array);
+ Label* elements_kind_labels[] = {
+ &uint8_elements, &uint8_elements, &int8_elements,
+ &uint16_elements, &int16_elements, &uint32_elements,
+ &int32_elements, &float32_elements, &float64_elements,
+ &bigint64_elements, &biguint64_elements};
+ int32_t elements_kinds[] = {
+ RAB_GSAB_UINT8_ELEMENTS, RAB_GSAB_UINT8_CLAMPED_ELEMENTS,
+ RAB_GSAB_INT8_ELEMENTS, RAB_GSAB_UINT16_ELEMENTS,
+ RAB_GSAB_INT16_ELEMENTS, RAB_GSAB_UINT32_ELEMENTS,
+ RAB_GSAB_INT32_ELEMENTS, RAB_GSAB_FLOAT32_ELEMENTS,
+ RAB_GSAB_FLOAT64_ELEMENTS, RAB_GSAB_BIGINT64_ELEMENTS,
+ RAB_GSAB_BIGUINT64_ELEMENTS};
+ const size_t kTypedElementsKindCount =
+ LAST_RAB_GSAB_FIXED_TYPED_ARRAY_ELEMENTS_KIND -
+ FIRST_RAB_GSAB_FIXED_TYPED_ARRAY_ELEMENTS_KIND + 1;
+ DCHECK_EQ(kTypedElementsKindCount, arraysize(elements_kinds));
+ DCHECK_EQ(kTypedElementsKindCount, arraysize(elements_kind_labels));
+ Switch(elements_kind, miss, elements_kinds, elements_kind_labels,
+ kTypedElementsKindCount);
+ }
+ }
+ }
+ BIND(&if_typed_array);
+ {
+ Comment("typed elements");
+ // Check if buffer has been detached.
+ TNode<JSArrayBuffer> buffer = LoadJSArrayBufferViewBuffer(CAST(object));
+ GotoIf(IsDetachedBuffer(buffer), miss);
+
+ // Bounds check.
+ TNode<UintPtrT> length = LoadJSTypedArrayLength(CAST(object));
+ GotoIfNot(UintPtrLessThan(intptr_index, length), out_of_bounds);
+ if (access_mode == LoadAccessMode::kHas) {
+ exit_point->Return(TrueConstant());
+ } else {
+ data_ptr = LoadJSTypedArrayDataPtr(CAST(object));
+
+ Label* elements_kind_labels[] = {
+ &uint8_elements, &uint8_elements, &int8_elements,
+ &uint16_elements, &int16_elements, &uint32_elements,
+ &int32_elements, &float32_elements, &float64_elements,
+ &bigint64_elements, &biguint64_elements};
+ int32_t elements_kinds[] = {
+ UINT8_ELEMENTS, UINT8_CLAMPED_ELEMENTS, INT8_ELEMENTS,
+ UINT16_ELEMENTS, INT16_ELEMENTS, UINT32_ELEMENTS,
+ INT32_ELEMENTS, FLOAT32_ELEMENTS, FLOAT64_ELEMENTS,
+ BIGINT64_ELEMENTS, BIGUINT64_ELEMENTS};
+ const size_t kTypedElementsKindCount =
+ LAST_FIXED_TYPED_ARRAY_ELEMENTS_KIND -
+ FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND + 1;
+ DCHECK_EQ(kTypedElementsKindCount, arraysize(elements_kinds));
+ DCHECK_EQ(kTypedElementsKindCount, arraysize(elements_kind_labels));
+ Switch(elements_kind, miss, elements_kinds, elements_kind_labels,
+ kTypedElementsKindCount);
+ }
+ }
+ if (access_mode != LoadAccessMode::kHas) {
BIND(&uint8_elements);
{
Comment("UINT8_ELEMENTS"); // Handles UINT8_CLAMPED_ELEMENTS too.
- TNode<Int32T> element = Load<Uint8T>(data_ptr, intptr_index);
+ TNode<Int32T> element = Load<Uint8T>(data_ptr.value(), intptr_index);
exit_point->Return(SmiFromInt32(element));
}
BIND(&int8_elements);
{
Comment("INT8_ELEMENTS");
- TNode<Int32T> element = Load<Int8T>(data_ptr, intptr_index);
+ TNode<Int32T> element = Load<Int8T>(data_ptr.value(), intptr_index);
exit_point->Return(SmiFromInt32(element));
}
BIND(&uint16_elements);
{
Comment("UINT16_ELEMENTS");
TNode<IntPtrT> index = WordShl(intptr_index, IntPtrConstant(1));
- TNode<Int32T> element = Load<Uint16T>(data_ptr, index);
+ TNode<Int32T> element = Load<Uint16T>(data_ptr.value(), index);
exit_point->Return(SmiFromInt32(element));
}
BIND(&int16_elements);
{
Comment("INT16_ELEMENTS");
TNode<IntPtrT> index = WordShl(intptr_index, IntPtrConstant(1));
- TNode<Int32T> element = Load<Int16T>(data_ptr, index);
+ TNode<Int32T> element = Load<Int16T>(data_ptr.value(), index);
exit_point->Return(SmiFromInt32(element));
}
BIND(&uint32_elements);
{
Comment("UINT32_ELEMENTS");
TNode<IntPtrT> index = WordShl(intptr_index, IntPtrConstant(2));
- TNode<Uint32T> element = Load<Uint32T>(data_ptr, index);
+ TNode<Uint32T> element = Load<Uint32T>(data_ptr.value(), index);
exit_point->Return(ChangeUint32ToTagged(element));
}
BIND(&int32_elements);
{
Comment("INT32_ELEMENTS");
TNode<IntPtrT> index = WordShl(intptr_index, IntPtrConstant(2));
- TNode<Int32T> element = Load<Int32T>(data_ptr, index);
+ TNode<Int32T> element = Load<Int32T>(data_ptr.value(), index);
exit_point->Return(ChangeInt32ToTagged(element));
}
BIND(&float32_elements);
{
Comment("FLOAT32_ELEMENTS");
TNode<IntPtrT> index = WordShl(intptr_index, IntPtrConstant(2));
- TNode<Float32T> element = Load<Float32T>(data_ptr, index);
+ TNode<Float32T> element = Load<Float32T>(data_ptr.value(), index);
*var_double_value = ChangeFloat32ToFloat64(element);
Goto(rebox_double);
}
@@ -2234,7 +2345,7 @@ void AccessorAssembler::EmitElementLoad(
{
Comment("FLOAT64_ELEMENTS");
TNode<IntPtrT> index = WordShl(intptr_index, IntPtrConstant(3));
- TNode<Float64T> element = Load<Float64T>(data_ptr, index);
+ TNode<Float64T> element = Load<Float64T>(data_ptr.value(), index);
*var_double_value = element;
Goto(rebox_double);
}
@@ -2242,13 +2353,13 @@ void AccessorAssembler::EmitElementLoad(
{
Comment("BIGINT64_ELEMENTS");
exit_point->Return(LoadFixedTypedArrayElementAsTagged(
- data_ptr, Unsigned(intptr_index), BIGINT64_ELEMENTS));
+ data_ptr.value(), Unsigned(intptr_index), BIGINT64_ELEMENTS));
}
BIND(&biguint64_elements);
{
Comment("BIGUINT64_ELEMENTS");
exit_point->Return(LoadFixedTypedArrayElementAsTagged(
- data_ptr, Unsigned(intptr_index), BIGUINT64_ELEMENTS));
+ data_ptr.value(), Unsigned(intptr_index), BIGUINT64_ELEMENTS));
}
}
}
@@ -2460,7 +2571,7 @@ void AccessorAssembler::GenericPropertyLoad(
{
TNode<Object> value = CallGetterIfAccessor(
var_value.value(), lookup_start_object, var_details.value(),
- p->context(), p->receiver(), slow);
+ p->context(), p->receiver(), p->name(), slow);
IncrementCounter(isolate()->counters()->ic_keyed_load_generic_symbol(), 1);
Return(value);
}
@@ -2864,11 +2975,23 @@ void AccessorAssembler::LoadIC_Noninlined(const LoadICParameters* p,
DCHECK_EQ(MachineRepresentation::kTagged, var_handler->rep());
{
- // Check megamorphic case.
- GotoIfNot(TaggedEqual(feedback, MegamorphicSymbolConstant()), miss);
+ Label try_megamorphic(this), try_megadom(this);
+ GotoIf(TaggedEqual(feedback, MegamorphicSymbolConstant()),
+ &try_megamorphic);
+ GotoIf(TaggedEqual(feedback, MegaDOMSymbolConstant()), &try_megadom);
+ Goto(miss);
- TryProbeStubCache(isolate()->load_stub_cache(), p->lookup_start_object(),
- CAST(p->name()), if_handler, var_handler, miss);
+ BIND(&try_megamorphic);
+ {
+ TryProbeStubCache(isolate()->load_stub_cache(), p->lookup_start_object(),
+ CAST(p->name()), if_handler, var_handler, miss);
+ }
+
+ BIND(&try_megadom);
+ {
+ TryMegaDOMCase(p->lookup_start_object(), lookup_start_object_map,
+ var_handler, p->vector(), p->slot(), miss, exit_point);
+ }
}
}
@@ -2964,7 +3087,7 @@ void AccessorAssembler::LoadGlobalIC(TNode<HeapObject> maybe_feedback_vector,
BIND(&no_feedback);
{
int ic_kind =
- static_cast<int>((typeof_mode == INSIDE_TYPEOF)
+ static_cast<int>((typeof_mode == TypeofMode::kInside)
? FeedbackSlotKind::kLoadGlobalInsideTypeof
: FeedbackSlotKind::kLoadGlobalNotInsideTypeof);
exit_point->ReturnCallStub(
@@ -3023,7 +3146,7 @@ void AccessorAssembler::LoadGlobalIC_TryHandlerCase(
TNode<Object> handler = CAST(feedback_element);
GotoIf(TaggedEqual(handler, UninitializedSymbolConstant()), miss);
- OnNonExistent on_nonexistent = typeof_mode == NOT_INSIDE_TYPEOF
+ OnNonExistent on_nonexistent = typeof_mode == TypeofMode::kNotInside
? OnNonExistent::kThrowReferenceError
: OnNonExistent::kReturnUndefined;
@@ -3958,7 +4081,7 @@ void AccessorAssembler::GenerateLookupContextBaseline(TypeofMode typeof_mode) {
BIND(&slowpath);
{
auto name = Parameter<Object>(Descriptor::kName);
- Runtime::FunctionId function_id = typeof_mode == INSIDE_TYPEOF
+ Runtime::FunctionId function_id = typeof_mode == TypeofMode::kInside
? Runtime::kLoadLookupSlotInsideTypeof
: Runtime::kLoadLookupSlot;
TailCallRuntime(function_id, context, name);
@@ -3990,7 +4113,7 @@ void AccessorAssembler::GenerateLookupGlobalICBaseline(TypeofMode typeof_mode) {
// Slow path when we have to call out to the runtime
BIND(&slowpath);
- Runtime::FunctionId function_id = typeof_mode == INSIDE_TYPEOF
+ Runtime::FunctionId function_id = typeof_mode == TypeofMode::kInside
? Runtime::kLoadLookupSlotInsideTypeof
: Runtime::kLoadLookupSlot;
TailCallRuntime(function_id, context, name);
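
For orientation, the element-kind dispatch above pairs a table of jump labels with a table of elements-kind constants and asserts that both tables span the whole typed-array kind range before handing them to Switch. A minimal standalone C++ sketch of that parallel-table dispatch, using ordinary function pointers instead of CSA labels (all names here are illustrative, not V8 API):

#include <cstddef>
#include <cstdio>

// Illustrative stand-ins for a contiguous range of elements kinds.
enum ElementsKind { UINT8 = 0, INT8, UINT16, INT16, LAST_KIND = INT16 };

void OnUint8()  { std::puts("uint8 element"); }
void OnInt8()   { std::puts("int8 element"); }
void OnUint16() { std::puts("uint16 element"); }
void OnInt16()  { std::puts("int16 element"); }
void Miss()     { std::puts("miss"); }

void Dispatch(int kind) {
  // Parallel tables: handlers[i] belongs to kinds[i], in the same order.
  void (*handlers[])() = {OnUint8, OnInt8, OnUint16, OnInt16};
  int kinds[] = {UINT8, INT8, UINT16, INT16};
  constexpr size_t kCount = LAST_KIND - UINT8 + 1;
  static_assert(kCount == sizeof(kinds) / sizeof(kinds[0]), "kinds table");
  static_assert(kCount == sizeof(handlers) / sizeof(handlers[0]), "label table");
  for (size_t i = 0; i < kCount; ++i) {
    if (kinds[i] == kind) return handlers[i]();
  }
  Miss();  // No match: bail out, like the |miss| label in the real code.
}

int main() { Dispatch(UINT16); }
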
diff --git a/chromium/v8/src/ic/accessor-assembler.h b/chromium/v8/src/ic/accessor-assembler.h
index 79f8181af49..9aa15ae1b8e 100644
--- a/chromium/v8/src/ic/accessor-assembler.h
+++ b/chromium/v8/src/ic/accessor-assembler.h
@@ -311,6 +311,12 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler {
TVariable<MaybeObject>* var_handler,
Label* if_miss);
+ void TryMegaDOMCase(TNode<Object> lookup_start_object,
+ TNode<Map> lookup_start_object_map,
+ TVariable<MaybeObject>* var_handler, TNode<Object> vector,
+ TNode<TaggedIndex> slot, Label* miss,
+ ExitPoint* exit_point);
+
// LoadIC implementation.
void HandleLoadICHandlerCase(
const LazyLoadICParameters* p, TNode<Object> handler, Label* miss,
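
The LoadIC_Noninlined change above splits the old single megamorphic check into a three-way dispatch on the feedback sentinel: the megamorphic symbol still probes the stub cache, the new mega-DOM symbol routes to TryMegaDOMCase, and anything else is a miss. A rough standalone sketch of that control flow (types and names are illustrative, not the V8 API):

#include <cstdio>

enum class Feedback { kMonomorphic, kMegamorphic, kMegaDOM };

void ProbeStubCache() { std::puts("probe stub cache"); }
void TryMegaDOM()     { std::puts("try mega-DOM handler"); }
void Miss()           { std::puts("miss: fall back to runtime"); }

void LoadNoninlined(Feedback feedback) {
  switch (feedback) {
    case Feedback::kMegamorphic:
      ProbeStubCache();   // the only non-miss path before this change
      break;
    case Feedback::kMegaDOM:
      TryMegaDOM();       // new path added by this change
      break;
    default:
      Miss();
  }
}

int main() { LoadNoninlined(Feedback::kMegaDOM); }
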
diff --git a/chromium/v8/src/ic/call-optimization.cc b/chromium/v8/src/ic/call-optimization.cc
index 6521e831939..4cfd1464be2 100644
--- a/chromium/v8/src/ic/call-optimization.cc
+++ b/chromium/v8/src/ic/call-optimization.cc
@@ -8,11 +8,8 @@
namespace v8 {
namespace internal {
-CallOptimization::CallOptimization(Isolate* isolate, Handle<Object> function) {
- constant_function_ = Handle<JSFunction>::null();
- is_simple_api_call_ = false;
- expected_receiver_type_ = Handle<FunctionTemplateInfo>::null();
- api_call_info_ = Handle<CallHandlerInfo>::null();
+template <class IsolateT>
+CallOptimization::CallOptimization(IsolateT* isolate, Handle<Object> function) {
if (function->IsJSFunction()) {
Initialize(isolate, Handle<JSFunction>::cast(function));
} else if (function->IsFunctionTemplateInfo()) {
@@ -20,6 +17,12 @@ CallOptimization::CallOptimization(Isolate* isolate, Handle<Object> function) {
}
}
+// Instantiations.
+template CallOptimization::CallOptimization(Isolate* isolate,
+ Handle<Object> function);
+template CallOptimization::CallOptimization(LocalIsolate* isolate,
+ Handle<Object> function);
+
Context CallOptimization::GetAccessorContext(Map holder_map) const {
if (is_constant_call()) {
return constant_function_->context().native_context();
@@ -35,8 +38,10 @@ bool CallOptimization::IsCrossContextLazyAccessorPair(Context native_context,
return native_context != GetAccessorContext(holder_map);
}
+template <class IsolateT>
Handle<JSObject> CallOptimization::LookupHolderOfExpectedType(
- Handle<Map> object_map, HolderLookup* holder_lookup) const {
+ IsolateT* isolate, Handle<Map> object_map,
+ HolderLookup* holder_lookup) const {
DCHECK(is_simple_api_call());
if (!object_map->IsJSObjectMap()) {
*holder_lookup = kHolderNotFound;
@@ -49,8 +54,8 @@ Handle<JSObject> CallOptimization::LookupHolderOfExpectedType(
}
if (object_map->IsJSGlobalProxyMap() && !object_map->prototype().IsNull()) {
JSObject raw_prototype = JSObject::cast(object_map->prototype());
- Handle<JSObject> prototype(raw_prototype, raw_prototype.GetIsolate());
- object_map = handle(prototype->map(), prototype->GetIsolate());
+ Handle<JSObject> prototype(raw_prototype, isolate);
+ object_map = handle(prototype->map(), isolate);
if (expected_receiver_type_->IsTemplateFor(*object_map)) {
*holder_lookup = kHolderFound;
return prototype;
@@ -60,6 +65,14 @@ Handle<JSObject> CallOptimization::LookupHolderOfExpectedType(
return Handle<JSObject>::null();
}
+// Instantiations.
+template Handle<JSObject> CallOptimization::LookupHolderOfExpectedType(
+ Isolate* isolate, Handle<Map> object_map,
+ HolderLookup* holder_lookup) const;
+template Handle<JSObject> CallOptimization::LookupHolderOfExpectedType(
+ LocalIsolate* isolate, Handle<Map> object_map,
+ HolderLookup* holder_lookup) const;
+
bool CallOptimization::IsCompatibleReceiverMap(
Handle<JSObject> api_holder, Handle<JSObject> holder,
HolderLookup holder_lookup) const {
@@ -86,8 +99,9 @@ bool CallOptimization::IsCompatibleReceiverMap(
UNREACHABLE();
}
+template <class IsolateT>
void CallOptimization::Initialize(
- Isolate* isolate, Handle<FunctionTemplateInfo> function_template_info) {
+ IsolateT* isolate, Handle<FunctionTemplateInfo> function_template_info) {
HeapObject call_code = function_template_info->call_code(kAcquireLoad);
if (call_code.IsUndefined(isolate)) return;
api_call_info_ = handle(CallHandlerInfo::cast(call_code), isolate);
@@ -98,9 +112,11 @@ void CallOptimization::Initialize(
handle(FunctionTemplateInfo::cast(signature), isolate);
}
is_simple_api_call_ = true;
+ accept_any_receiver_ = function_template_info->accept_any_receiver();
}
-void CallOptimization::Initialize(Isolate* isolate,
+template <class IsolateT>
+void CallOptimization::Initialize(IsolateT* isolate,
Handle<JSFunction> function) {
if (function.is_null() || !function->is_compiled()) return;
@@ -108,7 +124,8 @@ void CallOptimization::Initialize(Isolate* isolate,
AnalyzePossibleApiFunction(isolate, function);
}
-void CallOptimization::AnalyzePossibleApiFunction(Isolate* isolate,
+template <class IsolateT>
+void CallOptimization::AnalyzePossibleApiFunction(IsolateT* isolate,
Handle<JSFunction> function) {
if (!function->shared().IsApiFunction()) return;
Handle<FunctionTemplateInfo> info(function->shared().get_api_func_data(),
@@ -125,6 +142,7 @@ void CallOptimization::AnalyzePossibleApiFunction(Isolate* isolate,
}
is_simple_api_call_ = true;
+ accept_any_receiver_ = info->accept_any_receiver();
}
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/ic/call-optimization.h b/chromium/v8/src/ic/call-optimization.h
index b6d49a1bf90..ca5c8aa7ef8 100644
--- a/chromium/v8/src/ic/call-optimization.h
+++ b/chromium/v8/src/ic/call-optimization.h
@@ -10,16 +10,22 @@
namespace v8 {
namespace internal {
+
// Holds information about possible function call optimizations.
class CallOptimization {
public:
- CallOptimization(Isolate* isolate, Handle<Object> function);
+ template <class IsolateT>
+ CallOptimization(IsolateT* isolate, Handle<Object> function);
Context GetAccessorContext(Map holder_map) const;
bool IsCrossContextLazyAccessorPair(Context native_context,
Map holder_map) const;
bool is_constant_call() const { return !constant_function_.is_null(); }
+ bool accept_any_receiver() const { return accept_any_receiver_; }
+ bool requires_signature_check() const {
+ return !expected_receiver_type_.is_null();
+ }
Handle<JSFunction> constant_function() const {
DCHECK(is_constant_call());
@@ -39,27 +45,38 @@ class CallOptimization {
}
enum HolderLookup { kHolderNotFound, kHolderIsReceiver, kHolderFound };
+
+ template <class IsolateT>
Handle<JSObject> LookupHolderOfExpectedType(
- Handle<Map> receiver_map, HolderLookup* holder_lookup) const;
+ IsolateT* isolate, Handle<Map> receiver_map,
+ HolderLookup* holder_lookup) const;
bool IsCompatibleReceiverMap(Handle<JSObject> api_holder,
Handle<JSObject> holder, HolderLookup) const;
private:
- void Initialize(Isolate* isolate, Handle<JSFunction> function);
- void Initialize(Isolate* isolate,
+ template <class IsolateT>
+ void Initialize(IsolateT* isolate, Handle<JSFunction> function);
+ template <class IsolateT>
+ void Initialize(IsolateT* isolate,
Handle<FunctionTemplateInfo> function_template_info);
// Determines whether the given function can be called using the
// fast api call builtin.
- void AnalyzePossibleApiFunction(Isolate* isolate,
+ template <class IsolateT>
+ void AnalyzePossibleApiFunction(IsolateT* isolate,
Handle<JSFunction> function);
Handle<JSFunction> constant_function_;
- bool is_simple_api_call_;
Handle<FunctionTemplateInfo> expected_receiver_type_;
Handle<CallHandlerInfo> api_call_info_;
+
+ // TODO(gsathya): Change these to be a bitfield and do a single fast check
+ // rather than two checks.
+ bool is_simple_api_call_ = false;
+ bool accept_any_receiver_ = false;
};
+
} // namespace internal
} // namespace v8
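
The CallOptimization change above follows a common pattern: the member functions become templates over the isolate type so the same logic serves both Isolate and LocalIsolate, the definitions stay in the .cc file, and explicit instantiations (the "// Instantiations." blocks) keep the linker happy. A small self-contained sketch of that pattern with placeholder isolate types:

#include <cstdio>

struct MainIsolate  { const char* name() const { return "main"; } };
struct LocalIsolate { const char* name() const { return "local"; } };

class Optimizer {
 public:
  // Declared in the header; defined and instantiated below ("in the .cc").
  template <class IsolateT>
  explicit Optimizer(IsolateT* isolate);
};

template <class IsolateT>
Optimizer::Optimizer(IsolateT* isolate) {
  std::printf("initialized against the %s isolate\n", isolate->name());
}

// Explicit instantiations, mirroring the "// Instantiations." blocks above.
template Optimizer::Optimizer(MainIsolate* isolate);
template Optimizer::Optimizer(LocalIsolate* isolate);

int main() {
  MainIsolate main_isolate;
  Optimizer optimizer(&main_isolate);  // links thanks to the instantiation
}
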
diff --git a/chromium/v8/src/ic/ic.cc b/chromium/v8/src/ic/ic.cc
index 81e31d1c2d8..09f1815c1c0 100644
--- a/chromium/v8/src/ic/ic.cc
+++ b/chromium/v8/src/ic/ic.cc
@@ -33,6 +33,7 @@
#include "src/objects/heap-number-inl.h"
#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/js-array-inl.h"
+#include "src/objects/megadom-handler.h"
#include "src/objects/module-inl.h"
#include "src/objects/prototype.h"
#include "src/objects/struct-inl.h"
@@ -59,6 +60,8 @@ char IC::TransitionMarkFromState(IC::State state) {
return 'P';
case MEGAMORPHIC:
return 'N';
+ case MEGADOM:
+ return 'D';
case GENERIC:
return 'G';
}
@@ -519,9 +522,12 @@ MaybeHandle<Object> LoadGlobalIC::Load(Handle<Name> name,
bool use_ic = (state() != NO_FEEDBACK) && FLAG_use_ic && update_feedback;
if (use_ic) {
+ // 'const' variables are mutable if REPL mode is enabled. This disables
+ // compiler inlining for all 'const' variables declared in REPL mode.
if (nexus()->ConfigureLexicalVarMode(
lookup_result.context_index, lookup_result.slot_index,
- lookup_result.mode == VariableMode::kConst)) {
+ (lookup_result.mode == VariableMode::kConst &&
+ !lookup_result.is_repl_mode))) {
TRACE_HANDLER_STATS(isolate(), LoadGlobalIC_LoadScriptContextField);
} else {
// Given combination of indices can't be encoded, so use slow stub.
@@ -566,6 +572,55 @@ static bool AddOneReceiverMapIfMissing(
return true;
}
+bool IC::UpdateMegaDOMIC(const MaybeObjectHandle& handler, Handle<Name> name) {
+ if (!FLAG_enable_mega_dom_ic) return false;
+
+ // TODO(gsathya): Enable fuzzing once this feature is more stable.
+ if (FLAG_fuzzing) return false;
+
+ // TODO(gsathya): Support KeyedLoadIC, StoreIC and KeyedStoreIC.
+ if (!IsLoadIC()) return false;
+
+ // Check if DOM protector cell is valid.
+ if (!Protectors::IsMegaDOMIntact(isolate())) return false;
+
+ // Check if the current lookup object is an API object.
+ Handle<Map> map = lookup_start_object_map();
+ if (!InstanceTypeChecker::IsJSApiObject(map->instance_type())) return false;
+
+ Handle<Object> accessor_obj;
+ // TODO(gsathya): Check if there are overloads possible for this accessor and
+ // transition only if it isn't possible.
+ if (!accessor().ToHandle(&accessor_obj)) return false;
+
+ // TODO(gsathya): This is also created in IC::ComputeHandler, find a way to
+ // reuse it here.
+ CallOptimization call_optimization(isolate(), accessor_obj);
+
+ // Check if the accessor is an API function.
+ if (!call_optimization.is_simple_api_call()) return false;
+
+ // Check if the accessor requires access checks.
+ if (call_optimization.accept_any_receiver()) return false;
+
+ // Check if the accessor requires signature checks.
+ if (!call_optimization.requires_signature_check()) return false;
+
+ // Check if the receiver is the holder.
+ CallOptimization::HolderLookup holder_lookup;
+ call_optimization.LookupHolderOfExpectedType(isolate(), map, &holder_lookup);
+ if (holder_lookup != CallOptimization::kHolderIsReceiver) return false;
+
+ Handle<Context> accessor_context(call_optimization.GetAccessorContext(*map),
+ isolate());
+
+ Handle<MegaDomHandler> new_handler = isolate()->factory()->NewMegaDomHandler(
+ MaybeObjectHandle::Weak(accessor_obj),
+ MaybeObjectHandle::Weak(accessor_context));
+ nexus()->ConfigureMegaDOM(MaybeObjectHandle(new_handler));
+ return true;
+}
+
bool IC::UpdatePolymorphicIC(Handle<Name> name,
const MaybeObjectHandle& handler) {
DCHECK(IsHandler(*handler));
@@ -703,9 +758,12 @@ void IC::SetCache(Handle<Name> name, const MaybeObjectHandle& handler) {
V8_FALLTHROUGH;
case POLYMORPHIC:
if (UpdatePolymorphicIC(name, handler)) break;
+ if (UpdateMegaDOMIC(handler, name)) break;
if (!is_keyed() || state() == RECOMPUTE_HANDLER) {
CopyICToMegamorphicCache(name);
}
+ V8_FALLTHROUGH;
+ case MEGADOM:
ConfigureVectorState(MEGAMORPHIC, name);
V8_FALLTHROUGH;
case MEGAMORPHIC:
@@ -719,23 +777,20 @@ void IC::SetCache(Handle<Name> name, const MaybeObjectHandle& handler) {
}
void LoadIC::UpdateCaches(LookupIterator* lookup) {
- Handle<Object> code;
+ Handle<Object> handler;
if (lookup->state() == LookupIterator::ACCESS_CHECK) {
- code = LoadHandler::LoadSlow(isolate());
+ handler = LoadHandler::LoadSlow(isolate());
} else if (!lookup->IsFound()) {
TRACE_HANDLER_STATS(isolate(), LoadIC_LoadNonexistentDH);
Handle<Smi> smi_handler = LoadHandler::LoadNonExistent(isolate());
- code = LoadHandler::LoadFullChain(
+ handler = LoadHandler::LoadFullChain(
isolate(), lookup_start_object_map(),
MaybeObjectHandle(isolate()->factory()->null_value()), smi_handler);
} else if (IsLoadGlobalIC() && lookup->state() == LookupIterator::JSPROXY) {
// If there is proxy just install the slow stub since we need to call the
// HasProperty trap for global loads. The ProxyGetProperty builtin doesn't
// handle this case.
- Handle<Smi> slow_handler = LoadHandler::LoadSlow(isolate());
- Handle<JSProxy> holder = lookup->GetHolder<JSProxy>();
- code = LoadHandler::LoadFromPrototype(isolate(), lookup_start_object_map(),
- holder, slow_handler);
+ handler = LoadHandler::LoadSlow(isolate());
} else {
if (IsLoadGlobalIC()) {
if (lookup->TryLookupCachedProperty()) {
@@ -750,12 +805,12 @@ void LoadIC::UpdateCaches(LookupIterator* lookup) {
return;
}
}
- code = ComputeHandler(lookup);
+ handler = ComputeHandler(lookup);
}
// Can't use {lookup->name()} because the LookupIterator might be in
// "elements" mode for keys that are strings representing integers above
// JSArray::kMaxIndex.
- SetCache(lookup->GetName(), code);
+ SetCache(lookup->GetName(), handler);
TraceIC("LoadIC", lookup->GetName());
}
@@ -877,6 +932,7 @@ Handle<Object> LoadIC::ComputeHandler(LookupIterator* lookup) {
TRACE_HANDLER_STATS(isolate(), LoadIC_SlowStub);
return LoadHandler::LoadSlow(isolate());
}
+ set_accessor(getter);
if ((getter->IsFunctionTemplateInfo() &&
FunctionTemplateInfo::cast(*getter).BreakAtEntry()) ||
@@ -893,7 +949,8 @@ Handle<Object> LoadIC::ComputeHandler(LookupIterator* lookup) {
if (call_optimization.is_simple_api_call()) {
CallOptimization::HolderLookup holder_lookup;
Handle<JSObject> api_holder =
- call_optimization.LookupHolderOfExpectedType(map, &holder_lookup);
+ call_optimization.LookupHolderOfExpectedType(isolate(), map,
+ &holder_lookup);
if (!call_optimization.IsCompatibleReceiverMap(api_holder, holder,
holder_lookup) ||
@@ -1209,7 +1266,7 @@ Handle<Object> KeyedLoadIC::LoadElementHandler(Handle<Map> receiver_map,
}
DCHECK(IsFastElementsKind(elements_kind) ||
IsAnyNonextensibleElementsKind(elements_kind) ||
- IsTypedArrayElementsKind(elements_kind));
+ IsTypedArrayOrRabGsabTypedArrayElementsKind(elements_kind));
bool convert_hole_to_undefined =
(elements_kind == HOLEY_SMI_ELEMENTS ||
elements_kind == HOLEY_ELEMENTS) &&
@@ -1325,7 +1382,7 @@ bool IsOutOfBoundsAccess(Handle<Object> receiver, size_t index) {
if (receiver->IsJSArray()) {
length = JSArray::cast(*receiver).length().Number();
} else if (receiver->IsJSTypedArray()) {
- length = JSTypedArray::cast(*receiver).length();
+ length = JSTypedArray::cast(*receiver).GetLength();
} else if (receiver->IsJSObject()) {
length = JSObject::cast(*receiver).elements().length();
} else if (receiver->IsString()) {
@@ -1756,7 +1813,7 @@ MaybeObjectHandle StoreIC::ComputeHandler(LookupIterator* lookup) {
CallOptimization::HolderLookup holder_lookup;
Handle<JSObject> api_holder =
call_optimization.LookupHolderOfExpectedType(
- lookup_start_object_map(), &holder_lookup);
+ isolate(), lookup_start_object_map(), &holder_lookup);
if (call_optimization.IsCompatibleReceiverMap(api_holder, holder,
holder_lookup)) {
Handle<Smi> smi_handler = StoreHandler::StoreApiSetter(
@@ -1975,7 +2032,7 @@ void KeyedStoreIC::UpdateStoreElement(Handle<Map> receiver_map,
"unsupported combination of arrays (potentially read-only length)");
return;
- } else if (map->has_typed_array_elements()) {
+ } else if (map->has_typed_array_or_rab_gsab_typed_array_elements()) {
DCHECK(!IsStoreInArrayLiteralICKind(kind()));
external_arrays++;
}
@@ -2029,7 +2086,9 @@ Handle<Object> KeyedStoreIC::StoreElementHandler(
receiver_map->has_typed_array_elements()) {
TRACE_HANDLER_STATS(isolate(), KeyedStoreIC_StoreFastElementStub);
code = CodeFactory::StoreFastElementIC(isolate(), store_mode).code();
- if (receiver_map->has_typed_array_elements()) return code;
+ if (receiver_map->has_typed_array_elements()) {
+ return code;
+ }
} else if (IsStoreInArrayLiteralICKind(kind())) {
// TODO(jgruber): Update counter name.
TRACE_HANDLER_STATS(isolate(), StoreInArrayLiteralIC_SlowStub);
@@ -2038,7 +2097,9 @@ Handle<Object> KeyedStoreIC::StoreElementHandler(
// TODO(jgruber): Update counter name.
TRACE_HANDLER_STATS(isolate(), KeyedStoreIC_StoreElementStub);
DCHECK(DICTIONARY_ELEMENTS == receiver_map->elements_kind() ||
- receiver_map->has_frozen_elements());
+ receiver_map->has_frozen_elements() ||
+ receiver_map->has_rab_gsab_typed_array_elements());
+ // TODO(v8:11111): Add fast paths for RAB / GSAB.
code = StoreHandler::StoreSlow(isolate(), store_mode);
}
@@ -2416,7 +2477,7 @@ RUNTIME_FUNCTION(Runtime_LoadGlobalIC_Miss) {
vector = Handle<FeedbackVector>::cast(maybe_vector);
}
- FeedbackSlotKind kind = (typeof_mode == TypeofMode::INSIDE_TYPEOF)
+ FeedbackSlotKind kind = (typeof_mode == TypeofMode::kInside)
? FeedbackSlotKind::kLoadGlobalInsideTypeof
: FeedbackSlotKind::kLoadGlobalNotInsideTypeof;
LoadGlobalIC ic(isolate, vector, vector_slot, kind);
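
To summarize the SetCache and state changes above: a polymorphic IC first tries to add another polymorphic handler, then tries the new mega-DOM fast path, and otherwise degrades to megamorphic; an IC already in MEGADOM that misses again also degrades to megamorphic (hence the added V8_FALLTHROUGH and the new 'D' trace marker). A compact standalone sketch of those transitions (illustrative stand-ins, not the V8 types):

#include <cstdio>

enum class State { kPolymorphic, kMegaDOM, kMegamorphic };

// Stand-ins for the handler-installation attempts in the real SetCache.
bool TryStayPolymorphic() { return false; }
bool TryMegaDOM()         { return false; }

State NextStateOnMiss(State state) {
  switch (state) {
    case State::kPolymorphic:
      if (TryStayPolymorphic()) return State::kPolymorphic;
      if (TryMegaDOM()) return State::kMegaDOM;
      [[fallthrough]];  // mirrors the added V8_FALLTHROUGH
    case State::kMegaDOM:
    case State::kMegamorphic:
      return State::kMegamorphic;  // terminal: stay (or become) megamorphic
  }
  return state;
}

int main() {
  State s = NextStateOnMiss(State::kPolymorphic);
  std::printf("new state: %d\n", static_cast<int>(s));
}
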
diff --git a/chromium/v8/src/ic/ic.h b/chromium/v8/src/ic/ic.h
index 170ee609cb4..ddac48e38e2 100644
--- a/chromium/v8/src/ic/ic.h
+++ b/chromium/v8/src/ic/ic.h
@@ -67,6 +67,8 @@ class IC {
protected:
void set_slow_stub_reason(const char* reason) { slow_stub_reason_ = reason; }
+ void set_accessor(Handle<Object> accessor) { accessor_ = accessor; }
+ MaybeHandle<Object> accessor() const { return accessor_; }
Isolate* isolate() const { return isolate_; }
@@ -96,6 +98,7 @@ class IC {
MaybeHandle<Object> ReferenceError(Handle<Name> name);
void UpdateMonomorphicIC(const MaybeObjectHandle& handler, Handle<Name> name);
+ bool UpdateMegaDOMIC(const MaybeObjectHandle& handler, Handle<Name> name);
bool UpdatePolymorphicIC(Handle<Name> name, const MaybeObjectHandle& handler);
void UpdateMegamorphicCache(Handle<Map> map, Handle<Name> name,
const MaybeObjectHandle& handler);
@@ -154,7 +157,7 @@ class IC {
State state_;
FeedbackSlotKind kind_;
Handle<Map> lookup_start_object_map_;
-
+ MaybeHandle<Object> accessor_;
MapHandles target_maps_;
bool target_maps_set_;
diff --git a/chromium/v8/src/ic/keyed-store-generic.cc b/chromium/v8/src/ic/keyed-store-generic.cc
index b07ea644836..8218d3d5211 100644
--- a/chromium/v8/src/ic/keyed-store-generic.cc
+++ b/chromium/v8/src/ic/keyed-store-generic.cc
@@ -568,7 +568,8 @@ void KeyedStoreGenericAssembler::EmitGenericElementStore(
// dispatch.
BIND(&if_nonfast);
{
- STATIC_ASSERT(LAST_ELEMENTS_KIND == LAST_FIXED_TYPED_ARRAY_ELEMENTS_KIND);
+ STATIC_ASSERT(LAST_ELEMENTS_KIND ==
+ LAST_RAB_GSAB_FIXED_TYPED_ARRAY_ELEMENTS_KIND);
GotoIf(Int32GreaterThanOrEqual(
elements_kind,
Int32Constant(FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND)),
@@ -588,7 +589,8 @@ void KeyedStoreGenericAssembler::EmitGenericElementStore(
BIND(&if_typed_array);
{
Comment("Typed array");
- // TODO(jkummerow): Support typed arrays.
+ // TODO(jkummerow): Support typed arrays. Note: RAB / GSAB backed typed
+ // arrays end up here too.
Goto(slow);
}
}
diff --git a/chromium/v8/src/init/bootstrapper.cc b/chromium/v8/src/init/bootstrapper.cc
index 7b1e7a196bd..1f00716b631 100644
--- a/chromium/v8/src/init/bootstrapper.cc
+++ b/chromium/v8/src/init/bootstrapper.cc
@@ -235,6 +235,8 @@ class Genesis {
enum ArrayBufferKind {
ARRAY_BUFFER,
SHARED_ARRAY_BUFFER,
+ RESIZABLE_ARRAY_BUFFER,
+ GROWABLE_SHARED_ARRAY_BUFFER
};
Handle<JSFunction> CreateArrayBuffer(Handle<String> name,
ArrayBufferKind array_buffer_kind);
@@ -244,7 +246,8 @@ class Genesis {
Handle<JSFunction> InstallTypedArray(const char* name,
ElementsKind elements_kind,
- InstanceType type);
+ InstanceType type,
+ int rab_gsab_initial_map_index);
void InitializeNormalizedMapCaches();
enum ExtensionTraversalState { UNVISITED, VISITED, INSTALLED };
@@ -364,7 +367,8 @@ void Bootstrapper::DetachGlobal(Handle<Context> env) {
ReadOnlyRoots roots(isolate_);
Handle<JSGlobalProxy> global_proxy(env->global_proxy(), isolate_);
global_proxy->set_native_context(roots.null_value());
- JSObject::ForceSetPrototype(global_proxy, isolate_->factory()->null_value());
+ JSObject::ForceSetPrototype(isolate_, global_proxy,
+ isolate_->factory()->null_value());
global_proxy->map().SetConstructor(roots.null_value());
if (FLAG_track_detached_contexts) {
isolate_->AddDetachedContext(env);
@@ -921,10 +925,11 @@ void Genesis::CreateIteratorMaps(Handle<JSFunction> empty) {
isolate()->object_function(), AllocationType::kOld);
native_context()->set_initial_generator_prototype(
*generator_object_prototype);
- JSObject::ForceSetPrototype(generator_object_prototype, iterator_prototype);
+ JSObject::ForceSetPrototype(isolate(), generator_object_prototype,
+ iterator_prototype);
Handle<JSObject> generator_function_prototype = factory()->NewJSObject(
isolate()->object_function(), AllocationType::kOld);
- JSObject::ForceSetPrototype(generator_function_prototype, empty);
+ JSObject::ForceSetPrototype(isolate(), generator_function_prototype, empty);
InstallToStringTag(isolate(), generator_function_prototype,
"GeneratorFunction");
@@ -1032,7 +1037,7 @@ void Genesis::CreateAsyncIteratorMaps(Handle<JSFunction> empty) {
InstallToStringTag(isolate(), async_from_sync_iterator_prototype,
"Async-from-Sync Iterator");
- JSObject::ForceSetPrototype(async_from_sync_iterator_prototype,
+ JSObject::ForceSetPrototype(isolate(), async_from_sync_iterator_prototype,
async_iterator_prototype);
Handle<Map> async_from_sync_iterator_map = factory()->NewMap(
@@ -1049,7 +1054,8 @@ void Genesis::CreateAsyncIteratorMaps(Handle<JSFunction> empty) {
isolate()->object_function(), AllocationType::kOld);
// %AsyncGenerator% / %AsyncGeneratorFunction%.prototype
- JSObject::ForceSetPrototype(async_generator_function_prototype, empty);
+ JSObject::ForceSetPrototype(isolate(), async_generator_function_prototype,
+ empty);
// The value of AsyncGeneratorFunction.prototype.prototype is the
// %AsyncGeneratorPrototype% intrinsic object.
@@ -1067,7 +1073,7 @@ void Genesis::CreateAsyncIteratorMaps(Handle<JSFunction> empty) {
"AsyncGeneratorFunction");
// %AsyncGeneratorPrototype%
- JSObject::ForceSetPrototype(async_generator_object_prototype,
+ JSObject::ForceSetPrototype(isolate(), async_generator_object_prototype,
async_iterator_prototype);
native_context()->set_initial_async_generator_prototype(
*async_generator_object_prototype);
@@ -1110,7 +1116,7 @@ void Genesis::CreateAsyncFunctionMaps(Handle<JSFunction> empty) {
// %AsyncFunctionPrototype% intrinsic
Handle<JSObject> async_function_prototype = factory()->NewJSObject(
isolate()->object_function(), AllocationType::kOld);
- JSObject::ForceSetPrototype(async_function_prototype, empty);
+ JSObject::ForceSetPrototype(isolate(), async_function_prototype, empty);
InstallToStringTag(isolate(), async_function_prototype, "AsyncFunction");
@@ -1367,7 +1373,7 @@ void Genesis::HookUpGlobalProxy(Handle<JSGlobalProxy> global_proxy) {
factory()->ReinitializeJSGlobalProxy(global_proxy, global_proxy_function);
Handle<JSObject> global_object(
JSObject::cast(native_context()->global_object()), isolate());
- JSObject::ForceSetPrototype(global_proxy, global_object);
+ JSObject::ForceSetPrototype(isolate(), global_proxy, global_object);
global_proxy->set_native_context(*native_context());
DCHECK(native_context()->global_proxy() == *global_proxy);
}
@@ -1394,7 +1400,8 @@ static void InstallWithIntrinsicDefaultProto(Isolate* isolate,
JSObject::AddProperty(isolate, function,
isolate->factory()->native_context_index_symbol(),
index, NONE);
- isolate->native_context()->set(context_index, *function);
+ isolate->native_context()->set(context_index, *function, UPDATE_WRITE_BARRIER,
+ kReleaseStore);
}
static void InstallError(
@@ -1404,6 +1411,11 @@ static void InstallError(
int error_function_length = 1, int in_object_properties = 2) {
Factory* factory = isolate->factory();
+ if (FLAG_harmony_error_cause) {
+ error_function_length += 1;
+ in_object_properties += 1;
+ }
+
// Most Error objects consist of a message and a stack trace.
// Reserve two in-object properties for these.
const int kErrorObjectSize =
@@ -1411,7 +1423,6 @@ static void InstallError(
Handle<JSFunction> error_fun = InstallFunction(
isolate, global, name, JS_ERROR_TYPE, kErrorObjectSize,
in_object_properties, factory->the_hole_value(), error_constructor);
- error_fun->shared().DontAdaptArguments();
error_fun->shared().set_length(error_function_length);
if (context_index == Context::ERROR_FUNCTION_INDEX) {
@@ -1431,6 +1442,11 @@ static void InstallError(
JSObject::AddProperty(isolate, prototype, factory->message_string(),
factory->empty_string(), DONT_ENUM);
+ if (FLAG_harmony_error_cause) {
+ JSObject::AddProperty(isolate, prototype, factory->cause_string(),
+ factory->undefined_value(), DONT_ENUM);
+ }
+
if (context_index == Context::ERROR_FUNCTION_INDEX) {
Handle<JSFunction> to_string_fun =
SimpleInstallFunction(isolate, prototype, "toString",
@@ -1465,7 +1481,6 @@ static void InstallError(
void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<JSFunction> empty_function) {
// --- N a t i v e C o n t e x t ---
- native_context()->set_previous(Context());
// Set extension and global object.
native_context()->set_extension(*global_object);
// Security setup: Set the security token of the native context to the global
@@ -1626,8 +1641,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
JSFunction::kSizeWithPrototype, 0, prototype,
Builtins::kFunctionConstructor);
// Function instances are sloppy by default.
- function_fun->set_prototype_or_initial_map(
- *isolate_->sloppy_function_map());
+ function_fun->set_prototype_or_initial_map(*isolate_->sloppy_function_map(),
+ kReleaseStore);
function_fun->shared().DontAdaptArguments();
function_fun->shared().set_length(1);
InstallWithIntrinsicDefaultProto(isolate_, function_fun,
@@ -1636,8 +1651,10 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
// Setup the methods on the %FunctionPrototype%.
JSObject::AddProperty(isolate_, prototype, factory->constructor_string(),
function_fun, DONT_ENUM);
- SimpleInstallFunction(isolate_, prototype, "apply",
- Builtins::kFunctionPrototypeApply, 2, false);
+ Handle<JSFunction> function_prototype_apply =
+ SimpleInstallFunction(isolate_, prototype, "apply",
+ Builtins::kFunctionPrototypeApply, 2, false);
+ native_context()->set_function_prototype_apply(*function_prototype_apply);
SimpleInstallFunction(isolate_, prototype, "bind",
Builtins::kFastFunctionPrototypeBind, 1, false);
SimpleInstallFunction(isolate_, prototype, "call",
@@ -1829,7 +1846,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<JSObject> array_iterator_prototype =
factory->NewJSObject(isolate_->object_function(), AllocationType::kOld);
- JSObject::ForceSetPrototype(array_iterator_prototype, iterator_prototype);
+ JSObject::ForceSetPrototype(isolate(), array_iterator_prototype,
+ iterator_prototype);
CHECK_NE(array_iterator_prototype->map().ptr(),
isolate_->initial_object_prototype()->map().ptr());
array_iterator_prototype->map().set_instance_type(
@@ -2145,7 +2163,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<JSObject> string_iterator_prototype =
factory->NewJSObject(isolate_->object_function(), AllocationType::kOld);
- JSObject::ForceSetPrototype(string_iterator_prototype, iterator_prototype);
+ JSObject::ForceSetPrototype(isolate(), string_iterator_prototype,
+ iterator_prototype);
CHECK_NE(string_iterator_prototype->map().ptr(),
isolate_->initial_object_prototype()->map().ptr());
string_iterator_prototype->map().set_instance_type(
@@ -2623,7 +2642,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<JSObject> regexp_string_iterator_prototype = factory->NewJSObject(
isolate()->object_function(), AllocationType::kOld);
- JSObject::ForceSetPrototype(regexp_string_iterator_prototype,
+ JSObject::ForceSetPrototype(isolate(), regexp_string_iterator_prototype,
iterator_prototype);
InstallToStringTag(isolate(), regexp_string_iterator_prototype,
@@ -3222,7 +3241,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
native_context()->initial_iterator_prototype(), isolate());
Handle<JSObject> prototype = factory->NewJSObject(
isolate()->object_function(), AllocationType::kOld);
- JSObject::ForceSetPrototype(prototype, iterator_prototype);
+ JSObject::ForceSetPrototype(isolate(), prototype, iterator_prototype);
// #sec-%segmentiteratorprototype%.@@tostringtag
//
// %SegmentIteratorPrototype% [ @@toStringTag ]
@@ -3274,6 +3293,25 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
InstallSpeciesGetter(isolate_, shared_array_buffer_fun);
}
+ { // R e s i z a b l e A r r a y B u f f e r
+ Handle<String> name = factory->ResizableArrayBuffer_string();
+ Handle<JSFunction> resizable_array_buffer_fun =
+ CreateArrayBuffer(name, RESIZABLE_ARRAY_BUFFER);
+ InstallWithIntrinsicDefaultProto(isolate_, resizable_array_buffer_fun,
+ Context::RESIZABLE_ARRAY_BUFFER_FUN_INDEX);
+ InstallSpeciesGetter(isolate_, resizable_array_buffer_fun);
+ }
+
+ { // G r o w a b l e S h a r e d A r r a y B u f f e r
+ Handle<String> name = factory->GrowableSharedArrayBuffer_string();
+ Handle<JSFunction> growable_shared_array_buffer_fun =
+ CreateArrayBuffer(name, GROWABLE_SHARED_ARRAY_BUFFER);
+ InstallWithIntrinsicDefaultProto(
+ isolate_, growable_shared_array_buffer_fun,
+ Context::GROWABLE_SHARED_ARRAY_BUFFER_FUN_INDEX);
+ InstallSpeciesGetter(isolate_, growable_shared_array_buffer_fun);
+ }
+
{ // -- A t o m i c s
Handle<JSObject> atomics_object =
factory->NewJSObject(isolate_->object_function(), AllocationType::kOld);
@@ -3403,12 +3441,13 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
}
{// -- T y p e d A r r a y s
-#define INSTALL_TYPED_ARRAY(Type, type, TYPE, ctype) \
- { \
- Handle<JSFunction> fun = InstallTypedArray( \
- #Type "Array", TYPE##_ELEMENTS, TYPE##_TYPED_ARRAY_CONSTRUCTOR_TYPE); \
- InstallWithIntrinsicDefaultProto(isolate_, fun, \
- Context::TYPE##_ARRAY_FUN_INDEX); \
+#define INSTALL_TYPED_ARRAY(Type, type, TYPE, ctype) \
+ { \
+ Handle<JSFunction> fun = InstallTypedArray( \
+ #Type "Array", TYPE##_ELEMENTS, TYPE##_TYPED_ARRAY_CONSTRUCTOR_TYPE, \
+ Context::RAB_GSAB_##TYPE##_ARRAY_MAP_INDEX); \
+ InstallWithIntrinsicDefaultProto(isolate_, fun, \
+ Context::TYPE##_ARRAY_FUN_INDEX); \
}
TYPED_ARRAYS(INSTALL_TYPED_ARRAY)
#undef INSTALL_TYPED_ARRAY
@@ -3862,6 +3901,61 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
native_context()->set_bound_function_with_constructor_map(*map);
}
+ { // -- F i n a l i z a t i o n R e g i s t r y
+ Handle<JSFunction> finalization_registry_fun = InstallFunction(
+ isolate_, global, factory->FinalizationRegistry_string(),
+ JS_FINALIZATION_REGISTRY_TYPE, JSFinalizationRegistry::kHeaderSize, 0,
+ factory->the_hole_value(), Builtins::kFinalizationRegistryConstructor);
+ InstallWithIntrinsicDefaultProto(
+ isolate_, finalization_registry_fun,
+ Context::JS_FINALIZATION_REGISTRY_FUNCTION_INDEX);
+
+ finalization_registry_fun->shared().DontAdaptArguments();
+ finalization_registry_fun->shared().set_length(1);
+
+ Handle<JSObject> finalization_registry_prototype(
+ JSObject::cast(finalization_registry_fun->instance_prototype()),
+ isolate());
+
+ InstallToStringTag(isolate_, finalization_registry_prototype,
+ factory->FinalizationRegistry_string());
+
+ SimpleInstallFunction(isolate_, finalization_registry_prototype, "register",
+ Builtins::kFinalizationRegistryRegister, 2, false);
+
+ SimpleInstallFunction(isolate_, finalization_registry_prototype,
+ "unregister",
+ Builtins::kFinalizationRegistryUnregister, 1, false);
+
+ // The cleanupSome function is created but not exposed, as it is used
+ // internally by InvokeFinalizationRegistryCleanupFromTask.
+ //
+ // It is exposed by FLAG_harmony_weak_refs_with_cleanup_some.
+ Handle<JSFunction> cleanup_some_fun = SimpleCreateFunction(
+ isolate_, factory->InternalizeUtf8String("cleanupSome"),
+ Builtins::kFinalizationRegistryPrototypeCleanupSome, 0, false);
+ native_context()->set_finalization_registry_cleanup_some(*cleanup_some_fun);
+ }
+
+ { // -- W e a k R e f
+ Handle<JSFunction> weak_ref_fun = InstallFunction(
+ isolate_, global, "WeakRef", JS_WEAK_REF_TYPE, JSWeakRef::kHeaderSize,
+ 0, factory->the_hole_value(), Builtins::kWeakRefConstructor);
+ InstallWithIntrinsicDefaultProto(isolate_, weak_ref_fun,
+ Context::JS_WEAK_REF_FUNCTION_INDEX);
+
+ weak_ref_fun->shared().DontAdaptArguments();
+ weak_ref_fun->shared().set_length(1);
+
+ Handle<JSObject> weak_ref_prototype(
+ JSObject::cast(weak_ref_fun->instance_prototype()), isolate());
+
+ InstallToStringTag(isolate_, weak_ref_prototype, factory->WeakRef_string());
+
+ SimpleInstallFunction(isolate_, weak_ref_prototype, "deref",
+ Builtins::kWeakRefDeref, 0, true);
+ }
+
{ // --- sloppy arguments map
Handle<String> arguments_string = factory->Arguments_string();
Handle<JSFunction> function = CreateFunctionForBuiltinWithPrototype(
@@ -3984,7 +4078,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<JSFunction> Genesis::InstallTypedArray(const char* name,
ElementsKind elements_kind,
- InstanceType type) {
+ InstanceType type,
+ int rab_gsab_initial_map_index) {
Handle<JSObject> global =
Handle<JSObject>(native_context()->global_object(), isolate());
@@ -4021,6 +4116,16 @@ Handle<JSFunction> Genesis::InstallTypedArray(const char* name,
prototype->map().set_instance_type(JS_TYPED_ARRAY_PROTOTYPE_TYPE);
InstallConstant(isolate(), prototype, "BYTES_PER_ELEMENT", bytes_per_element);
+
+ // RAB / GSAB backed TypedArrays don't have separate constructors, but they
+ // have their own maps. Create the corresponding map here.
+ Handle<Map> rab_gsab_initial_map = factory()->NewMap(
+ JS_TYPED_ARRAY_TYPE, JSTypedArray::kSizeWithEmbedderFields,
+ GetCorrespondingRabGsabElementsKind(elements_kind), 0);
+ native_context()->set(rab_gsab_initial_map_index, *rab_gsab_initial_map,
+ UPDATE_WRITE_BARRIER, kReleaseStore);
+ Map::SetPrototype(isolate(), rab_gsab_initial_map, prototype);
+
return result;
}
@@ -4099,14 +4204,14 @@ void Genesis::InitializeIteratorFunctions() {
JSFunction::kSizeWithPrototype, 0, generator_function_prototype,
Builtins::kGeneratorFunctionConstructor);
generator_function_function->set_prototype_or_initial_map(
- native_context->generator_function_map());
+ native_context->generator_function_map(), kReleaseStore);
generator_function_function->shared().DontAdaptArguments();
generator_function_function->shared().set_length(1);
InstallWithIntrinsicDefaultProto(
isolate, generator_function_function,
Context::GENERATOR_FUNCTION_FUNCTION_INDEX);
- JSObject::ForceSetPrototype(generator_function_function,
+ JSObject::ForceSetPrototype(isolate, generator_function_function,
isolate->function_function());
JSObject::AddProperty(
isolate, generator_function_prototype, factory->constructor_string(),
@@ -4128,14 +4233,14 @@ void Genesis::InitializeIteratorFunctions() {
JSFunction::kSizeWithPrototype, 0, async_generator_function_prototype,
Builtins::kAsyncGeneratorFunctionConstructor);
async_generator_function_function->set_prototype_or_initial_map(
- native_context->async_generator_function_map());
+ native_context->async_generator_function_map(), kReleaseStore);
async_generator_function_function->shared().DontAdaptArguments();
async_generator_function_function->shared().set_length(1);
InstallWithIntrinsicDefaultProto(
isolate, async_generator_function_function,
Context::ASYNC_GENERATOR_FUNCTION_FUNCTION_INDEX);
- JSObject::ForceSetPrototype(async_generator_function_function,
+ JSObject::ForceSetPrototype(isolate, async_generator_function_function,
isolate->function_function());
JSObject::AddProperty(
@@ -4151,7 +4256,7 @@ void Genesis::InitializeIteratorFunctions() {
// Setup %SetIteratorPrototype%.
Handle<JSObject> prototype =
factory->NewJSObject(isolate->object_function(), AllocationType::kOld);
- JSObject::ForceSetPrototype(prototype, iterator_prototype);
+ JSObject::ForceSetPrototype(isolate, prototype, iterator_prototype);
InstallToStringTag(isolate, prototype, factory->SetIterator_string());
@@ -4184,7 +4289,7 @@ void Genesis::InitializeIteratorFunctions() {
// Setup %MapIteratorPrototype%.
Handle<JSObject> prototype =
factory->NewJSObject(isolate->object_function(), AllocationType::kOld);
- JSObject::ForceSetPrototype(prototype, iterator_prototype);
+ JSObject::ForceSetPrototype(isolate, prototype, iterator_prototype);
InstallToStringTag(isolate, prototype, factory->MapIterator_string());
@@ -4229,11 +4334,11 @@ void Genesis::InitializeIteratorFunctions() {
JSFunction::kSizeWithPrototype, 0, async_function_prototype,
Builtins::kAsyncFunctionConstructor);
async_function_constructor->set_prototype_or_initial_map(
- native_context->async_function_map());
+ native_context->async_function_map(), kReleaseStore);
async_function_constructor->shared().DontAdaptArguments();
async_function_constructor->shared().set_length(1);
native_context->set_async_function_constructor(*async_function_constructor);
- JSObject::ForceSetPrototype(async_function_constructor,
+ JSObject::ForceSetPrototype(isolate, async_function_constructor,
isolate->function_function());
JSObject::AddProperty(
@@ -4325,6 +4430,7 @@ EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_top_level_await)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_import_assertions)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_private_brand_checks)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_class_static_blocks)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_error_cause)
#ifdef V8_INTL_SUPPORT
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_intl_best_fit_matcher)
@@ -4335,7 +4441,10 @@ EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_intl_dateformat_day_period)
#undef EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE
void Genesis::InitializeGlobal_harmony_sharedarraybuffer() {
- if (!FLAG_harmony_sharedarraybuffer) return;
+ if (!FLAG_harmony_sharedarraybuffer ||
+ FLAG_enable_sharedarraybuffer_per_context) {
+ return;
+ }
Handle<JSGlobalObject> global(native_context()->global_object(), isolate());
@@ -4353,75 +4462,8 @@ void Genesis::InitializeGlobal_harmony_atomics() {
InstallToStringTag(isolate_, isolate()->atomics_object(), "Atomics");
}
-void Genesis::InitializeGlobal_harmony_weak_refs() {
- if (!FLAG_harmony_weak_refs) return;
-
- Factory* factory = isolate()->factory();
- Handle<JSGlobalObject> global(native_context()->global_object(), isolate());
-
- {
- // Create %FinalizationRegistry%
- Handle<JSFunction> finalization_registry_fun = InstallFunction(
- isolate(), global, factory->FinalizationRegistry_string(),
- JS_FINALIZATION_REGISTRY_TYPE, JSFinalizationRegistry::kHeaderSize, 0,
- factory->the_hole_value(), Builtins::kFinalizationRegistryConstructor);
- InstallWithIntrinsicDefaultProto(
- isolate(), finalization_registry_fun,
- Context::JS_FINALIZATION_REGISTRY_FUNCTION_INDEX);
-
- finalization_registry_fun->shared().DontAdaptArguments();
- finalization_registry_fun->shared().set_length(1);
-
- Handle<JSObject> finalization_registry_prototype(
- JSObject::cast(finalization_registry_fun->instance_prototype()),
- isolate());
-
- InstallToStringTag(isolate(), finalization_registry_prototype,
- factory->FinalizationRegistry_string());
-
- SimpleInstallFunction(isolate(), finalization_registry_prototype,
- "register", Builtins::kFinalizationRegistryRegister,
- 2, false);
-
- SimpleInstallFunction(isolate(), finalization_registry_prototype,
- "unregister",
- Builtins::kFinalizationRegistryUnregister, 1, false);
-
- // The cleanupSome function is created but not exposed, as it is used
- // internally by InvokeFinalizationRegistryCleanupFromTask.
- //
- // It is exposed by FLAG_harmony_weak_refs_with_cleanup_some.
- Handle<JSFunction> cleanup_some_fun = SimpleCreateFunction(
- isolate(), factory->InternalizeUtf8String("cleanupSome"),
- Builtins::kFinalizationRegistryPrototypeCleanupSome, 0, false);
- native_context()->set_finalization_registry_cleanup_some(*cleanup_some_fun);
- }
- {
- // Create %WeakRef%
- Handle<JSFunction> weak_ref_fun = InstallFunction(
- isolate(), global, factory->WeakRef_string(), JS_WEAK_REF_TYPE,
- JSWeakRef::kHeaderSize, 0, factory->the_hole_value(),
- Builtins::kWeakRefConstructor);
- InstallWithIntrinsicDefaultProto(isolate(), weak_ref_fun,
- Context::JS_WEAK_REF_FUNCTION_INDEX);
-
- weak_ref_fun->shared().DontAdaptArguments();
- weak_ref_fun->shared().set_length(1);
-
- Handle<JSObject> weak_ref_prototype(
- JSObject::cast(weak_ref_fun->instance_prototype()), isolate());
-
- InstallToStringTag(isolate(), weak_ref_prototype,
- factory->WeakRef_string());
-
- SimpleInstallFunction(isolate(), weak_ref_prototype, "deref",
- Builtins::kWeakRefDeref, 0, true);
- }
-}
-
void Genesis::InitializeGlobal_harmony_weak_refs_with_cleanup_some() {
if (!FLAG_harmony_weak_refs_with_cleanup_some) return;
- DCHECK(FLAG_harmony_weak_refs);
Handle<JSFunction> finalization_registry_fun =
isolate()->js_finalization_registry_fun();
@@ -4519,6 +4561,45 @@ void Genesis::InitializeGlobal_harmony_relative_indexing_methods() {
}
}
+#ifdef V8_INTL_SUPPORT
+
+void Genesis::InitializeGlobal_harmony_intl_locale_info() {
+ if (!FLAG_harmony_intl_locale_info) return;
+ Handle<JSObject> prototype(
+ JSObject::cast(native_context()->intl_locale_function().prototype()),
+ isolate_);
+ SimpleInstallGetter(isolate(), prototype, factory()->calendars_string(),
+ Builtins::kLocalePrototypeCalendars, true);
+ SimpleInstallGetter(isolate(), prototype, factory()->collations_string(),
+ Builtins::kLocalePrototypeCollations, true);
+ SimpleInstallGetter(isolate(), prototype, factory()->hourCycles_string(),
+ Builtins::kLocalePrototypeHourCycles, true);
+ SimpleInstallGetter(isolate(), prototype,
+ factory()->numberingSystems_string(),
+ Builtins::kLocalePrototypeNumberingSystems, true);
+ SimpleInstallGetter(isolate(), prototype, factory()->textInfo_string(),
+ Builtins::kLocalePrototypeTextInfo, true);
+ SimpleInstallGetter(isolate(), prototype, factory()->timeZones_string(),
+ Builtins::kLocalePrototypeTimeZones, true);
+ SimpleInstallGetter(isolate(), prototype, factory()->weekInfo_string(),
+ Builtins::kLocalePrototypeWeekInfo, true);
+}
+
+#endif // V8_INTL_SUPPORT
+
+void Genesis::InitializeGlobal_harmony_rab_gsab() {
+ if (!FLAG_harmony_rab_gsab) return;
+
+ Handle<JSGlobalObject> global(native_context()->global_object(), isolate());
+
+ JSObject::AddProperty(isolate_, global, "ResizableArrayBuffer",
+ isolate()->resizable_array_buffer_fun(), DONT_ENUM);
+
+ JSObject::AddProperty(isolate_, global, "GrowableSharedArrayBuffer",
+ isolate()->growable_shared_array_buffer_fun(),
+ DONT_ENUM);
+}
+
Handle<JSFunction> Genesis::CreateArrayBuffer(
Handle<String> name, ArrayBufferKind array_buffer_kind) {
// Create the %ArrayBufferPrototype%
@@ -4562,6 +4643,28 @@ Handle<JSFunction> Genesis::CreateArrayBuffer(
Builtins::kSharedArrayBufferPrototypeSlice, 2,
true);
break;
+ case RESIZABLE_ARRAY_BUFFER:
+ SimpleInstallGetter(isolate(), prototype, factory()->byte_length_string(),
+ Builtins::kResizableArrayBufferPrototypeGetByteLength,
+ false);
+ SimpleInstallGetter(
+ isolate(), prototype, factory()->max_byte_length_string(),
+ Builtins::kResizableArrayBufferPrototypeGetMaxByteLength, false);
+ SimpleInstallFunction(isolate(), prototype, "resize",
+ Builtins::kResizableArrayBufferPrototypeResize, 1,
+ true);
+ break;
+ case GROWABLE_SHARED_ARRAY_BUFFER:
+ SimpleInstallGetter(
+ isolate(), prototype, factory()->byte_length_string(),
+ Builtins::kGrowableSharedArrayBufferPrototypeGetByteLength, true);
+ SimpleInstallGetter(
+ isolate(), prototype, factory()->max_byte_length_string(),
+ Builtins::kGrowableSharedArrayBufferPrototypeGetMaxByteLength, false);
+ SimpleInstallFunction(isolate(), prototype, "grow",
+ Builtins::kGrowableSharedArrayBufferPrototypeGrow,
+ 1, true);
+ break;
}
return array_buffer_fun;
@@ -5097,7 +5200,7 @@ bool Genesis::ConfigureGlobalObjects(
}
}
- JSObject::ForceSetPrototype(global_proxy, global_object);
+ JSObject::ForceSetPrototype(isolate(), global_proxy, global_object);
native_context()->set_array_buffer_map(
native_context()->array_buffer_fun().initial_map());
@@ -5262,7 +5365,7 @@ void Genesis::TransferObject(Handle<JSObject> from, Handle<JSObject> to) {
// Transfer the prototype (new map is needed).
Handle<HeapObject> proto(from->map().prototype(), isolate());
- JSObject::ForceSetPrototype(to, proto);
+ JSObject::ForceSetPrototype(isolate(), to, proto);
}
Handle<Map> Genesis::CreateInitialMapForArraySubclass(int size,
@@ -5312,7 +5415,7 @@ Genesis::Genesis(
v8::DeserializeEmbedderFieldsCallback embedder_fields_deserializer,
v8::MicrotaskQueue* microtask_queue)
: isolate_(isolate), active_(isolate->bootstrapper()) {
- RuntimeCallTimerScope rcs_timer(isolate, RuntimeCallCounterId::kGenesis);
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kGenesis);
result_ = Handle<Context>::null();
global_proxy_ = Handle<JSGlobalProxy>::null();
@@ -5495,7 +5598,7 @@ Genesis::Genesis(Isolate* isolate,
global_proxy->set_native_context(ReadOnlyRoots(heap()).null_value());
// Configure the hidden prototype chain of the global proxy.
- JSObject::ForceSetPrototype(global_proxy, global_object);
+ JSObject::ForceSetPrototype(isolate, global_proxy, global_object);
global_proxy->map().SetConstructor(*global_constructor);
global_proxy_ = global_proxy;
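
The per-TypedArray RAB/GSAB map creation above relies on GetCorrespondingRabGsabElementsKind to pair each regular typed-array kind with its resizable/growable twin. The two kind tables earlier in this patch list both ranges in the same order, so one plausible way to express that correspondence is a constant shift between the ranges; the sketch below assumes exactly that ordering and uses illustrative enum values, not the real V8 ones:

#include <cassert>

// Illustrative subset of kinds, each range listed in the same relative order.
enum ElementsKind {
  UINT8_ELEMENTS = 0, INT8_ELEMENTS, UINT16_ELEMENTS,
  RAB_GSAB_UINT8_ELEMENTS, RAB_GSAB_INT8_ELEMENTS, RAB_GSAB_UINT16_ELEMENTS,
};

// Hypothetical mapping: shift a regular kind into the RAB/GSAB range. This
// only works because both ranges enumerate the kinds in the same order.
ElementsKind ToRabGsabKind(ElementsKind kind) {
  return static_cast<ElementsKind>(kind - UINT8_ELEMENTS +
                                   RAB_GSAB_UINT8_ELEMENTS);
}

int main() {
  assert(ToRabGsabKind(UINT16_ELEMENTS) == RAB_GSAB_UINT16_ELEMENTS);
  assert(ToRabGsabKind(INT8_ELEMENTS) == RAB_GSAB_INT8_ELEMENTS);
}
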
diff --git a/chromium/v8/src/init/heap-symbols.h b/chromium/v8/src/init/heap-symbols.h
index 71eb0545965..4d0213e8d1f 100644
--- a/chromium/v8/src/init/heap-symbols.h
+++ b/chromium/v8/src/init/heap-symbols.h
@@ -12,10 +12,12 @@
V(_, accounting_string, "accounting") \
V(_, breakType_string, "breakType") \
V(_, calendar_string, "calendar") \
+ V(_, calendars_string, "calendars") \
V(_, cardinal_string, "cardinal") \
V(_, caseFirst_string, "caseFirst") \
V(_, compare_string, "compare") \
V(_, collation_string, "collation") \
+ V(_, collations_string, "collations") \
V(_, compact_string, "compact") \
V(_, compactDisplay_string, "compactDisplay") \
V(_, currency_string, "currency") \
@@ -26,6 +28,7 @@
V(_, day_string, "day") \
V(_, dayPeriod_string, "dayPeriod") \
V(_, decimal_string, "decimal") \
+ V(_, direction_string, "direction") \
V(_, endRange_string, "endRange") \
V(_, engineering_string, "engineering") \
V(_, era_string, "era") \
@@ -35,6 +38,7 @@
V(_, exponentSeparator_string, "exponentSeparator") \
V(_, fallback_string, "fallback") \
V(_, first_string, "first") \
+ V(_, firstDay_string, "firstDay") \
V(_, format_string, "format") \
V(_, fraction_string, "fraction") \
V(_, fractionalSecond_string, "fractionalSecond") \
@@ -50,6 +54,7 @@
V(_, hour_string, "hour") \
V(_, hour12_string, "hour12") \
V(_, hourCycle_string, "hourCycle") \
+ V(_, hourCycles_string, "hourCycles") \
V(_, ideo_string, "ideo") \
V(_, ignorePunctuation_string, "ignorePunctuation") \
V(_, Invalid_Date_string, "Invalid Date") \
@@ -63,8 +68,10 @@
V(_, locale_string, "locale") \
V(_, loose_string, "loose") \
V(_, lower_string, "lower") \
+ V(_, ltr_string, "ltr") \
V(_, maximumFractionDigits_string, "maximumFractionDigits") \
V(_, maximumSignificantDigits_string, "maximumSignificantDigits") \
+ V(_, minimalDays_string, "minimalDays") \
V(_, minimumFractionDigits_string, "minimumFractionDigits") \
V(_, minimumIntegerDigits_string, "minimumIntegerDigits") \
V(_, minimumSignificantDigits_string, "minimumSignificantDigits") \
@@ -78,6 +85,7 @@
V(_, notation_string, "notation") \
V(_, normal_string, "normal") \
V(_, numberingSystem_string, "numberingSystem") \
+ V(_, numberingSystems_string, "numberingSystems") \
V(_, numeric_string, "numeric") \
V(_, ordinal_string, "ordinal") \
V(_, percentSign_string, "percentSign") \
@@ -85,6 +93,7 @@
V(_, quarter_string, "quarter") \
V(_, region_string, "region") \
V(_, relatedYear_string, "relatedYear") \
+ V(_, rtl_string, "rtl") \
V(_, scientific_string, "scientific") \
V(_, second_string, "second") \
V(_, segment_string, "segment") \
@@ -99,8 +108,10 @@
V(_, strict_string, "strict") \
V(_, style_string, "style") \
V(_, term_string, "term") \
+ V(_, textInfo_string, "textInfo") \
V(_, timeStyle_string, "timeStyle") \
V(_, timeZone_string, "timeZone") \
+ V(_, timeZones_string, "timeZones") \
V(_, timeZoneName_string, "timeZoneName") \
V(_, type_string, "type") \
V(_, unknown_string, "unknown") \
@@ -111,228 +122,235 @@
V(_, unit_string, "unit") \
V(_, unitDisplay_string, "unitDisplay") \
V(_, weekday_string, "weekday") \
+ V(_, weekendEnd_string, "weekendEnd") \
+ V(_, weekendStart_string, "weekendStart") \
+ V(_, weekInfo_string, "weekInfo") \
V(_, year_string, "year") \
V(_, yearName_string, "yearName")
#else // V8_INTL_SUPPORT
#define INTERNALIZED_STRING_LIST_GENERATOR_INTL(V, _)
#endif // V8_INTL_SUPPORT
-#define INTERNALIZED_STRING_LIST_GENERATOR(V, _) \
- INTERNALIZED_STRING_LIST_GENERATOR_INTL(V, _) \
- V(_, add_string, "add") \
- V(_, AggregateError_string, "AggregateError") \
- V(_, always_string, "always") \
- V(_, anonymous_function_string, "(anonymous function)") \
- V(_, anonymous_string, "anonymous") \
- V(_, apply_string, "apply") \
- V(_, Arguments_string, "Arguments") \
- V(_, arguments_string, "arguments") \
- V(_, arguments_to_string, "[object Arguments]") \
- V(_, Array_string, "Array") \
- V(_, array_to_string, "[object Array]") \
- V(_, ArrayBuffer_string, "ArrayBuffer") \
- V(_, ArrayIterator_string, "Array Iterator") \
- V(_, as_string, "as") \
- V(_, assert_string, "assert") \
- V(_, async_string, "async") \
- V(_, auto_string, "auto") \
- V(_, await_string, "await") \
- V(_, BigInt_string, "BigInt") \
- V(_, bigint_string, "bigint") \
- V(_, BigInt64Array_string, "BigInt64Array") \
- V(_, BigUint64Array_string, "BigUint64Array") \
- V(_, bind_string, "bind") \
- V(_, Boolean_string, "Boolean") \
- V(_, boolean_string, "boolean") \
- V(_, boolean_to_string, "[object Boolean]") \
- V(_, bound__string, "bound ") \
- V(_, buffer_string, "buffer") \
- V(_, byte_length_string, "byteLength") \
- V(_, byte_offset_string, "byteOffset") \
- V(_, CompileError_string, "CompileError") \
- V(_, callee_string, "callee") \
- V(_, caller_string, "caller") \
- V(_, character_string, "character") \
- V(_, closure_string, "(closure)") \
- V(_, code_string, "code") \
- V(_, column_string, "column") \
- V(_, computed_string, "<computed>") \
- V(_, configurable_string, "configurable") \
- V(_, conjunction_string, "conjunction") \
- V(_, construct_string, "construct") \
- V(_, constructor_string, "constructor") \
- V(_, current_string, "current") \
- V(_, Date_string, "Date") \
- V(_, date_to_string, "[object Date]") \
- V(_, default_string, "default") \
- V(_, defineProperty_string, "defineProperty") \
- V(_, deleteProperty_string, "deleteProperty") \
- V(_, disjunction_string, "disjunction") \
- V(_, done_string, "done") \
- V(_, dot_brand_string, ".brand") \
- V(_, dot_catch_string, ".catch") \
- V(_, dot_default_string, ".default") \
- V(_, dot_for_string, ".for") \
- V(_, dot_generator_object_string, ".generator_object") \
- V(_, dot_home_object_string, ".home_object") \
- V(_, dot_result_string, ".result") \
- V(_, dot_repl_result_string, ".repl_result") \
- V(_, dot_static_home_object_string, "._static_home_object") \
- V(_, dot_string, ".") \
- V(_, dot_switch_tag_string, ".switch_tag") \
- V(_, dotAll_string, "dotAll") \
- V(_, enumerable_string, "enumerable") \
- V(_, element_string, "element") \
- V(_, Error_string, "Error") \
- V(_, errors_string, "errors") \
- V(_, error_to_string, "[object Error]") \
- V(_, eval_string, "eval") \
- V(_, EvalError_string, "EvalError") \
- V(_, exec_string, "exec") \
- V(_, false_string, "false") \
- V(_, FinalizationRegistry_string, "FinalizationRegistry") \
- V(_, flags_string, "flags") \
- V(_, Float32Array_string, "Float32Array") \
- V(_, Float64Array_string, "Float64Array") \
- V(_, from_string, "from") \
- V(_, Function_string, "Function") \
- V(_, function_native_code_string, "function () { [native code] }") \
- V(_, function_string, "function") \
- V(_, function_to_string, "[object Function]") \
- V(_, Generator_string, "Generator") \
- V(_, get_space_string, "get ") \
- V(_, get_string, "get") \
- V(_, getOwnPropertyDescriptor_string, "getOwnPropertyDescriptor") \
- V(_, getPrototypeOf_string, "getPrototypeOf") \
- V(_, global_string, "global") \
- V(_, globalThis_string, "globalThis") \
- V(_, groups_string, "groups") \
- V(_, has_string, "has") \
- V(_, has_indices_string, "hasIndices") \
- V(_, ignoreCase_string, "ignoreCase") \
- V(_, illegal_access_string, "illegal access") \
- V(_, illegal_argument_string, "illegal argument") \
- V(_, index_string, "index") \
- V(_, indices_string, "indices") \
- V(_, Infinity_string, "Infinity") \
- V(_, infinity_string, "infinity") \
- V(_, input_string, "input") \
- V(_, Int16Array_string, "Int16Array") \
- V(_, Int32Array_string, "Int32Array") \
- V(_, Int8Array_string, "Int8Array") \
- V(_, isExtensible_string, "isExtensible") \
- V(_, jsMemoryEstimate_string, "jsMemoryEstimate") \
- V(_, jsMemoryRange_string, "jsMemoryRange") \
- V(_, keys_string, "keys") \
- V(_, lastIndex_string, "lastIndex") \
- V(_, length_string, "length") \
- V(_, let_string, "let") \
- V(_, line_string, "line") \
- V(_, linear_string, "linear") \
- V(_, LinkError_string, "LinkError") \
- V(_, long_string, "long") \
- V(_, Map_string, "Map") \
- V(_, MapIterator_string, "Map Iterator") \
- V(_, medium_string, "medium") \
- V(_, message_string, "message") \
- V(_, meta_string, "meta") \
- V(_, minus_Infinity_string, "-Infinity") \
- V(_, Module_string, "Module") \
- V(_, multiline_string, "multiline") \
- V(_, name_string, "name") \
- V(_, NaN_string, "NaN") \
- V(_, narrow_string, "narrow") \
- V(_, native_string, "native") \
- V(_, new_target_string, ".new.target") \
- V(_, next_string, "next") \
- V(_, NFC_string, "NFC") \
- V(_, NFD_string, "NFD") \
- V(_, NFKC_string, "NFKC") \
- V(_, NFKD_string, "NFKD") \
- V(_, not_equal_string, "not-equal") \
- V(_, null_string, "null") \
- V(_, null_to_string, "[object Null]") \
- V(_, Number_string, "Number") \
- V(_, number_string, "number") \
- V(_, number_to_string, "[object Number]") \
- V(_, Object_string, "Object") \
- V(_, object_string, "object") \
- V(_, object_to_string, "[object Object]") \
- V(_, of_string, "of") \
- V(_, ok_string, "ok") \
- V(_, one_string, "1") \
- V(_, other_string, "other") \
- V(_, ownKeys_string, "ownKeys") \
- V(_, percent_string, "percent") \
- V(_, position_string, "position") \
- V(_, preventExtensions_string, "preventExtensions") \
- V(_, private_constructor_string, "#constructor") \
- V(_, Promise_string, "Promise") \
- V(_, proto_string, "__proto__") \
- V(_, prototype_string, "prototype") \
- V(_, proxy_string, "proxy") \
- V(_, Proxy_string, "Proxy") \
- V(_, query_colon_string, "(?:)") \
- V(_, RangeError_string, "RangeError") \
- V(_, raw_string, "raw") \
- V(_, ReferenceError_string, "ReferenceError") \
- V(_, ReflectGet_string, "Reflect.get") \
- V(_, ReflectHas_string, "Reflect.has") \
- V(_, RegExp_string, "RegExp") \
- V(_, regexp_to_string, "[object RegExp]") \
- V(_, resolve_string, "resolve") \
- V(_, return_string, "return") \
- V(_, revoke_string, "revoke") \
- V(_, RuntimeError_string, "RuntimeError") \
- V(_, Script_string, "Script") \
- V(_, script_string, "script") \
- V(_, short_string, "short") \
- V(_, Set_string, "Set") \
- V(_, sentence_string, "sentence") \
- V(_, set_space_string, "set ") \
- V(_, set_string, "set") \
- V(_, SetIterator_string, "Set Iterator") \
- V(_, setPrototypeOf_string, "setPrototypeOf") \
- V(_, SharedArrayBuffer_string, "SharedArrayBuffer") \
- V(_, source_string, "source") \
- V(_, sourceText_string, "sourceText") \
- V(_, stack_string, "stack") \
- V(_, stackTraceLimit_string, "stackTraceLimit") \
- V(_, sticky_string, "sticky") \
- V(_, String_string, "String") \
- V(_, string_string, "string") \
- V(_, string_to_string, "[object String]") \
- V(_, symbol_species_string, "[Symbol.species]") \
- V(_, Symbol_string, "Symbol") \
- V(_, symbol_string, "symbol") \
- V(_, SyntaxError_string, "SyntaxError") \
- V(_, target_string, "target") \
- V(_, then_string, "then") \
- V(_, this_function_string, ".this_function") \
- V(_, this_string, "this") \
- V(_, throw_string, "throw") \
- V(_, timed_out_string, "timed-out") \
- V(_, toJSON_string, "toJSON") \
- V(_, toString_string, "toString") \
- V(_, true_string, "true") \
- V(_, total_string, "total") \
- V(_, TypeError_string, "TypeError") \
- V(_, Uint16Array_string, "Uint16Array") \
- V(_, Uint32Array_string, "Uint32Array") \
- V(_, Uint8Array_string, "Uint8Array") \
- V(_, Uint8ClampedArray_string, "Uint8ClampedArray") \
- V(_, undefined_string, "undefined") \
- V(_, undefined_to_string, "[object Undefined]") \
- V(_, unicode_string, "unicode") \
- V(_, URIError_string, "URIError") \
- V(_, value_string, "value") \
- V(_, valueOf_string, "valueOf") \
- V(_, WeakMap_string, "WeakMap") \
- V(_, WeakRef_string, "WeakRef") \
- V(_, WeakSet_string, "WeakSet") \
- V(_, week_string, "week") \
- V(_, word_string, "word") \
- V(_, writable_string, "writable") \
+#define INTERNALIZED_STRING_LIST_GENERATOR(V, _) \
+ INTERNALIZED_STRING_LIST_GENERATOR_INTL(V, _) \
+ V(_, add_string, "add") \
+ V(_, AggregateError_string, "AggregateError") \
+ V(_, always_string, "always") \
+ V(_, anonymous_function_string, "(anonymous function)") \
+ V(_, anonymous_string, "anonymous") \
+ V(_, apply_string, "apply") \
+ V(_, Arguments_string, "Arguments") \
+ V(_, arguments_string, "arguments") \
+ V(_, arguments_to_string, "[object Arguments]") \
+ V(_, Array_string, "Array") \
+ V(_, array_to_string, "[object Array]") \
+ V(_, ArrayBuffer_string, "ArrayBuffer") \
+ V(_, ArrayIterator_string, "Array Iterator") \
+ V(_, as_string, "as") \
+ V(_, assert_string, "assert") \
+ V(_, async_string, "async") \
+ V(_, auto_string, "auto") \
+ V(_, await_string, "await") \
+ V(_, BigInt_string, "BigInt") \
+ V(_, bigint_string, "bigint") \
+ V(_, BigInt64Array_string, "BigInt64Array") \
+ V(_, BigUint64Array_string, "BigUint64Array") \
+ V(_, bind_string, "bind") \
+ V(_, Boolean_string, "Boolean") \
+ V(_, boolean_string, "boolean") \
+ V(_, boolean_to_string, "[object Boolean]") \
+ V(_, bound__string, "bound ") \
+ V(_, buffer_string, "buffer") \
+ V(_, byte_length_string, "byteLength") \
+ V(_, byte_offset_string, "byteOffset") \
+ V(_, CompileError_string, "CompileError") \
+ V(_, callee_string, "callee") \
+ V(_, caller_string, "caller") \
+ V(_, cause_string, "cause") \
+ V(_, character_string, "character") \
+ V(_, closure_string, "(closure)") \
+ V(_, code_string, "code") \
+ V(_, column_string, "column") \
+ V(_, computed_string, "<computed>") \
+ V(_, configurable_string, "configurable") \
+ V(_, conjunction_string, "conjunction") \
+ V(_, construct_string, "construct") \
+ V(_, constructor_string, "constructor") \
+ V(_, current_string, "current") \
+ V(_, Date_string, "Date") \
+ V(_, date_to_string, "[object Date]") \
+ V(_, default_string, "default") \
+ V(_, defineProperty_string, "defineProperty") \
+ V(_, deleteProperty_string, "deleteProperty") \
+ V(_, disjunction_string, "disjunction") \
+ V(_, done_string, "done") \
+ V(_, dot_brand_string, ".brand") \
+ V(_, dot_catch_string, ".catch") \
+ V(_, dot_default_string, ".default") \
+ V(_, dot_for_string, ".for") \
+ V(_, dot_generator_object_string, ".generator_object") \
+ V(_, dot_home_object_string, ".home_object") \
+ V(_, dot_result_string, ".result") \
+ V(_, dot_repl_result_string, ".repl_result") \
+ V(_, dot_static_home_object_string, "._static_home_object") \
+ V(_, dot_string, ".") \
+ V(_, dot_switch_tag_string, ".switch_tag") \
+ V(_, dotAll_string, "dotAll") \
+ V(_, enumerable_string, "enumerable") \
+ V(_, element_string, "element") \
+ V(_, Error_string, "Error") \
+ V(_, errors_string, "errors") \
+ V(_, error_to_string, "[object Error]") \
+ V(_, eval_string, "eval") \
+ V(_, EvalError_string, "EvalError") \
+ V(_, exec_string, "exec") \
+ V(_, false_string, "false") \
+ V(_, FinalizationRegistry_string, "FinalizationRegistry") \
+ V(_, flags_string, "flags") \
+ V(_, Float32Array_string, "Float32Array") \
+ V(_, Float64Array_string, "Float64Array") \
+ V(_, from_string, "from") \
+ V(_, Function_string, "Function") \
+ V(_, function_native_code_string, "function () { [native code] }") \
+ V(_, function_string, "function") \
+ V(_, function_to_string, "[object Function]") \
+ V(_, Generator_string, "Generator") \
+ V(_, get_space_string, "get ") \
+ V(_, get_string, "get") \
+ V(_, getOwnPropertyDescriptor_string, "getOwnPropertyDescriptor") \
+ V(_, getPrototypeOf_string, "getPrototypeOf") \
+ V(_, global_string, "global") \
+ V(_, globalThis_string, "globalThis") \
+ V(_, groups_string, "groups") \
+ V(_, GrowableSharedArrayBuffer_string, "GrowableSharedArrayBuffer") \
+ V(_, has_string, "has") \
+ V(_, has_indices_string, "hasIndices") \
+ V(_, ignoreCase_string, "ignoreCase") \
+ V(_, illegal_access_string, "illegal access") \
+ V(_, illegal_argument_string, "illegal argument") \
+ V(_, index_string, "index") \
+ V(_, indices_string, "indices") \
+ V(_, Infinity_string, "Infinity") \
+ V(_, infinity_string, "infinity") \
+ V(_, input_string, "input") \
+ V(_, Int16Array_string, "Int16Array") \
+ V(_, Int32Array_string, "Int32Array") \
+ V(_, Int8Array_string, "Int8Array") \
+ V(_, isExtensible_string, "isExtensible") \
+ V(_, jsMemoryEstimate_string, "jsMemoryEstimate") \
+ V(_, jsMemoryRange_string, "jsMemoryRange") \
+ V(_, keys_string, "keys") \
+ V(_, lastIndex_string, "lastIndex") \
+ V(_, length_string, "length") \
+ V(_, let_string, "let") \
+ V(_, line_string, "line") \
+ V(_, linear_string, "linear") \
+ V(_, LinkError_string, "LinkError") \
+ V(_, long_string, "long") \
+ V(_, Map_string, "Map") \
+ V(_, MapIterator_string, "Map Iterator") \
+ V(_, max_byte_length_string, "maxByteLength") \
+ V(_, medium_string, "medium") \
+ V(_, message_string, "message") \
+ V(_, meta_string, "meta") \
+ V(_, minus_Infinity_string, "-Infinity") \
+ V(_, Module_string, "Module") \
+ V(_, multiline_string, "multiline") \
+ V(_, name_string, "name") \
+ V(_, NaN_string, "NaN") \
+ V(_, narrow_string, "narrow") \
+ V(_, native_string, "native") \
+ V(_, new_target_string, ".new.target") \
+ V(_, next_string, "next") \
+ V(_, NFC_string, "NFC") \
+ V(_, NFD_string, "NFD") \
+ V(_, NFKC_string, "NFKC") \
+ V(_, NFKD_string, "NFKD") \
+ V(_, not_equal_string, "not-equal") \
+ V(_, null_string, "null") \
+ V(_, null_to_string, "[object Null]") \
+ V(_, Number_string, "Number") \
+ V(_, number_string, "number") \
+ V(_, number_to_string, "[object Number]") \
+ V(_, Object_string, "Object") \
+ V(_, object_string, "object") \
+ V(_, object_to_string, "[object Object]") \
+ V(_, of_string, "of") \
+ V(_, ok_string, "ok") \
+ V(_, one_string, "1") \
+ V(_, other_string, "other") \
+ V(_, ownKeys_string, "ownKeys") \
+ V(_, percent_string, "percent") \
+ V(_, position_string, "position") \
+ V(_, preventExtensions_string, "preventExtensions") \
+ V(_, private_constructor_string, "#constructor") \
+ V(_, Promise_string, "Promise") \
+ V(_, proto_string, "__proto__") \
+ V(_, prototype_string, "prototype") \
+ V(_, proxy_string, "proxy") \
+ V(_, Proxy_string, "Proxy") \
+ V(_, query_colon_string, "(?:)") \
+ V(_, RangeError_string, "RangeError") \
+ V(_, raw_string, "raw") \
+ V(_, ReferenceError_string, "ReferenceError") \
+ V(_, ReflectGet_string, "Reflect.get") \
+ V(_, ReflectHas_string, "Reflect.has") \
+ V(_, RegExp_string, "RegExp") \
+ V(_, regexp_to_string, "[object RegExp]") \
+ V(_, ResizableArrayBuffer_string, "ResizableArrayBuffer") \
+ V(_, resolve_string, "resolve") \
+ V(_, return_string, "return") \
+ V(_, revoke_string, "revoke") \
+ V(_, RuntimeError_string, "RuntimeError") \
+ V(_, Script_string, "Script") \
+ V(_, script_string, "script") \
+ V(_, short_string, "short") \
+ V(_, Set_string, "Set") \
+ V(_, sentence_string, "sentence") \
+ V(_, set_space_string, "set ") \
+ V(_, set_string, "set") \
+ V(_, SetIterator_string, "Set Iterator") \
+ V(_, setPrototypeOf_string, "setPrototypeOf") \
+ V(_, SharedArrayBuffer_string, "SharedArrayBuffer") \
+ V(_, source_string, "source") \
+ V(_, sourceText_string, "sourceText") \
+ V(_, stack_string, "stack") \
+ V(_, stackTraceLimit_string, "stackTraceLimit") \
+ V(_, sticky_string, "sticky") \
+ V(_, String_string, "String") \
+ V(_, string_string, "string") \
+ V(_, string_to_string, "[object String]") \
+ V(_, symbol_species_string, "[Symbol.species]") \
+ V(_, Symbol_string, "Symbol") \
+ V(_, symbol_string, "symbol") \
+ V(_, SyntaxError_string, "SyntaxError") \
+ V(_, target_string, "target") \
+ V(_, then_string, "then") \
+ V(_, this_function_string, ".this_function") \
+ V(_, this_string, "this") \
+ V(_, throw_string, "throw") \
+ V(_, timed_out_string, "timed-out") \
+ V(_, toJSON_string, "toJSON") \
+ V(_, toString_string, "toString") \
+ V(_, true_string, "true") \
+ V(_, total_string, "total") \
+ V(_, TypeError_string, "TypeError") \
+ V(_, Uint16Array_string, "Uint16Array") \
+ V(_, Uint32Array_string, "Uint32Array") \
+ V(_, Uint8Array_string, "Uint8Array") \
+ V(_, Uint8ClampedArray_string, "Uint8ClampedArray") \
+ V(_, undefined_string, "undefined") \
+ V(_, undefined_to_string, "[object Undefined]") \
+ V(_, unicode_string, "unicode") \
+ V(_, URIError_string, "URIError") \
+ V(_, value_string, "value") \
+ V(_, valueOf_string, "valueOf") \
+ V(_, WeakMap_string, "WeakMap") \
+ V(_, WeakRef_string, "WeakRef") \
+ V(_, WeakSet_string, "WeakSet") \
+ V(_, week_string, "week") \
+ V(_, word_string, "word") \
+ V(_, writable_string, "writable") \
V(_, zero_string, "0")
#define PRIVATE_SYMBOL_LIST_GENERATOR(V, _) \
@@ -349,6 +367,7 @@
V(_, error_start_pos_symbol) \
V(_, frozen_symbol) \
V(_, interpreter_trampoline_symbol) \
+ V(_, mega_dom_symbol) \
V(_, megamorphic_symbol) \
V(_, native_context_index_symbol) \
V(_, nonextensible_symbol) \
diff --git a/chromium/v8/src/init/isolate-allocator.cc b/chromium/v8/src/init/isolate-allocator.cc
index 5db27d288b1..a479f1ab945 100644
--- a/chromium/v8/src/init/isolate-allocator.cc
+++ b/chromium/v8/src/init/isolate-allocator.cc
@@ -3,39 +3,18 @@
// found in the LICENSE file.
#include "src/init/isolate-allocator.h"
+
#include "src/base/bounded-page-allocator.h"
#include "src/common/ptr-compr.h"
#include "src/execution/isolate.h"
+#include "src/heap/code-range.h"
#include "src/utils/memcopy.h"
#include "src/utils/utils.h"
namespace v8 {
namespace internal {
-IsolateAllocator::IsolateAllocator() {
#ifdef V8_COMPRESS_POINTERS
- Address heap_reservation_address = InitReservation();
- CommitPagesForIsolate(heap_reservation_address);
-#else
- // Allocate Isolate in C++ heap.
- page_allocator_ = GetPlatformPageAllocator();
- isolate_memory_ = ::operator new(sizeof(Isolate));
- DCHECK(!reservation_.IsReserved());
-#endif // V8_COMPRESS_POINTERS
-}
-
-IsolateAllocator::~IsolateAllocator() {
- if (reservation_.IsReserved()) {
- // The actual memory will be freed when the |reservation_| will die.
- return;
- }
-
- // The memory was allocated in C++ heap.
- ::operator delete(isolate_memory_);
-}
-
-#ifdef V8_COMPRESS_POINTERS
-
namespace {
// "IsolateRootBiasPage" is an optional region before the 4Gb aligned
@@ -50,109 +29,125 @@ inline size_t GetIsolateRootBiasPageSize(
} // namespace
-Address IsolateAllocator::InitReservation() {
- v8::PageAllocator* platform_page_allocator = GetPlatformPageAllocator();
+struct PtrComprCageReservationParams
+ : public VirtualMemoryCage::ReservationParams {
+ PtrComprCageReservationParams() {
+ page_allocator = GetPlatformPageAllocator();
+
+ // This is only used when there is a per-Isolate cage, in which case the
+ // Isolate is allocated within the cage, and the Isolate root is also the
+ // cage base.
+ const size_t kIsolateRootBiasPageSize =
+ COMPRESS_POINTERS_IN_ISOLATE_CAGE_BOOL
+ ? GetIsolateRootBiasPageSize(page_allocator)
+ : 0;
+ reservation_size = kPtrComprCageReservationSize + kIsolateRootBiasPageSize;
+ base_alignment = kPtrComprCageBaseAlignment;
+ base_bias_size = kIsolateRootBiasPageSize;
+
+ // Simplify BoundedPageAllocator's life by configuring it to use same page
+ // size as the Heap will use (MemoryChunk::kPageSize).
+ page_size =
+ RoundUp(size_t{1} << kPageSizeBits, page_allocator->AllocatePageSize());
+ requested_start_hint =
+ reinterpret_cast<Address>(page_allocator->GetRandomMmapAddr());
+ }
+};
+#endif // V8_COMPRESS_POINTERS
- const size_t kIsolateRootBiasPageSize =
- GetIsolateRootBiasPageSize(platform_page_allocator);
-
- // Reserve a |4Gb + kIsolateRootBiasPageSize| region such that the
- // reservation address plus |kIsolateRootBiasPageSize| is 4Gb aligned.
- const size_t reservation_size =
- kPtrComprCageReservationSize + kIsolateRootBiasPageSize;
- const size_t base_alignment = kPtrComprCageBaseAlignment;
-
- const int kMaxAttempts = 4;
- for (int attempt = 0; attempt < kMaxAttempts; ++attempt) {
- Address hint = RoundDown(reinterpret_cast<Address>(
- platform_page_allocator->GetRandomMmapAddr()),
- base_alignment) -
- kIsolateRootBiasPageSize;
-
- // Within this reservation there will be a sub-region with proper alignment.
- VirtualMemory padded_reservation(platform_page_allocator,
- reservation_size * 2,
- reinterpret_cast<void*>(hint));
- if (!padded_reservation.IsReserved()) break;
-
- // Find properly aligned sub-region inside the reservation.
- Address address =
- RoundUp(padded_reservation.address() + kIsolateRootBiasPageSize,
- base_alignment) -
- kIsolateRootBiasPageSize;
- CHECK(padded_reservation.InVM(address, reservation_size));
-
-#if defined(V8_OS_FUCHSIA)
- // Fuchsia does not respect given hints so as a workaround we will use
- // overreserved address space region instead of trying to re-reserve
- // a subregion.
- bool overreserve = true;
-#else
- // For the last attempt use the overreserved region to avoid an OOM crash.
- // This case can happen if there are many isolates being created in
- // parallel that race for reserving the regions.
- bool overreserve = (attempt == kMaxAttempts - 1);
+#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+namespace {
+DEFINE_LAZY_LEAKY_OBJECT_GETTER(VirtualMemoryCage, GetProcessWidePtrComprCage)
+} // anonymous namespace
+
+// static
+void IsolateAllocator::FreeProcessWidePtrComprCageForTesting() {
+ if (std::shared_ptr<CodeRange> code_range =
+ CodeRange::GetProcessWideCodeRange()) {
+ code_range->Free();
+ }
+ GetProcessWidePtrComprCage()->Free();
+}
+#endif // V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+
+// static
+void IsolateAllocator::InitializeOncePerProcess() {
+#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+ PtrComprCageReservationParams params;
+ if (!GetProcessWidePtrComprCage()->InitReservation(params)) {
+ V8::FatalProcessOutOfMemory(
+ nullptr,
+ "Failed to reserve virtual memory for process-wide V8 "
+ "pointer compression cage");
+ }
#endif
+}
- if (overreserve) {
- if (padded_reservation.InVM(address, reservation_size)) {
- reservation_ = std::move(padded_reservation);
- return address;
- }
- } else {
- // Now free the padded reservation and immediately try to reserve an exact
- // region at aligned address. We have to do this dancing because the
- // reservation address requirement is more complex than just a certain
- // alignment and not all operating systems support freeing parts of
- // reserved address space regions.
- padded_reservation.Free();
-
- VirtualMemory reservation(platform_page_allocator, reservation_size,
- reinterpret_cast<void*>(address));
- if (!reservation.IsReserved()) break;
-
- // The reservation could still be somewhere else but we can accept it
- // if it has the required alignment.
- Address address =
- RoundUp(reservation.address() + kIsolateRootBiasPageSize,
- base_alignment) -
- kIsolateRootBiasPageSize;
-
- if (reservation.address() == address) {
- reservation_ = std::move(reservation);
- CHECK_EQ(reservation_.size(), reservation_size);
- return address;
- }
- }
+IsolateAllocator::IsolateAllocator() {
+#if defined(V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE)
+ PtrComprCageReservationParams params;
+ if (!isolate_ptr_compr_cage_.InitReservation(params)) {
+ V8::FatalProcessOutOfMemory(
+ nullptr,
+ "Failed to reserve memory for Isolate V8 pointer compression cage");
}
- V8::FatalProcessOutOfMemory(nullptr,
- "Failed to reserve memory for new V8 Isolate");
- return kNullAddress;
+ page_allocator_ = isolate_ptr_compr_cage_.page_allocator();
+ CommitPagesForIsolate();
+#elif defined(V8_COMPRESS_POINTERS_IN_SHARED_CAGE)
+ // Allocate Isolate in C++ heap when sharing a cage.
+ CHECK(GetProcessWidePtrComprCage()->IsReserved());
+ page_allocator_ = GetProcessWidePtrComprCage()->page_allocator();
+ isolate_memory_ = ::operator new(sizeof(Isolate));
+#else
+ // Allocate Isolate in C++ heap.
+ page_allocator_ = GetPlatformPageAllocator();
+ isolate_memory_ = ::operator new(sizeof(Isolate));
+#endif // V8_COMPRESS_POINTERS
+
+ CHECK_NOT_NULL(page_allocator_);
}
-void IsolateAllocator::CommitPagesForIsolate(Address heap_reservation_address) {
- v8::PageAllocator* platform_page_allocator = GetPlatformPageAllocator();
+IsolateAllocator::~IsolateAllocator() {
+#ifdef V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE
+ if (isolate_ptr_compr_cage_.reservation()->IsReserved()) {
+ // The actual memory will be freed when the |isolate_ptr_compr_cage_|
+ // dies.
+ return;
+ }
+#endif
- const size_t kIsolateRootBiasPageSize =
- GetIsolateRootBiasPageSize(platform_page_allocator);
+ // The memory was allocated in C++ heap.
+ ::operator delete(isolate_memory_);
+}
- Address isolate_root = heap_reservation_address + kIsolateRootBiasPageSize;
- CHECK(IsAligned(isolate_root, kPtrComprCageBaseAlignment));
+VirtualMemoryCage* IsolateAllocator::GetPtrComprCage() {
+#if defined V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE
+ return &isolate_ptr_compr_cage_;
+#elif defined V8_COMPRESS_POINTERS_IN_SHARED_CAGE
+ return GetProcessWidePtrComprCage();
+#else
+ return nullptr;
+#endif
+}
- CHECK(reservation_.InVM(
- heap_reservation_address,
- kPtrComprCageReservationSize + kIsolateRootBiasPageSize));
+const VirtualMemoryCage* IsolateAllocator::GetPtrComprCage() const {
+ return const_cast<IsolateAllocator*>(this)->GetPtrComprCage();
+}
- // Simplify BoundedPageAllocator's life by configuring it to use same page
- // size as the Heap will use (MemoryChunk::kPageSize).
- size_t page_size = RoundUp(size_t{1} << kPageSizeBits,
- platform_page_allocator->AllocatePageSize());
+#ifdef V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE
+void IsolateAllocator::CommitPagesForIsolate() {
+ v8::PageAllocator* platform_page_allocator = GetPlatformPageAllocator();
- page_allocator_instance_ = std::make_unique<base::BoundedPageAllocator>(
- platform_page_allocator, isolate_root, kPtrComprCageReservationSize,
- page_size);
- page_allocator_ = page_allocator_instance_.get();
+ CHECK(isolate_ptr_compr_cage_.IsReserved());
+ Address isolate_root = isolate_ptr_compr_cage_.base();
+ CHECK(IsAligned(isolate_root, kPtrComprCageBaseAlignment));
+ CHECK_GE(isolate_ptr_compr_cage_.reservation()->size(),
+ kPtrComprCageReservationSize +
+ GetIsolateRootBiasPageSize(platform_page_allocator));
+ CHECK(isolate_ptr_compr_cage_.reservation()->InVM(
+ isolate_root, kPtrComprCageReservationSize));
+ size_t page_size = page_allocator_->AllocatePageSize();
Address isolate_address = isolate_root - Isolate::isolate_root_bias();
Address isolate_end = isolate_address + sizeof(Isolate);
@@ -162,7 +157,7 @@ void IsolateAllocator::CommitPagesForIsolate(Address heap_reservation_address) {
size_t reserved_region_size =
RoundUp(isolate_end, page_size) - reserved_region_address;
- CHECK(page_allocator_instance_->AllocatePagesAt(
+ CHECK(isolate_ptr_compr_cage_.page_allocator()->AllocatePagesAt(
reserved_region_address, reserved_region_size,
PageAllocator::Permission::kNoAccess));
}
@@ -175,11 +170,12 @@ void IsolateAllocator::CommitPagesForIsolate(Address heap_reservation_address) {
size_t committed_region_size =
RoundUp(isolate_end, commit_page_size) - committed_region_address;
- // We are using |reservation_| directly here because |page_allocator_| has
- // bigger commit page size than we actually need.
- CHECK(reservation_.SetPermissions(committed_region_address,
- committed_region_size,
- PageAllocator::kReadWrite));
+ // We are using |isolate_ptr_compr_cage_.reservation()| directly here
+ // because |page_allocator_| has bigger commit page size than we actually
+ // need.
+ CHECK(isolate_ptr_compr_cage_.reservation()->SetPermissions(
+ committed_region_address, committed_region_size,
+ PageAllocator::kReadWrite));
if (Heap::ShouldZapGarbage()) {
MemsetPointer(reinterpret_cast<Address*>(committed_region_address),
@@ -188,7 +184,7 @@ void IsolateAllocator::CommitPagesForIsolate(Address heap_reservation_address) {
}
isolate_memory_ = reinterpret_cast<void*>(isolate_address);
}
-#endif // V8_COMPRESS_POINTERS
+#endif // V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/init/isolate-allocator.h b/chromium/v8/src/init/isolate-allocator.h
index 9f894da1595..2bf739ec498 100644
--- a/chromium/v8/src/init/isolate-allocator.h
+++ b/chromium/v8/src/init/isolate-allocator.h
@@ -7,26 +7,24 @@
#include <memory>
-#include "src/base/bounded-page-allocator.h"
#include "src/base/page-allocator.h"
#include "src/common/globals.h"
+#include "src/flags/flags.h"
#include "src/utils/allocation.h"
namespace v8 {
-
-// Forward declarations.
-namespace base {
-class BoundedPageAllocator;
-} // namespace base
-
namespace internal {
// IsolateAllocator object is responsible for allocating memory for one (!)
// Isolate object. Depending on whether pointer compression is enabled,
// the memory can be allocated
-// 1) in the C++ heap (when pointer compression is disabled)
+//
+// 1) in the C++ heap (when pointer compression is disabled or when multiple
+// Isolates share a pointer compression cage)
+//
// 2) in a proper part of a properly aligned region of a reserved address space
-// (when pointer compression is enabled).
+// (when pointer compression is enabled and each Isolate has its own pointer
+// compression cage).
//
// Isolate::New() first creates IsolateAllocator object which allocates the
// memory and then it constructs Isolate object in this memory. Once it's done
@@ -44,15 +42,30 @@ class V8_EXPORT_PRIVATE IsolateAllocator final {
v8::PageAllocator* page_allocator() const { return page_allocator_; }
+ Address GetPtrComprCageBase() const {
+ return COMPRESS_POINTERS_BOOL ? GetPtrComprCage()->base() : kNullAddress;
+ }
+
+ // When pointer compression is on, return the pointer compression
+ // cage. Otherwise return nullptr.
+ VirtualMemoryCage* GetPtrComprCage();
+ const VirtualMemoryCage* GetPtrComprCage() const;
+
+ static void InitializeOncePerProcess();
+
private:
- Address InitReservation();
- void CommitPagesForIsolate(Address heap_reservation_address);
+ void CommitPagesForIsolate();
+
+ friend class SequentialUnmapperTest;
+ // Only used for testing.
+ static void FreeProcessWidePtrComprCageForTesting();
// The allocated memory for Isolate instance.
void* isolate_memory_ = nullptr;
v8::PageAllocator* page_allocator_ = nullptr;
- std::unique_ptr<base::BoundedPageAllocator> page_allocator_instance_;
- VirtualMemory reservation_;
+#ifdef V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE
+ VirtualMemoryCage isolate_ptr_compr_cage_;
+#endif
};
} // namespace internal
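
The reworked allocator above delegates reservation to VirtualMemoryCage, and GetPtrComprCage() resolves at compile time to a per-Isolate cage, the process-wide cage, or no cage at all. A minimal, self-contained sketch of that three-way compile-time selection; the macro and type names below are illustrative only, not V8's:

#include <cstdio>

struct Cage { const char* kind; };

#if defined(CAGE_PER_ISOLATE)
static Cage* GetCage() { static Cage cage{"per-isolate"}; return &cage; }
#elif defined(CAGE_SHARED)
static Cage* GetCage() { static Cage cage{"process-wide"}; return &cage; }
#else
static Cage* GetCage() { return nullptr; }  // pointer compression disabled
#endif

int main() {
  Cage* cage = GetCage();
  std::printf("cage: %s\n", cage ? cage->kind : "none");
  return 0;
}
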
diff --git a/chromium/v8/src/init/v8.cc b/chromium/v8/src/init/v8.cc
index fbf120b1b61..e14d02efbdc 100644
--- a/chromium/v8/src/init/v8.cc
+++ b/chromium/v8/src/init/v8.cc
@@ -133,6 +133,26 @@ void V8::InitializeOncePerProcessImpl() {
}
#endif
+ // When fuzzing and concurrent compilation are enabled, disable Turbofan
+ // tracing flags since reading/printing heap state is not thread-safe and
+ // leads to false positives on TSAN bots.
+ // TODO(chromium:1205289): Teach relevant fuzzers to not pass TF tracing
+ // flags instead, and remove this section.
+ if (FLAG_fuzzing && FLAG_concurrent_recompilation) {
+ FLAG_trace_turbo = false;
+ FLAG_trace_turbo_graph = false;
+ FLAG_trace_turbo_scheduled = false;
+ FLAG_trace_turbo_reduction = false;
+ FLAG_trace_turbo_trimming = false;
+ FLAG_trace_turbo_jt = false;
+ FLAG_trace_turbo_ceq = false;
+ FLAG_trace_turbo_loop = false;
+ FLAG_trace_turbo_alloc = false;
+ FLAG_trace_all_uses = false;
+ FLAG_trace_representation = false;
+ FLAG_trace_turbo_stack_accesses = false;
+ }
+
if (FLAG_regexp_interpret_all && FLAG_regexp_tier_up) {
// Turning off the tier-up strategy, because the --regexp-interpret-all and
// --regexp-tier-up flags are incompatible.
@@ -151,6 +171,7 @@ void V8::InitializeOncePerProcessImpl() {
#if defined(V8_USE_PERFETTO)
if (perfetto::Tracing::IsInitialized()) TrackEvent::Register();
#endif
+ IsolateAllocator::InitializeOncePerProcess();
Isolate::InitializeOncePerProcess();
#if defined(USE_SIMULATOR)
@@ -163,6 +184,8 @@ void V8::InitializeOncePerProcessImpl() {
#if V8_ENABLE_WEBASSEMBLY
wasm::WasmEngine::InitializeOncePerProcess();
#endif // V8_ENABLE_WEBASSEMBLY
+
+ ExternalReferenceTable::InitializeOncePerProcess();
}
void V8::InitializeOncePerProcess() {
diff --git a/chromium/v8/src/inspector/injected-script.cc b/chromium/v8/src/inspector/injected-script.cc
index 992ff50fe7d..fc029e937aa 100644
--- a/chromium/v8/src/inspector/injected-script.cc
+++ b/chromium/v8/src/inspector/injected-script.cc
@@ -302,7 +302,8 @@ class InjectedScript::ProtocolPromiseHandler {
exceptionDetails->setStackTrace(
stack->buildInspectorObjectImpl(m_inspector->debugger()));
if (stack && !stack->isEmpty())
- exceptionDetails->setScriptId(toString16(stack->topScriptId()));
+ exceptionDetails->setScriptId(
+ String16::fromInteger(stack->topScriptId()));
callback->sendSuccess(std::move(wrappedValue), std::move(exceptionDetails));
}
diff --git a/chromium/v8/src/inspector/injected-script.h b/chromium/v8/src/inspector/injected-script.h
index 01d0ff06653..9971d7da3a7 100644
--- a/chromium/v8/src/inspector/injected-script.h
+++ b/chromium/v8/src/inspector/injected-script.h
@@ -173,7 +173,8 @@ class InjectedScript final {
int m_sessionId;
};
- class ContextScope : public Scope {
+ class ContextScope : public Scope,
+ public V8InspectorSession::CommandLineAPIScope {
public:
ContextScope(V8InspectorSessionImpl*, int executionContextId);
~ContextScope() override;
diff --git a/chromium/v8/src/inspector/string-16.h b/chromium/v8/src/inspector/string-16.h
index 88a7584255f..b38917185e8 100644
--- a/chromium/v8/src/inspector/string-16.h
+++ b/chromium/v8/src/inspector/string-16.h
@@ -27,8 +27,8 @@ class String16 {
String16(const String16&) V8_NOEXCEPT = default;
String16(String16&&) V8_NOEXCEPT = default;
String16(const UChar* characters, size_t size);
- V8_EXPORT String16(const UChar* characters); // NOLINT(runtime/explicit)
- V8_EXPORT String16(const char* characters); // NOLINT(runtime/explicit)
+ V8_EXPORT String16(const UChar* characters);
+ V8_EXPORT String16(const char* characters);
String16(const char* characters, size_t size);
explicit String16(const std::basic_string<UChar>& impl);
explicit String16(std::basic_string<UChar>&& impl);
diff --git a/chromium/v8/src/inspector/v8-console-message.cc b/chromium/v8/src/inspector/v8-console-message.cc
index 58a37073a82..1b03a02c7bb 100644
--- a/chromium/v8/src/inspector/v8-console-message.cc
+++ b/chromium/v8/src/inspector/v8-console-message.cc
@@ -152,7 +152,7 @@ class V8ValueStringBuilder {
bool append(v8::Local<v8::Symbol> symbol) {
m_builder.append("Symbol(");
- bool result = append(symbol->Description(), IgnoreUndefined);
+ bool result = append(symbol->Description(m_isolate), IgnoreUndefined);
m_builder.append(')');
return result;
}
diff --git a/chromium/v8/src/inspector/v8-console.cc b/chromium/v8/src/inspector/v8-console.cc
index cc464ebe048..afe790fb62b 100644
--- a/chromium/v8/src/inspector/v8-console.cc
+++ b/chromium/v8/src/inspector/v8-console.cc
@@ -162,14 +162,9 @@ class ConsoleHelper {
int m_groupId;
};
-void returnDataCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
- info.GetReturnValue().Set(info.Data());
-}
-
void createBoundFunctionProperty(
v8::Local<v8::Context> context, v8::Local<v8::Object> console,
v8::Local<v8::Value> data, const char* name, v8::FunctionCallback callback,
- const char* description = nullptr,
v8::SideEffectType side_effect_type = v8::SideEffectType::kHasSideEffect) {
v8::Local<v8::String> funcName =
toV8StringInternalized(context->GetIsolate(), name);
@@ -179,18 +174,6 @@ void createBoundFunctionProperty(
.ToLocal(&func))
return;
func->SetName(funcName);
- if (description) {
- v8::Local<v8::String> returnValue =
- toV8String(context->GetIsolate(), description);
- v8::Local<v8::Function> toStringFunction;
- if (v8::Function::New(context, returnDataCallback, returnValue, 0,
- v8::ConstructorBehavior::kThrow,
- v8::SideEffectType::kHasNoSideEffect)
- .ToLocal(&toStringFunction))
- createDataProperty(context, func, toV8StringInternalized(
- context->GetIsolate(), "toString"),
- toStringFunction);
- }
createDataProperty(context, console, funcName, func);
}
@@ -498,11 +481,11 @@ void V8Console::valuesCallback(const v8::FunctionCallbackInfo<v8::Value>& info,
info.GetReturnValue().Set(values);
}
-static void setFunctionBreakpoint(
- ConsoleHelper& helper, // NOLINT(runtime/references)
- int sessionId, v8::Local<v8::Function> function,
- V8DebuggerAgentImpl::BreakpointSource source,
- v8::Local<v8::String> condition, bool enable) {
+static void setFunctionBreakpoint(ConsoleHelper& helper, int sessionId,
+ v8::Local<v8::Function> function,
+ V8DebuggerAgentImpl::BreakpointSource source,
+ v8::Local<v8::String> condition,
+ bool enable) {
V8InspectorSessionImpl* session = helper.session(sessionId);
if (session == nullptr) return;
if (!session->debuggerAgent()->enabled()) return;
@@ -557,8 +540,9 @@ void V8Console::monitorFunctionCallback(
else
builder.append(functionName);
builder.append(
- " called\" + (arguments.length > 0 ? \" with arguments: \" + "
- "Array.prototype.join.call(arguments, \", \") : \"\")) && false");
+ " called\" + (typeof arguments !== \"undefined\" && arguments.length > 0 "
+ "? \" with arguments: \" + Array.prototype.join.call(arguments, \", \") "
+ ": \"\")) && false");
setFunctionBreakpoint(helper, sessionId, function,
V8DebuggerAgentImpl::MonitorCommandBreakpointSource,
toV8String(info.GetIsolate(), builder.toString()),
@@ -696,80 +680,62 @@ v8::Local<v8::Object> V8Console::createCommandLineAPI(
*static_cast<CommandLineAPIData*>(data->GetBackingStore()->Data()) =
CommandLineAPIData(this, sessionId);
createBoundFunctionProperty(context, commandLineAPI, data, "dir",
- &V8Console::call<&V8Console::Dir>,
- "function dir(value) { [Command Line API] }");
+ &V8Console::call<&V8Console::Dir>);
createBoundFunctionProperty(context, commandLineAPI, data, "dirxml",
- &V8Console::call<&V8Console::DirXml>,
- "function dirxml(value) { [Command Line API] }");
+ &V8Console::call<&V8Console::DirXml>);
createBoundFunctionProperty(context, commandLineAPI, data, "profile",
- &V8Console::call<&V8Console::Profile>,
- "function profile(title) { [Command Line API] }");
- createBoundFunctionProperty(
- context, commandLineAPI, data, "profileEnd",
- &V8Console::call<&V8Console::ProfileEnd>,
- "function profileEnd(title) { [Command Line API] }");
+ &V8Console::call<&V8Console::Profile>);
+ createBoundFunctionProperty(context, commandLineAPI, data, "profileEnd",
+ &V8Console::call<&V8Console::ProfileEnd>);
createBoundFunctionProperty(context, commandLineAPI, data, "clear",
- &V8Console::call<&V8Console::Clear>,
- "function clear() { [Command Line API] }");
- createBoundFunctionProperty(
- context, commandLineAPI, data, "table",
- &V8Console::call<&V8Console::Table>,
- "function table(data, [columns]) { [Command Line API] }");
+ &V8Console::call<&V8Console::Clear>);
+ createBoundFunctionProperty(context, commandLineAPI, data, "table",
+ &V8Console::call<&V8Console::Table>);
createBoundFunctionProperty(context, commandLineAPI, data, "keys",
&V8Console::call<&V8Console::keysCallback>,
- "function keys(object) { [Command Line API] }",
v8::SideEffectType::kHasNoSideEffect);
createBoundFunctionProperty(context, commandLineAPI, data, "values",
&V8Console::call<&V8Console::valuesCallback>,
- "function values(object) { [Command Line API] }",
v8::SideEffectType::kHasNoSideEffect);
createBoundFunctionProperty(
context, commandLineAPI, data, "debug",
- &V8Console::call<&V8Console::debugFunctionCallback>,
- "function debug(function, condition) { [Command Line API] }");
+ &V8Console::call<&V8Console::debugFunctionCallback>);
createBoundFunctionProperty(
context, commandLineAPI, data, "undebug",
- &V8Console::call<&V8Console::undebugFunctionCallback>,
- "function undebug(function) { [Command Line API] }");
+ &V8Console::call<&V8Console::undebugFunctionCallback>);
createBoundFunctionProperty(
context, commandLineAPI, data, "monitor",
- &V8Console::call<&V8Console::monitorFunctionCallback>,
- "function monitor(function) { [Command Line API] }");
+ &V8Console::call<&V8Console::monitorFunctionCallback>);
createBoundFunctionProperty(
context, commandLineAPI, data, "unmonitor",
- &V8Console::call<&V8Console::unmonitorFunctionCallback>,
- "function unmonitor(function) { [Command Line API] }");
- createBoundFunctionProperty(
- context, commandLineAPI, data, "inspect",
- &V8Console::call<&V8Console::inspectCallback>,
- "function inspect(object) { [Command Line API] }");
+ &V8Console::call<&V8Console::unmonitorFunctionCallback>);
+ createBoundFunctionProperty(context, commandLineAPI, data, "inspect",
+ &V8Console::call<&V8Console::inspectCallback>);
createBoundFunctionProperty(context, commandLineAPI, data, "copy",
- &V8Console::call<&V8Console::copyCallback>,
- "function copy(value) { [Command Line API] }");
+ &V8Console::call<&V8Console::copyCallback>);
createBoundFunctionProperty(
context, commandLineAPI, data, "queryObjects",
- &V8Console::call<&V8Console::queryObjectsCallback>,
- "function queryObjects(constructor) { [Command Line API] }");
+ &V8Console::call<&V8Console::queryObjectsCallback>);
createBoundFunctionProperty(
context, commandLineAPI, data, "$_",
- &V8Console::call<&V8Console::lastEvaluationResultCallback>, nullptr,
+ &V8Console::call<&V8Console::lastEvaluationResultCallback>,
v8::SideEffectType::kHasNoSideEffect);
createBoundFunctionProperty(context, commandLineAPI, data, "$0",
&V8Console::call<&V8Console::inspectedObject0>,
- nullptr, v8::SideEffectType::kHasNoSideEffect);
+ v8::SideEffectType::kHasNoSideEffect);
createBoundFunctionProperty(context, commandLineAPI, data, "$1",
&V8Console::call<&V8Console::inspectedObject1>,
- nullptr, v8::SideEffectType::kHasNoSideEffect);
+ v8::SideEffectType::kHasNoSideEffect);
createBoundFunctionProperty(context, commandLineAPI, data, "$2",
&V8Console::call<&V8Console::inspectedObject2>,
- nullptr, v8::SideEffectType::kHasNoSideEffect);
+ v8::SideEffectType::kHasNoSideEffect);
createBoundFunctionProperty(context, commandLineAPI, data, "$3",
&V8Console::call<&V8Console::inspectedObject3>,
- nullptr, v8::SideEffectType::kHasNoSideEffect);
+ v8::SideEffectType::kHasNoSideEffect);
createBoundFunctionProperty(context, commandLineAPI, data, "$4",
&V8Console::call<&V8Console::inspectedObject4>,
- nullptr, v8::SideEffectType::kHasNoSideEffect);
+ v8::SideEffectType::kHasNoSideEffect);
m_inspector->client()->installAdditionalCommandLineAPI(context,
commandLineAPI);
diff --git a/chromium/v8/src/inspector/v8-debugger-agent-impl.cc b/chromium/v8/src/inspector/v8-debugger-agent-impl.cc
index f99d57d33e3..6b54e192a54 100644
--- a/chromium/v8/src/inspector/v8-debugger-agent-impl.cc
+++ b/chromium/v8/src/inspector/v8-debugger-agent-impl.cc
@@ -1042,23 +1042,7 @@ Response V8DebuggerAgentImpl::restartFrame(
std::unique_ptr<Array<CallFrame>>* newCallFrames,
Maybe<protocol::Runtime::StackTrace>* asyncStackTrace,
Maybe<protocol::Runtime::StackTraceId>* asyncStackTraceId) {
- if (!isPaused()) return Response::ServerError(kDebuggerNotPaused);
- InjectedScript::CallFrameScope scope(m_session, callFrameId);
- Response response = scope.initialize();
- if (!response.IsSuccess()) return response;
- int frameOrdinal = static_cast<int>(scope.frameOrdinal());
- auto it = v8::debug::StackTraceIterator::Create(m_isolate, frameOrdinal);
- if (it->Done()) {
- return Response::ServerError("Could not find call frame with given id");
- }
- if (!it->Restart()) {
- return Response::InternalError();
- }
- response = currentCallFrames(newCallFrames);
- if (!response.IsSuccess()) return response;
- *asyncStackTrace = currentAsyncStackTrace();
- *asyncStackTraceId = currentExternalStackTrace();
- return Response::Success();
+ return Response::ServerError("Frame restarting not supported");
}
Response V8DebuggerAgentImpl::getScriptSource(
diff --git a/chromium/v8/src/inspector/v8-debugger.cc b/chromium/v8/src/inspector/v8-debugger.cc
index 9f035b578ef..6887657239d 100644
--- a/chromium/v8/src/inspector/v8-debugger.cc
+++ b/chromium/v8/src/inspector/v8-debugger.cc
@@ -24,7 +24,7 @@ static const int kMaxAsyncTaskStacks = 128 * 1024;
static const int kNoBreakpointId = 0;
template <typename Map>
-void cleanupExpiredWeakPointers(Map& map) { // NOLINT(runtime/references)
+void cleanupExpiredWeakPointers(Map& map) {
for (auto it = map.begin(); it != map.end();) {
if (it->second.expired()) {
it = map.erase(it);
diff --git a/chromium/v8/src/inspector/v8-heap-profiler-agent-impl.cc b/chromium/v8/src/inspector/v8-heap-profiler-agent-impl.cc
index 6472c5ca94e..b441be565c9 100644
--- a/chromium/v8/src/inspector/v8-heap-profiler-agent-impl.cc
+++ b/chromium/v8/src/inspector/v8-heap-profiler-agent-impl.cc
@@ -232,10 +232,12 @@ Response V8HeapProfilerAgentImpl::startTrackingHeapObjects(
}
Response V8HeapProfilerAgentImpl::stopTrackingHeapObjects(
- Maybe<bool> reportProgress, Maybe<bool> treatGlobalObjectsAsRoots) {
+ Maybe<bool> reportProgress, Maybe<bool> treatGlobalObjectsAsRoots,
+ Maybe<bool> captureNumericValue) {
requestHeapStatsUpdate();
takeHeapSnapshot(std::move(reportProgress),
- std::move(treatGlobalObjectsAsRoots));
+ std::move(treatGlobalObjectsAsRoots),
+ std::move(captureNumericValue));
stopTrackingHeapObjectsInternal();
return Response::Success();
}
@@ -258,7 +260,8 @@ Response V8HeapProfilerAgentImpl::disable() {
}
Response V8HeapProfilerAgentImpl::takeHeapSnapshot(
- Maybe<bool> reportProgress, Maybe<bool> treatGlobalObjectsAsRoots) {
+ Maybe<bool> reportProgress, Maybe<bool> treatGlobalObjectsAsRoots,
+ Maybe<bool> captureNumericValue) {
v8::HeapProfiler* profiler = m_isolate->GetHeapProfiler();
if (!profiler) return Response::ServerError("Cannot access v8 heap profiler");
std::unique_ptr<HeapSnapshotProgress> progress;
@@ -267,7 +270,8 @@ Response V8HeapProfilerAgentImpl::takeHeapSnapshot(
GlobalObjectNameResolver resolver(m_session);
const v8::HeapSnapshot* snapshot = profiler->TakeHeapSnapshot(
- progress.get(), &resolver, treatGlobalObjectsAsRoots.fromMaybe(true));
+ progress.get(), &resolver, treatGlobalObjectsAsRoots.fromMaybe(true),
+ captureNumericValue.fromMaybe(false));
if (!snapshot) return Response::ServerError("Failed to take heap snapshot");
HeapSnapshotOutputStream stream(&m_frontend);
snapshot->Serialize(&stream);
@@ -375,6 +379,9 @@ Response V8HeapProfilerAgentImpl::startSampling(
const unsigned defaultSamplingInterval = 1 << 15;
double samplingIntervalValue =
samplingInterval.fromMaybe(defaultSamplingInterval);
+ if (samplingIntervalValue <= 0.0) {
+ return Response::ServerError("Invalid sampling interval");
+ }
m_state->setDouble(HeapProfilerAgentState::samplingHeapProfilerInterval,
samplingIntervalValue);
m_state->setBoolean(HeapProfilerAgentState::samplingHeapProfilerEnabled,
diff --git a/chromium/v8/src/inspector/v8-heap-profiler-agent-impl.h b/chromium/v8/src/inspector/v8-heap-profiler-agent-impl.h
index b9babdb9898..feda75ffb71 100644
--- a/chromium/v8/src/inspector/v8-heap-profiler-agent-impl.h
+++ b/chromium/v8/src/inspector/v8-heap-profiler-agent-impl.h
@@ -34,14 +34,15 @@ class V8HeapProfilerAgentImpl : public protocol::HeapProfiler::Backend {
Response enable() override;
Response startTrackingHeapObjects(Maybe<bool> trackAllocations) override;
- Response stopTrackingHeapObjects(
- Maybe<bool> reportProgress,
- Maybe<bool> treatGlobalObjectsAsRoots) override;
+ Response stopTrackingHeapObjects(Maybe<bool> reportProgress,
+ Maybe<bool> treatGlobalObjectsAsRoots,
+ Maybe<bool> captureNumericValue) override;
Response disable() override;
Response takeHeapSnapshot(Maybe<bool> reportProgress,
- Maybe<bool> treatGlobalObjectsAsRoots) override;
+ Maybe<bool> treatGlobalObjectsAsRoots,
+ Maybe<bool> captureNumericValue) override;
Response getObjectByHeapObjectId(
const String16& heapSnapshotObjectId, Maybe<String16> objectGroup,
diff --git a/chromium/v8/src/inspector/v8-inspector-session-impl.cc b/chromium/v8/src/inspector/v8-inspector-session-impl.cc
index 02b694a1a98..b926ddfbba7 100644
--- a/chromium/v8/src/inspector/v8-inspector-session-impl.cc
+++ b/chromium/v8/src/inspector/v8-inspector-session-impl.cc
@@ -155,6 +155,20 @@ V8InspectorSessionImpl::~V8InspectorSessionImpl() {
m_inspector->disconnect(this);
}
+std::unique_ptr<V8InspectorSession::CommandLineAPIScope>
+V8InspectorSessionImpl::initializeCommandLineAPIScope(int executionContextId) {
+ auto scope =
+ std::make_unique<InjectedScript::ContextScope>(this, executionContextId);
+ auto result = scope->initialize();
+ if (!result.IsSuccess()) {
+ return nullptr;
+ }
+
+ scope->installCommandLineAPI();
+
+ return scope;
+}
+
protocol::DictionaryValue* V8InspectorSessionImpl::agentState(
const String16& name) {
protocol::DictionaryValue* state = m_state->getObject(name);
diff --git a/chromium/v8/src/inspector/v8-inspector-session-impl.h b/chromium/v8/src/inspector/v8-inspector-session-impl.h
index db530385aab..115c15c5faa 100644
--- a/chromium/v8/src/inspector/v8-inspector-session-impl.h
+++ b/chromium/v8/src/inspector/v8-inspector-session-impl.h
@@ -50,6 +50,9 @@ class V8InspectorSessionImpl : public V8InspectorSession,
int contextGroupId() const { return m_contextGroupId; }
int sessionId() const { return m_sessionId; }
+ std::unique_ptr<V8InspectorSession::CommandLineAPIScope>
+ initializeCommandLineAPIScope(int executionContextId) override;
+
Response findInjectedScript(int contextId, InjectedScript*&);
Response findInjectedScript(RemoteObjectIdBase*, InjectedScript*&);
void reset();
diff --git a/chromium/v8/src/inspector/v8-profiler-agent-impl.cc b/chromium/v8/src/inspector/v8-profiler-agent-impl.cc
index 47be9448799..8ebee1fd21a 100644
--- a/chromium/v8/src/inspector/v8-profiler-agent-impl.cc
+++ b/chromium/v8/src/inspector/v8-profiler-agent-impl.cc
@@ -150,10 +150,11 @@ std::unique_ptr<protocol::Debugger::Location> currentDebugLocation(
V8InspectorImpl* inspector) {
std::unique_ptr<V8StackTraceImpl> callStack =
inspector->debugger()->captureStackTrace(false /* fullStack */);
- auto location = protocol::Debugger::Location::create()
- .setScriptId(toString16(callStack->topScriptId()))
- .setLineNumber(callStack->topLineNumber())
- .build();
+ auto location =
+ protocol::Debugger::Location::create()
+ .setScriptId(String16::fromInteger(callStack->topScriptId()))
+ .setLineNumber(callStack->topLineNumber())
+ .build();
location->setColumnNumber(callStack->topColumnNumber());
return location;
}
diff --git a/chromium/v8/src/inspector/v8-runtime-agent-impl.cc b/chromium/v8/src/inspector/v8-runtime-agent-impl.cc
index 6fee3b3b05f..97fc07a6bf0 100644
--- a/chromium/v8/src/inspector/v8-runtime-agent-impl.cc
+++ b/chromium/v8/src/inspector/v8-runtime-agent-impl.cc
@@ -109,8 +109,7 @@ bool wrapEvaluateResultAsync(InjectedScript* injectedScript,
}
void innerCallFunctionOn(
- V8InspectorSessionImpl* session,
- InjectedScript::Scope& scope, // NOLINT(runtime/references)
+ V8InspectorSessionImpl* session, InjectedScript::Scope& scope,
v8::Local<v8::Value> recv, const String16& expression,
Maybe<protocol::Array<protocol::Runtime::CallArgument>> optionalArguments,
bool silent, WrapMode wrapMode, bool userGesture, bool awaitPromise,
@@ -694,7 +693,6 @@ protocol::DictionaryValue* getOrCreateDictionary(
Response V8RuntimeAgentImpl::addBinding(const String16& name,
Maybe<int> executionContextId,
Maybe<String16> executionContextName) {
- if (m_activeBindings.count(name)) return Response::Success();
if (executionContextId.isJust()) {
if (executionContextName.isJust()) {
return Response::InvalidParams(
@@ -743,8 +741,8 @@ void V8RuntimeAgentImpl::bindingCallback(
const v8::FunctionCallbackInfo<v8::Value>& info) {
v8::Isolate* isolate = info.GetIsolate();
if (info.Length() != 1 || !info[0]->IsString()) {
- info.GetIsolate()->ThrowException(toV8String(
- isolate, "Invalid arguments: should be exactly one string."));
+ info.GetIsolate()->ThrowError(
+ "Invalid arguments: should be exactly one string.");
return;
}
V8InspectorImpl* inspector =
@@ -764,6 +762,10 @@ void V8RuntimeAgentImpl::bindingCallback(
void V8RuntimeAgentImpl::addBinding(InspectedContext* context,
const String16& name) {
+ auto it = m_activeBindings.find(name);
+ if (it != m_activeBindings.end() && it->second.count(context->contextId())) {
+ return;
+ }
v8::HandleScope handles(m_inspector->isolate());
v8::Local<v8::Context> localContext = context->context();
v8::Local<v8::Object> global = localContext->Global();
@@ -775,7 +777,12 @@ void V8RuntimeAgentImpl::addBinding(InspectedContext* context,
.ToLocal(&functionValue)) {
v8::Maybe<bool> success = global->Set(localContext, v8Name, functionValue);
USE(success);
- m_activeBindings.insert(name);
+ if (it == m_activeBindings.end()) {
+ m_activeBindings.emplace(name,
+ std::unordered_set<int>(context->contextId()));
+ } else {
+ m_activeBindings.at(name).insert(context->contextId());
+ }
}
}
diff --git a/chromium/v8/src/inspector/v8-runtime-agent-impl.h b/chromium/v8/src/inspector/v8-runtime-agent-impl.h
index 5ac1e462f89..4694adae277 100644
--- a/chromium/v8/src/inspector/v8-runtime-agent-impl.h
+++ b/chromium/v8/src/inspector/v8-runtime-agent-impl.h
@@ -148,7 +148,8 @@ class V8RuntimeAgentImpl : public protocol::Runtime::Backend {
bool m_enabled;
std::unordered_map<String16, std::unique_ptr<v8::Global<v8::Script>>>
m_compiledScripts;
- std::set<String16> m_activeBindings;
+ // Binding name -> executionContextIds mapping.
+ std::unordered_map<String16, std::unordered_set<int>> m_activeBindings;
};
} // namespace v8_inspector
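
With m_activeBindings now a binding-name -> execution-context-id map, a binding is installed once per context rather than once per name. A small stand-alone sketch of that bookkeeping pattern, assuming only the standard library (class and method names are illustrative):

#include <string>
#include <unordered_map>
#include <unordered_set>

class BindingRegistry {
 public:
  // Returns true if |name| still had to be installed in |contextId|,
  // false if that context already had it.
  bool MarkInstalled(const std::string& name, int contextId) {
    return active_[name].insert(contextId).second;
  }

 private:
  // Binding name -> execution context ids it is installed in.
  std::unordered_map<std::string, std::unordered_set<int>> active_;
};

For example, MarkInstalled("foo", 1) returns true the first time and false on a repeat for context 1, while a later call with context 2 returns true again, so the same name can be bound in several contexts.
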
diff --git a/chromium/v8/src/inspector/v8-stack-trace-impl.cc b/chromium/v8/src/inspector/v8-stack-trace-impl.cc
index 86bedc07ec7..3506c929966 100644
--- a/chromium/v8/src/inspector/v8-stack-trace-impl.cc
+++ b/chromium/v8/src/inspector/v8-stack-trace-impl.cc
@@ -7,6 +7,7 @@
#include <algorithm>
#include "../../third_party/inspector_protocol/crdtp/json.h"
+#include "src/debug/debug-interface.h"
#include "src/inspector/v8-debugger.h"
#include "src/inspector/v8-inspector-impl.h"
#include "src/tracing/trace-event.h"
@@ -175,9 +176,9 @@ std::unique_ptr<StringBuffer> V8StackTraceId::ToString() {
}
StackFrame::StackFrame(v8::Isolate* isolate, v8::Local<v8::StackFrame> v8Frame)
- : m_functionName(toProtocolString(isolate, v8Frame->GetFunctionName())),
+ : m_functionName(
+ toProtocolString(isolate, v8::debug::GetFunctionDebugName(v8Frame))),
m_scriptId(v8Frame->GetScriptId()),
- m_scriptIdAsString(String16::fromInteger(v8Frame->GetScriptId())),
m_sourceURL(
toProtocolString(isolate, v8Frame->GetScriptNameOrSourceURL())),
m_lineNumber(v8Frame->GetLineNumber() - 1),
@@ -192,10 +193,6 @@ const String16& StackFrame::functionName() const { return m_functionName; }
int StackFrame::scriptId() const { return m_scriptId; }
-const String16& StackFrame::scriptIdAsString() const {
- return m_scriptIdAsString;
-}
-
const String16& StackFrame::sourceURL() const { return m_sourceURL; }
int StackFrame::lineNumber() const { return m_lineNumber; }
@@ -324,13 +321,7 @@ int V8StackTraceImpl::topColumnNumber() const {
return m_frames[0]->columnNumber() + 1;
}
-StringView V8StackTraceImpl::topScriptId() const {
- return toStringView(m_frames[0]->scriptIdAsString());
-}
-
-int V8StackTraceImpl::topScriptIdAsInteger() const {
- return m_frames[0]->scriptId();
-}
+int V8StackTraceImpl::topScriptId() const { return m_frames[0]->scriptId(); }
StringView V8StackTraceImpl::topFunctionName() const {
return toStringView(m_frames[0]->functionName());
diff --git a/chromium/v8/src/inspector/v8-stack-trace-impl.h b/chromium/v8/src/inspector/v8-stack-trace-impl.h
index 3896c7371c3..065d5ce47cf 100644
--- a/chromium/v8/src/inspector/v8-stack-trace-impl.h
+++ b/chromium/v8/src/inspector/v8-stack-trace-impl.h
@@ -27,7 +27,6 @@ class StackFrame {
const String16& functionName() const;
int scriptId() const;
- const String16& scriptIdAsString() const;
const String16& sourceURL() const;
int lineNumber() const; // 0-based.
int columnNumber() const; // 0-based.
@@ -38,7 +37,6 @@ class StackFrame {
private:
String16 m_functionName;
int m_scriptId;
- String16 m_scriptIdAsString;
String16 m_sourceURL;
int m_lineNumber; // 0-based.
int m_columnNumber; // 0-based.
@@ -75,8 +73,7 @@ class V8StackTraceImpl : public V8StackTrace {
StringView topSourceURL() const override;
int topLineNumber() const override; // 1-based.
int topColumnNumber() const override; // 1-based.
- StringView topScriptId() const override;
- int topScriptIdAsInteger() const override;
+ int topScriptId() const override;
StringView topFunctionName() const override;
std::unique_ptr<protocol::Runtime::API::StackTrace> buildInspectorObject()
const override;
diff --git a/chromium/v8/src/inspector/v8-string-conversions.cc b/chromium/v8/src/inspector/v8-string-conversions.cc
index 4ccf6351fb3..0c75e66b972 100644
--- a/chromium/v8/src/inspector/v8-string-conversions.cc
+++ b/chromium/v8/src/inspector/v8-string-conversions.cc
@@ -228,9 +228,7 @@ static const UChar32 offsetsFromUTF8[6] = {0x00000000UL,
static_cast<UChar32>(0xFA082080UL),
static_cast<UChar32>(0x82082080UL)};
-static inline UChar32 readUTF8Sequence(
- const char*& sequence, // NOLINT(runtime/references)
- size_t length) {
+static inline UChar32 readUTF8Sequence(const char*& sequence, size_t length) {
UChar32 character = 0;
// The cases all fall through.
@@ -336,8 +334,7 @@ ConversionResult convertUTF8ToUTF16(const char** sourceStart,
// Helper to write a three-byte UTF-8 code point to the buffer, caller must
// check room is available.
-static inline void putUTF8Triple(char*& buffer, // NOLINT(runtime/references)
- UChar ch) {
+static inline void putUTF8Triple(char*& buffer, UChar ch) {
*buffer++ = static_cast<char>(((ch >> 12) & 0x0F) | 0xE0);
*buffer++ = static_cast<char>(((ch >> 6) & 0x3F) | 0x80);
*buffer++ = static_cast<char>((ch & 0x3F) | 0x80);
diff --git a/chromium/v8/src/inspector/value-mirror.cc b/chromium/v8/src/inspector/value-mirror.cc
index 18d870a94a3..5348b76a120 100644
--- a/chromium/v8/src/inspector/value-mirror.cc
+++ b/chromium/v8/src/inspector/value-mirror.cc
@@ -165,10 +165,11 @@ String16 abbreviateString(const String16& value, AbbreviateMode mode) {
String16 descriptionForSymbol(v8::Local<v8::Context> context,
v8::Local<v8::Symbol> symbol) {
- return String16::concat("Symbol(",
- toProtocolStringWithTypeCheck(context->GetIsolate(),
- symbol->Description()),
- ")");
+ v8::Isolate* isolate = context->GetIsolate();
+ return String16::concat(
+ "Symbol(",
+ toProtocolStringWithTypeCheck(isolate, symbol->Description(isolate)),
+ ")");
}
String16 descriptionForBigInt(v8::Local<v8::Context> context,
@@ -194,21 +195,19 @@ String16 descriptionForPrimitiveType(v8::Local<v8::Context> context,
return String16();
}
-String16 descriptionForRegExp(v8::Isolate* isolate,
+String16 descriptionForObject(v8::Isolate* isolate,
+ v8::Local<v8::Object> object) {
+ return toProtocolString(isolate, object->GetConstructorName());
+}
+
+String16 descriptionForRegExp(v8::Local<v8::Context> context,
v8::Local<v8::RegExp> value) {
- String16Builder description;
- description.append('/');
- description.append(toProtocolString(isolate, value->GetSource()));
- description.append('/');
- v8::RegExp::Flags flags = value->GetFlags();
- if (flags & v8::RegExp::Flags::kGlobal) description.append('g');
- if (flags & v8::RegExp::Flags::kIgnoreCase) description.append('i');
- if (flags & v8::RegExp::Flags::kLinear) description.append('l');
- if (flags & v8::RegExp::Flags::kMultiline) description.append('m');
- if (flags & v8::RegExp::Flags::kDotAll) description.append('s');
- if (flags & v8::RegExp::Flags::kUnicode) description.append('u');
- if (flags & v8::RegExp::Flags::kSticky) description.append('y');
- return description.toString();
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::Local<v8::String> description;
+ if (!value->ToString(context).ToLocal(&description)) {
+ return descriptionForObject(isolate, value);
+ }
+ return toProtocolString(isolate, description);
}
enum class ErrorType { kNative, kClient };
@@ -266,11 +265,6 @@ String16 descriptionForError(v8::Local<v8::Context> context,
return description + stackWithoutMessage;
}
-String16 descriptionForObject(v8::Isolate* isolate,
- v8::Local<v8::Object> object) {
- return toProtocolString(isolate, object->GetConstructorName());
-}
-
String16 descriptionForDate(v8::Local<v8::Context> context,
v8::Local<v8::Date> date) {
v8::Isolate* isolate = context->GetIsolate();
@@ -355,14 +349,9 @@ String16 descriptionForEntry(v8::Local<v8::Context> context,
return key.length() ? ("{" + key + " => " + value + "}") : value;
}
-String16 descriptionForFunction(v8::Local<v8::Context> context,
- v8::Local<v8::Function> value) {
- v8::Isolate* isolate = context->GetIsolate();
- v8::TryCatch tryCatch(isolate);
- v8::Local<v8::String> description;
- if (!value->ToString(context).ToLocal(&description)) {
- return descriptionForObject(isolate, value);
- }
+String16 descriptionForFunction(v8::Local<v8::Function> value) {
+ v8::Isolate* isolate = value->GetIsolate();
+ v8::Local<v8::String> description = v8::debug::GetFunctionDescription(value);
return toProtocolString(isolate, description);
}
@@ -652,7 +641,7 @@ class FunctionMirror final : public ValueMirror {
.setType(RemoteObject::TypeEnum::Function)
.setClassName(toProtocolStringWithTypeCheck(
context->GetIsolate(), m_value->GetConstructorName()))
- .setDescription(descriptionForFunction(context, m_value))
+ .setDescription(descriptionForFunction(m_value))
.build();
}
return Response::Success();
@@ -673,7 +662,7 @@ class FunctionMirror final : public ValueMirror {
*preview =
ObjectPreview::create()
.setType(RemoteObject::TypeEnum::Function)
- .setDescription(descriptionForFunction(context, m_value))
+ .setDescription(descriptionForFunction(m_value))
.setOverflow(false)
.setProperties(std::make_unique<protocol::Array<PropertyPreview>>())
.build();
@@ -1216,7 +1205,6 @@ bool ValueMirror::getProperties(v8::Local<v8::Context> context,
return false;
}
}
- bool shouldSkipProto = internalType == V8InternalValueType::kScopeList;
bool formatAccessorsAsProperties =
clientFor(context)->formatAccessorsAsProperties(object);
@@ -1321,7 +1309,6 @@ bool ValueMirror::getProperties(v8::Local<v8::Context> context,
}
}
if (accessorPropertiesOnly && !isAccessorProperty) continue;
- if (name == "__proto__") shouldSkipProto = true;
auto mirror = PropertyMirror{name,
writable,
configurable,
@@ -1340,16 +1327,6 @@ bool ValueMirror::getProperties(v8::Local<v8::Context> context,
return false;
}
}
- if (!shouldSkipProto && ownProperties && !object->IsProxy() &&
- !accessorPropertiesOnly) {
- v8::Local<v8::Value> prototype = object->GetPrototype();
- if (prototype->IsObject()) {
- accumulator->Add(PropertyMirror{String16("__proto__"), true, true, false,
- true, false,
- ValueMirror::create(context, prototype),
- nullptr, nullptr, nullptr, nullptr});
- }
- }
return true;
}
@@ -1615,7 +1592,7 @@ std::unique_ptr<ValueMirror> ValueMirror::create(v8::Local<v8::Context> context,
if (value->IsRegExp()) {
return std::make_unique<ObjectMirror>(
value, RemoteObject::SubtypeEnum::Regexp,
- descriptionForRegExp(isolate, value.As<v8::RegExp>()));
+ descriptionForRegExp(context, value.As<v8::RegExp>()));
}
if (value->IsProxy()) {
return std::make_unique<ObjectMirror>(
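
The rewritten descriptionForRegExp and descriptionForFunction above both lean on the same MaybeLocal-to-Local pattern from the public V8 API: try the context-dependent conversion, fall back to the constructor name if it fails. A reduced sketch of that pattern (the helper name is illustrative; assumes only the public v8.h API):

#include <v8.h>

// Returns object->ToString(context) if it succeeds, otherwise the
// constructor name, mirroring the fallback used by descriptionForRegExp.
v8::Local<v8::String> DescribeOrFallback(v8::Local<v8::Context> context,
                                         v8::Local<v8::Object> object) {
  v8::Local<v8::String> description;
  if (!object->ToString(context).ToLocal(&description)) {
    // ToString can fail (it may throw); fall back the way
    // descriptionForObject does.
    return object->GetConstructorName();
  }
  return description;
}
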
diff --git a/chromium/v8/src/interpreter/bytecode-array-builder.cc b/chromium/v8/src/interpreter/bytecode-array-builder.cc
index 63c07683e6d..23c622b0337 100644
--- a/chromium/v8/src/interpreter/bytecode-array-builder.cc
+++ b/chromium/v8/src/interpreter/bytecode-array-builder.cc
@@ -81,9 +81,8 @@ Register BytecodeArrayBuilder::Local(int index) const {
return Register(index);
}
-template <typename LocalIsolate>
-Handle<BytecodeArray> BytecodeArrayBuilder::ToBytecodeArray(
- LocalIsolate* isolate) {
+template <typename IsolateT>
+Handle<BytecodeArray> BytecodeArrayBuilder::ToBytecodeArray(IsolateT* isolate) {
DCHECK(RemainderOfBlockIsDead());
DCHECK(!bytecode_generated_);
bytecode_generated_ = true;
@@ -115,9 +114,9 @@ int BytecodeArrayBuilder::CheckBytecodeMatches(BytecodeArray bytecode) {
}
#endif
-template <typename LocalIsolate>
+template <typename IsolateT>
Handle<ByteArray> BytecodeArrayBuilder::ToSourcePositionTable(
- LocalIsolate* isolate) {
+ IsolateT* isolate) {
DCHECK(RemainderOfBlockIsDead());
return bytecode_array_writer_.ToSourcePositionTable(isolate);
@@ -726,11 +725,13 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::LoadGlobal(const AstRawString* name,
DCHECK_EQ(GetTypeofModeFromSlotKind(feedback_vector_spec()->GetKind(
FeedbackVector::ToSlot(feedback_slot))),
typeof_mode);
- if (typeof_mode == INSIDE_TYPEOF) {
- OutputLdaGlobalInsideTypeof(name_index, feedback_slot);
- } else {
- DCHECK_EQ(typeof_mode, NOT_INSIDE_TYPEOF);
- OutputLdaGlobal(name_index, feedback_slot);
+ switch (typeof_mode) {
+ case TypeofMode::kInside:
+ OutputLdaGlobalInsideTypeof(name_index, feedback_slot);
+ break;
+ case TypeofMode::kNotInside:
+ OutputLdaGlobal(name_index, feedback_slot);
+ break;
}
return *this;
}
@@ -775,11 +776,13 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::StoreContextSlot(Register context,
BytecodeArrayBuilder& BytecodeArrayBuilder::LoadLookupSlot(
const AstRawString* name, TypeofMode typeof_mode) {
size_t name_index = GetConstantPoolEntry(name);
- if (typeof_mode == INSIDE_TYPEOF) {
- OutputLdaLookupSlotInsideTypeof(name_index);
- } else {
- DCHECK_EQ(typeof_mode, NOT_INSIDE_TYPEOF);
- OutputLdaLookupSlot(name_index);
+ switch (typeof_mode) {
+ case TypeofMode::kInside:
+ OutputLdaLookupSlotInsideTypeof(name_index);
+ break;
+ case TypeofMode::kNotInside:
+ OutputLdaLookupSlot(name_index);
+ break;
}
return *this;
}
@@ -788,11 +791,13 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::LoadLookupContextSlot(
const AstRawString* name, TypeofMode typeof_mode, int slot_index,
int depth) {
size_t name_index = GetConstantPoolEntry(name);
- if (typeof_mode == INSIDE_TYPEOF) {
- OutputLdaLookupContextSlotInsideTypeof(name_index, slot_index, depth);
- } else {
- DCHECK(typeof_mode == NOT_INSIDE_TYPEOF);
- OutputLdaLookupContextSlot(name_index, slot_index, depth);
+ switch (typeof_mode) {
+ case TypeofMode::kInside:
+ OutputLdaLookupContextSlotInsideTypeof(name_index, slot_index, depth);
+ break;
+ case TypeofMode::kNotInside:
+ OutputLdaLookupContextSlot(name_index, slot_index, depth);
+ break;
}
return *this;
}
@@ -801,11 +806,13 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::LoadLookupGlobalSlot(
const AstRawString* name, TypeofMode typeof_mode, int feedback_slot,
int depth) {
size_t name_index = GetConstantPoolEntry(name);
- if (typeof_mode == INSIDE_TYPEOF) {
- OutputLdaLookupGlobalSlotInsideTypeof(name_index, feedback_slot, depth);
- } else {
- DCHECK(typeof_mode == NOT_INSIDE_TYPEOF);
- OutputLdaLookupGlobalSlot(name_index, feedback_slot, depth);
+ switch (typeof_mode) {
+ case TypeofMode::kInside:
+ OutputLdaLookupGlobalSlotInsideTypeof(name_index, feedback_slot, depth);
+ break;
+ case TypeofMode::kNotInside:
+ OutputLdaLookupGlobalSlot(name_index, feedback_slot, depth);
+ break;
}
return *this;
}
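
The TypeofMode changes above replace the loose INSIDE_TYPEOF / NOT_INSIDE_TYPEOF constants with a scoped enum and exhaustive switches. A minimal sketch of the dispatch shape (the Output* emitters are reduced to comments; every name other than TypeofMode is illustrative):

enum class TypeofMode { kInside, kNotInside };

void EmitGlobalLoad(TypeofMode typeof_mode) {
  switch (typeof_mode) {
    case TypeofMode::kInside:
      // OutputLdaGlobalInsideTypeof(...)
      break;
    case TypeofMode::kNotInside:
      // OutputLdaGlobal(...)
      break;
  }
  // With a scoped enum and no default case, compilers can warn when a new
  // enumerator is added but not handled, which the old if/else could not.
}
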
diff --git a/chromium/v8/src/interpreter/bytecode-array-builder.h b/chromium/v8/src/interpreter/bytecode-array-builder.h
index 28716b401bb..3d72777c5a4 100644
--- a/chromium/v8/src/interpreter/bytecode-array-builder.h
+++ b/chromium/v8/src/interpreter/bytecode-array-builder.h
@@ -46,12 +46,12 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final {
BytecodeArrayBuilder(const BytecodeArrayBuilder&) = delete;
BytecodeArrayBuilder& operator=(const BytecodeArrayBuilder&) = delete;
- template <typename LocalIsolate>
+ template <typename IsolateT>
EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
- Handle<BytecodeArray> ToBytecodeArray(LocalIsolate* isolate);
- template <typename LocalIsolate>
+ Handle<BytecodeArray> ToBytecodeArray(IsolateT* isolate);
+ template <typename IsolateT>
EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
- Handle<ByteArray> ToSourcePositionTable(LocalIsolate* isolate);
+ Handle<ByteArray> ToSourcePositionTable(IsolateT* isolate);
#ifdef DEBUG
int CheckBytecodeMatches(BytecodeArray bytecode);
diff --git a/chromium/v8/src/interpreter/bytecode-array-iterator.cc b/chromium/v8/src/interpreter/bytecode-array-iterator.cc
index 2579f5d3782..bb1fdcb95ff 100644
--- a/chromium/v8/src/interpreter/bytecode-array-iterator.cc
+++ b/chromium/v8/src/interpreter/bytecode-array-iterator.cc
@@ -207,9 +207,9 @@ Runtime::FunctionId BytecodeArrayIterator::GetIntrinsicIdOperand(
static_cast<IntrinsicsHelper::IntrinsicId>(raw_id));
}
-template <typename LocalIsolate>
+template <typename IsolateT>
Handle<Object> BytecodeArrayIterator::GetConstantAtIndex(
- int index, LocalIsolate* isolate) const {
+ int index, IsolateT* isolate) const {
return handle(bytecode_array()->constant_pool().get(index), isolate);
}
@@ -221,9 +221,9 @@ Smi BytecodeArrayIterator::GetConstantAtIndexAsSmi(int index) const {
return Smi::cast(bytecode_array()->constant_pool().get(index));
}
-template <typename LocalIsolate>
+template <typename IsolateT>
Handle<Object> BytecodeArrayIterator::GetConstantForIndexOperand(
- int operand_index, LocalIsolate* isolate) const {
+ int operand_index, IsolateT* isolate) const {
return GetConstantAtIndex(GetIndexOperand(operand_index), isolate);
}
diff --git a/chromium/v8/src/interpreter/bytecode-array-iterator.h b/chromium/v8/src/interpreter/bytecode-array-iterator.h
index d0c676d2a34..5e93cbccb86 100644
--- a/chromium/v8/src/interpreter/bytecode-array-iterator.h
+++ b/chromium/v8/src/interpreter/bytecode-array-iterator.h
@@ -115,13 +115,13 @@ class V8_EXPORT_PRIVATE BytecodeArrayIterator {
Runtime::FunctionId GetRuntimeIdOperand(int operand_index) const;
Runtime::FunctionId GetIntrinsicIdOperand(int operand_index) const;
uint32_t GetNativeContextIndexOperand(int operand_index) const;
- template <typename LocalIsolate>
- Handle<Object> GetConstantAtIndex(int offset, LocalIsolate* isolate) const;
+ template <typename IsolateT>
+ Handle<Object> GetConstantAtIndex(int offset, IsolateT* isolate) const;
bool IsConstantAtIndexSmi(int offset) const;
Smi GetConstantAtIndexAsSmi(int offset) const;
- template <typename LocalIsolate>
+ template <typename IsolateT>
Handle<Object> GetConstantForIndexOperand(int operand_index,
- LocalIsolate* isolate) const;
+ IsolateT* isolate) const;
// Returns the relative offset of the branch target at the current bytecode.
// It is an error to call this method if the bytecode is not for a jump or
diff --git a/chromium/v8/src/interpreter/bytecode-array-writer.cc b/chromium/v8/src/interpreter/bytecode-array-writer.cc
index 0172d3626b5..2ed8e614bb2 100644
--- a/chromium/v8/src/interpreter/bytecode-array-writer.cc
+++ b/chromium/v8/src/interpreter/bytecode-array-writer.cc
@@ -37,9 +37,9 @@ BytecodeArrayWriter::BytecodeArrayWriter(
bytecodes_.reserve(512); // Derived via experimentation.
}
-template <typename LocalIsolate>
+template <typename IsolateT>
Handle<BytecodeArray> BytecodeArrayWriter::ToBytecodeArray(
- LocalIsolate* isolate, int register_count, int parameter_count,
+ IsolateT* isolate, int register_count, int parameter_count,
Handle<ByteArray> handler_table) {
DCHECK_EQ(0, unbound_jumps_);
@@ -63,9 +63,9 @@ template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
LocalIsolate* isolate, int register_count, int parameter_count,
Handle<ByteArray> handler_table);
-template <typename LocalIsolate>
+template <typename IsolateT>
Handle<ByteArray> BytecodeArrayWriter::ToSourcePositionTable(
- LocalIsolate* isolate) {
+ IsolateT* isolate) {
DCHECK(!source_position_table_builder_.Lazy());
Handle<ByteArray> source_position_table =
source_position_table_builder_.Omit()
diff --git a/chromium/v8/src/interpreter/bytecode-array-writer.h b/chromium/v8/src/interpreter/bytecode-array-writer.h
index 6517ad9f5e3..9976f59c23b 100644
--- a/chromium/v8/src/interpreter/bytecode-array-writer.h
+++ b/chromium/v8/src/interpreter/bytecode-array-writer.h
@@ -55,15 +55,15 @@ class V8_EXPORT_PRIVATE BytecodeArrayWriter final {
void SetFunctionEntrySourcePosition(int position);
- template <typename LocalIsolate>
+ template <typename IsolateT>
EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
- Handle<BytecodeArray> ToBytecodeArray(LocalIsolate* isolate,
- int register_count, int parameter_count,
+ Handle<BytecodeArray> ToBytecodeArray(IsolateT* isolate, int register_count,
+ int parameter_count,
Handle<ByteArray> handler_table);
- template <typename LocalIsolate>
+ template <typename IsolateT>
EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
- Handle<ByteArray> ToSourcePositionTable(LocalIsolate* isolate);
+ Handle<ByteArray> ToSourcePositionTable(IsolateT* isolate);
#ifdef DEBUG
// Returns -1 if they match or the offset of the first mismatching byte.
diff --git a/chromium/v8/src/interpreter/bytecode-generator.cc b/chromium/v8/src/interpreter/bytecode-generator.cc
index 76686a9d62e..a08c42126b6 100644
--- a/chromium/v8/src/interpreter/bytecode-generator.cc
+++ b/chromium/v8/src/interpreter/bytecode-generator.cc
@@ -740,11 +740,11 @@ class V8_NODISCARD BytecodeGenerator::TestResultScope final
// Used to build a list of toplevel declaration data.
class BytecodeGenerator::TopLevelDeclarationsBuilder final : public ZoneObject {
public:
- template <typename LocalIsolate>
+ template <typename IsolateT>
Handle<FixedArray> AllocateDeclarations(UnoptimizedCompilationInfo* info,
BytecodeGenerator* generator,
Handle<Script> script,
- LocalIsolate* isolate) {
+ IsolateT* isolate) {
DCHECK(has_constant_pool_entry_);
Handle<FixedArray> data =
@@ -1187,14 +1187,14 @@ using NullContextScopeFor = typename NullContextScopeHelper<Isolate>::Type;
} // namespace
-template <typename LocalIsolate>
+template <typename IsolateT>
Handle<BytecodeArray> BytecodeGenerator::FinalizeBytecode(
- LocalIsolate* isolate, Handle<Script> script) {
+ IsolateT* isolate, Handle<Script> script) {
DCHECK_EQ(ThreadId::Current(), isolate->thread_id());
#ifdef DEBUG
// Unoptimized compilation should be context-independent. Verify that we don't
// access the native context by nulling it out during finalization.
- NullContextScopeFor<LocalIsolate> null_context_scope(isolate);
+ NullContextScopeFor<IsolateT> null_context_scope(isolate);
#endif
AllocateDeferredConstants(isolate, script);
@@ -1225,14 +1225,14 @@ template Handle<BytecodeArray> BytecodeGenerator::FinalizeBytecode(
template Handle<BytecodeArray> BytecodeGenerator::FinalizeBytecode(
LocalIsolate* isolate, Handle<Script> script);
-template <typename LocalIsolate>
+template <typename IsolateT>
Handle<ByteArray> BytecodeGenerator::FinalizeSourcePositionTable(
- LocalIsolate* isolate) {
+ IsolateT* isolate) {
DCHECK_EQ(ThreadId::Current(), isolate->thread_id());
#ifdef DEBUG
// Unoptimized compilation should be context-independent. Verify that we don't
// access the native context by nulling it out during finalization.
- NullContextScopeFor<LocalIsolate> null_context_scope(isolate);
+ NullContextScopeFor<IsolateT> null_context_scope(isolate);
#endif
Handle<ByteArray> source_position_table =
@@ -1257,8 +1257,8 @@ int BytecodeGenerator::CheckBytecodeMatches(BytecodeArray bytecode) {
}
#endif
-template <typename LocalIsolate>
-void BytecodeGenerator::AllocateDeferredConstants(LocalIsolate* isolate,
+template <typename IsolateT>
+void BytecodeGenerator::AllocateDeferredConstants(IsolateT* isolate,
Handle<Script> script) {
if (top_level_builder()->has_top_level_declaration()) {
// Build global declaration pair array.
@@ -3289,7 +3289,7 @@ void BytecodeGenerator::BuildVariableLoad(Variable* variable,
break;
}
case VariableLocation::REPL_GLOBAL: {
- DCHECK(variable->IsReplGlobalLet());
+ DCHECK(variable->IsReplGlobal());
FeedbackSlot slot = GetCachedLoadGlobalICSlot(typeof_mode, variable);
builder()->LoadGlobal(variable->raw_name(), feedback_index(slot),
typeof_mode);
@@ -3478,7 +3478,8 @@ void BytecodeGenerator::BuildVariableAssignment(
break;
}
case VariableLocation::REPL_GLOBAL: {
- // A let declaration like 'let x = 7' is effectively translated to:
+ // A let or const declaration like 'let x = 7' is effectively translated
+ // to:
// <top of the script>:
// ScriptContext.x = TheHole;
// ...
@@ -3488,19 +3489,23 @@ void BytecodeGenerator::BuildVariableAssignment(
// The ScriptContext slot for 'x' that we store to here is not
// necessarily the ScriptContext of this script, but rather the
// first ScriptContext that has a slot for name 'x'.
- DCHECK(variable->IsReplGlobalLet());
+ DCHECK(variable->IsReplGlobal());
if (op == Token::INIT) {
RegisterList store_args = register_allocator()->NewRegisterList(2);
builder()
->StoreAccumulatorInRegister(store_args[1])
.LoadLiteral(variable->raw_name())
.StoreAccumulatorInRegister(store_args[0]);
- builder()->CallRuntime(Runtime::kStoreGlobalNoHoleCheckForReplLet,
- store_args);
+ builder()->CallRuntime(
+ Runtime::kStoreGlobalNoHoleCheckForReplLetOrConst, store_args);
} else {
- FeedbackSlot slot =
- GetCachedStoreGlobalICSlot(language_mode(), variable);
- builder()->StoreGlobal(variable->raw_name(), feedback_index(slot));
+ if (mode == VariableMode::kConst) {
+ builder()->CallRuntime(Runtime::kThrowConstAssignError);
+ } else {
+ FeedbackSlot slot =
+ GetCachedStoreGlobalICSlot(language_mode(), variable);
+ builder()->StoreGlobal(variable->raw_name(), feedback_index(slot));
+ }
}
break;
}
@@ -5420,7 +5425,7 @@ void BytecodeGenerator::VisitForTypeOfValue(Expression* expr) {
// perform a non-contextual load in case the operand is a variable proxy.
VariableProxy* proxy = expr->AsVariableProxy();
BuildVariableLoadForAccumulatorValue(proxy->var(), proxy->hole_check_mode(),
- INSIDE_TYPEOF);
+ TypeofMode::kInside);
} else {
VisitForAccumulatorValue(expr);
}
@@ -6850,7 +6855,7 @@ int BytecodeGenerator::feedback_index(FeedbackSlot slot) const {
FeedbackSlot BytecodeGenerator::GetCachedLoadGlobalICSlot(
TypeofMode typeof_mode, Variable* variable) {
FeedbackSlotCache::SlotKind slot_kind =
- typeof_mode == INSIDE_TYPEOF
+ typeof_mode == TypeofMode::kInside
? FeedbackSlotCache::SlotKind::kLoadGlobalInsideTypeof
: FeedbackSlotCache::SlotKind::kLoadGlobalNotInsideTypeof;
FeedbackSlot slot(feedback_slot_cache()->Get(slot_kind, variable));
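
The REPL-global hunk above extends the existing 'let' translation to 'const': initialization still goes through the no-hole-check store runtime call, but a later assignment to a REPL 'const' now emits a call that throws. Reduced to a decision table (a sketch, not the builder code; the enum and function names are illustrative):

enum class ReplVariableMode { kLet, kConst };

// Which action BuildVariableAssignment picks for a REPL_GLOBAL variable.
const char* ReplGlobalStoreAction(ReplVariableMode mode, bool is_init) {
  if (is_init) return "Runtime::kStoreGlobalNoHoleCheckForReplLetOrConst";
  if (mode == ReplVariableMode::kConst) return "Runtime::kThrowConstAssignError";
  return "StaGlobal via the cached store-global feedback slot";
}
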
diff --git a/chromium/v8/src/interpreter/bytecode-generator.h b/chromium/v8/src/interpreter/bytecode-generator.h
index 69d5bf89576..f3b048d52d2 100644
--- a/chromium/v8/src/interpreter/bytecode-generator.h
+++ b/chromium/v8/src/interpreter/bytecode-generator.h
@@ -37,11 +37,11 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
std::vector<FunctionLiteral*>* eager_inner_literals);
void GenerateBytecode(uintptr_t stack_limit);
- template <typename LocalIsolate>
- Handle<BytecodeArray> FinalizeBytecode(LocalIsolate* isolate,
+ template <typename IsolateT>
+ Handle<BytecodeArray> FinalizeBytecode(IsolateT* isolate,
Handle<Script> script);
- template <typename LocalIsolate>
- Handle<ByteArray> FinalizeSourcePositionTable(LocalIsolate* isolate);
+ template <typename IsolateT>
+ Handle<ByteArray> FinalizeSourcePositionTable(IsolateT* isolate);
#ifdef DEBUG
int CheckBytecodeMatches(BytecodeArray bytecode);
@@ -165,8 +165,8 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
};
void GenerateBytecodeBody();
- template <typename LocalIsolate>
- void AllocateDeferredConstants(LocalIsolate* isolate, Handle<Script> script);
+ template <typename IsolateT>
+ void AllocateDeferredConstants(IsolateT* isolate, Handle<Script> script);
DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
@@ -243,10 +243,10 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
const AstRawString* name);
void BuildVariableLoad(Variable* variable, HoleCheckMode hole_check_mode,
- TypeofMode typeof_mode = NOT_INSIDE_TYPEOF);
+ TypeofMode typeof_mode = TypeofMode::kNotInside);
void BuildVariableLoadForAccumulatorValue(
Variable* variable, HoleCheckMode hole_check_mode,
- TypeofMode typeof_mode = NOT_INSIDE_TYPEOF);
+ TypeofMode typeof_mode = TypeofMode::kNotInside);
void BuildVariableAssignment(
Variable* variable, Token::Value op, HoleCheckMode hole_check_mode,
LookupHoistingMode lookup_hoisting_mode = LookupHoistingMode::kNormal);
diff --git a/chromium/v8/src/interpreter/bytecode-operands.h b/chromium/v8/src/interpreter/bytecode-operands.h
index c9cca226abb..4032980fe8a 100644
--- a/chromium/v8/src/interpreter/bytecode-operands.h
+++ b/chromium/v8/src/interpreter/bytecode-operands.h
@@ -151,7 +151,6 @@ class BytecodeOperands : public AllStatic {
#undef OPERAND_SCALE_COUNT
static constexpr int OperandScaleAsIndex(OperandScale operand_scale) {
-#if V8_HAS_CXX14_CONSTEXPR
#ifdef DEBUG
int result = static_cast<int>(operand_scale) >> 1;
switch (operand_scale) {
@@ -168,7 +167,6 @@ class BytecodeOperands : public AllStatic {
UNREACHABLE();
}
#endif
-#endif
return static_cast<int>(operand_scale) >> 1;
}
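
Dropping the V8_HAS_CXX14_CONSTEXPR guard above reflects that the toolchain baseline now allows statements inside constexpr functions, so the DEBUG-only check no longer needs its own feature test. An illustration under that assumption (the enum is a stand-in, not the real OperandScale):

enum class Scale : int { kSingle = 1, kDouble = 2, kQuadruple = 4 };

constexpr int ScaleAsIndex(Scale scale) {
#ifdef DEBUG
  // C++14 "relaxed constexpr" permits local statements such as this switch;
  // before C++14 a constexpr body had to be a single return expression.
  switch (scale) {
    case Scale::kSingle:
    case Scale::kDouble:
    case Scale::kQuadruple:
      break;
  }
#endif
  return static_cast<int>(scale) >> 1;
}

static_assert(ScaleAsIndex(Scale::kQuadruple) == 2, "4 >> 1 == 2");
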
diff --git a/chromium/v8/src/interpreter/bytecodes.h b/chromium/v8/src/interpreter/bytecodes.h
index 56d9d5af0d1..e4589918b69 100644
--- a/chromium/v8/src/interpreter/bytecodes.h
+++ b/chromium/v8/src/interpreter/bytecodes.h
@@ -68,7 +68,9 @@ namespace interpreter {
OperandType::kRuntimeId, OperandType::kReg, OperandType::kReg, \
OperandType::kReg) \
\
- /* Loading the accumulator */ \
+ /* Side-effect-free bytecodes -- carefully ordered for efficient checks */ \
+ /* - [Loading the accumulator] */ \
+ V(Ldar, ImplicitRegisterUse::kWriteAccumulator, OperandType::kReg) \
V(LdaZero, ImplicitRegisterUse::kWriteAccumulator) \
V(LdaSmi, ImplicitRegisterUse::kWriteAccumulator, OperandType::kImm) \
V(LdaUndefined, ImplicitRegisterUse::kWriteAccumulator) \
@@ -77,6 +79,27 @@ namespace interpreter {
V(LdaTrue, ImplicitRegisterUse::kWriteAccumulator) \
V(LdaFalse, ImplicitRegisterUse::kWriteAccumulator) \
V(LdaConstant, ImplicitRegisterUse::kWriteAccumulator, OperandType::kIdx) \
+ V(LdaContextSlot, ImplicitRegisterUse::kWriteAccumulator, OperandType::kReg, \
+ OperandType::kIdx, OperandType::kUImm) \
+ V(LdaImmutableContextSlot, ImplicitRegisterUse::kWriteAccumulator, \
+ OperandType::kReg, OperandType::kIdx, OperandType::kUImm) \
+ V(LdaCurrentContextSlot, ImplicitRegisterUse::kWriteAccumulator, \
+ OperandType::kIdx) \
+ V(LdaImmutableCurrentContextSlot, ImplicitRegisterUse::kWriteAccumulator, \
+ OperandType::kIdx) \
+ /* - [Register Loads ] */ \
+ V(Star, ImplicitRegisterUse::kReadAccumulator, OperandType::kRegOut) \
+ V(Mov, ImplicitRegisterUse::kNone, OperandType::kReg, OperandType::kRegOut) \
+ V(PushContext, ImplicitRegisterUse::kReadAccumulator, OperandType::kRegOut) \
+ V(PopContext, ImplicitRegisterUse::kNone, OperandType::kReg) \
+ /* - [Test Operations ] */ \
+ V(TestReferenceEqual, ImplicitRegisterUse::kReadWriteAccumulator, \
+ OperandType::kReg) \
+ V(TestUndetectable, ImplicitRegisterUse::kReadWriteAccumulator) \
+ V(TestNull, ImplicitRegisterUse::kReadWriteAccumulator) \
+ V(TestUndefined, ImplicitRegisterUse::kReadWriteAccumulator) \
+ V(TestTypeOf, ImplicitRegisterUse::kReadWriteAccumulator, \
+ OperandType::kFlag8) \
\
/* Globals */ \
V(LdaGlobal, ImplicitRegisterUse::kWriteAccumulator, OperandType::kIdx, \
@@ -87,16 +110,6 @@ namespace interpreter {
OperandType::kIdx) \
\
/* Context operations */ \
- V(PushContext, ImplicitRegisterUse::kReadAccumulator, OperandType::kRegOut) \
- V(PopContext, ImplicitRegisterUse::kNone, OperandType::kReg) \
- V(LdaContextSlot, ImplicitRegisterUse::kWriteAccumulator, OperandType::kReg, \
- OperandType::kIdx, OperandType::kUImm) \
- V(LdaImmutableContextSlot, ImplicitRegisterUse::kWriteAccumulator, \
- OperandType::kReg, OperandType::kIdx, OperandType::kUImm) \
- V(LdaCurrentContextSlot, ImplicitRegisterUse::kWriteAccumulator, \
- OperandType::kIdx) \
- V(LdaImmutableCurrentContextSlot, ImplicitRegisterUse::kWriteAccumulator, \
- OperandType::kIdx) \
V(StaContextSlot, ImplicitRegisterUse::kReadAccumulator, OperandType::kReg, \
OperandType::kIdx, OperandType::kUImm) \
V(StaCurrentContextSlot, ImplicitRegisterUse::kReadAccumulator, \
@@ -117,13 +130,6 @@ namespace interpreter {
V(StaLookupSlot, ImplicitRegisterUse::kReadWriteAccumulator, \
OperandType::kIdx, OperandType::kFlag8) \
\
- /* Register-accumulator transfers */ \
- V(Ldar, ImplicitRegisterUse::kWriteAccumulator, OperandType::kReg) \
- V(Star, ImplicitRegisterUse::kReadAccumulator, OperandType::kRegOut) \
- \
- /* Register-register transfers */ \
- V(Mov, ImplicitRegisterUse::kNone, OperandType::kReg, OperandType::kRegOut) \
- \
/* Property loads (LoadIC) operations */ \
V(LdaNamedProperty, ImplicitRegisterUse::kWriteAccumulator, \
OperandType::kReg, OperandType::kIdx, OperandType::kIdx) \
@@ -272,7 +278,7 @@ namespace interpreter {
OperandType::kReg, OperandType::kRegList, OperandType::kRegCount, \
OperandType::kIdx) \
\
- /* Test Operators */ \
+ /* Effectful Test Operators */ \
V(TestEqual, ImplicitRegisterUse::kReadWriteAccumulator, OperandType::kReg, \
OperandType::kIdx) \
V(TestEqualStrict, ImplicitRegisterUse::kReadWriteAccumulator, \
@@ -285,17 +291,10 @@ namespace interpreter {
OperandType::kReg, OperandType::kIdx) \
V(TestGreaterThanOrEqual, ImplicitRegisterUse::kReadWriteAccumulator, \
OperandType::kReg, OperandType::kIdx) \
- V(TestReferenceEqual, ImplicitRegisterUse::kReadWriteAccumulator, \
- OperandType::kReg) \
V(TestInstanceOf, ImplicitRegisterUse::kReadWriteAccumulator, \
OperandType::kReg, OperandType::kIdx) \
V(TestIn, ImplicitRegisterUse::kReadWriteAccumulator, OperandType::kReg, \
OperandType::kIdx) \
- V(TestUndetectable, ImplicitRegisterUse::kReadWriteAccumulator) \
- V(TestNull, ImplicitRegisterUse::kReadWriteAccumulator) \
- V(TestUndefined, ImplicitRegisterUse::kReadWriteAccumulator) \
- V(TestTypeOf, ImplicitRegisterUse::kReadWriteAccumulator, \
- OperandType::kFlag8) \
\
/* Cast operators */ \
V(ToName, ImplicitRegisterUse::kReadAccumulator, OperandType::kRegOut) \
@@ -650,25 +649,17 @@ class V8_EXPORT_PRIVATE Bytecodes final : public AllStatic {
// Return true if |bytecode| is an accumulator load without effects,
// e.g. LdaConstant, LdaTrue, Ldar.
static constexpr bool IsAccumulatorLoadWithoutEffects(Bytecode bytecode) {
- return bytecode == Bytecode::kLdar || bytecode == Bytecode::kLdaZero ||
- bytecode == Bytecode::kLdaSmi || bytecode == Bytecode::kLdaNull ||
- bytecode == Bytecode::kLdaTrue || bytecode == Bytecode::kLdaFalse ||
- bytecode == Bytecode::kLdaUndefined ||
- bytecode == Bytecode::kLdaTheHole ||
- bytecode == Bytecode::kLdaConstant ||
- bytecode == Bytecode::kLdaContextSlot ||
- bytecode == Bytecode::kLdaCurrentContextSlot ||
- bytecode == Bytecode::kLdaImmutableContextSlot ||
- bytecode == Bytecode::kLdaImmutableCurrentContextSlot;
+ STATIC_ASSERT(Bytecode::kLdar < Bytecode::kLdaImmutableCurrentContextSlot);
+ return bytecode >= Bytecode::kLdar &&
+ bytecode <= Bytecode::kLdaImmutableCurrentContextSlot;
}
// Returns true if |bytecode| is a compare operation without external effects
// (e.g., type coercion).
static constexpr bool IsCompareWithoutEffects(Bytecode bytecode) {
- return bytecode == Bytecode::kTestUndetectable ||
- bytecode == Bytecode::kTestNull ||
- bytecode == Bytecode::kTestUndefined ||
- bytecode == Bytecode::kTestTypeOf;
+ STATIC_ASSERT(Bytecode::kTestReferenceEqual < Bytecode::kTestTypeOf);
+ return bytecode >= Bytecode::kTestReferenceEqual &&
+ bytecode <= Bytecode::kTestTypeOf;
}
static constexpr bool IsShortStar(Bytecode bytecode) {
@@ -683,8 +674,8 @@ class V8_EXPORT_PRIVATE Bytecodes final : public AllStatic {
// Return true if |bytecode| is a register load without effects,
// e.g. Mov, Star.
static constexpr bool IsRegisterLoadWithoutEffects(Bytecode bytecode) {
- return bytecode == Bytecode::kMov || bytecode == Bytecode::kPopContext ||
- bytecode == Bytecode::kPushContext || IsAnyStar(bytecode);
+ return IsShortStar(bytecode) ||
+ (bytecode >= Bytecode::kStar && bytecode <= Bytecode::kPopContext);
}
// Returns true if the bytecode is a conditional jump taking
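
The bytecodes.h reshuffle above groups the side-effect-free bytecodes into contiguous blocks so that IsAccumulatorLoadWithoutEffects, IsCompareWithoutEffects and IsRegisterLoadWithoutEffects become two comparisons instead of long equality chains. The idiom, reduced to a self-contained sketch (the enumerator list is illustrative, not the real bytecode order):

#include <cstdint>

enum class Bytecode : uint8_t {
  kLdar,
  kLdaZero,
  kLdaConstant,
  kLdaImmutableCurrentContextSlot,  // last side-effect-free accumulator load
  kStaGlobal,                       // first bytecode with side effects
};

constexpr bool IsAccumulatorLoadWithoutEffects(Bytecode bytecode) {
  // The range check is only valid while the enumerators stay contiguous,
  // which is what the STATIC_ASSERTs in the patch guard.
  static_assert(Bytecode::kLdar < Bytecode::kLdaImmutableCurrentContextSlot,
                "range check relies on declaration order");
  return bytecode >= Bytecode::kLdar &&
         bytecode <= Bytecode::kLdaImmutableCurrentContextSlot;
}

static_assert(IsAccumulatorLoadWithoutEffects(Bytecode::kLdaZero), "in range");
static_assert(!IsAccumulatorLoadWithoutEffects(Bytecode::kStaGlobal), "out of range");
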
diff --git a/chromium/v8/src/interpreter/constant-array-builder.cc b/chromium/v8/src/interpreter/constant-array-builder.cc
index 4142d3a7cac..260a75448d9 100644
--- a/chromium/v8/src/interpreter/constant-array-builder.cc
+++ b/chromium/v8/src/interpreter/constant-array-builder.cc
@@ -65,9 +65,9 @@ const ConstantArrayBuilder::Entry& ConstantArrayBuilder::ConstantArraySlice::At(
}
#if DEBUG
-template <typename LocalIsolate>
+template <typename IsolateT>
void ConstantArrayBuilder::ConstantArraySlice::CheckAllElementsAreUnique(
- LocalIsolate* isolate) const {
+ IsolateT* isolate) const {
std::set<Smi> smis;
std::set<double> heap_numbers;
std::set<const AstRawString*> strings;
@@ -164,9 +164,9 @@ ConstantArrayBuilder::ConstantArraySlice* ConstantArrayBuilder::IndexToSlice(
UNREACHABLE();
}
-template <typename LocalIsolate>
+template <typename IsolateT>
MaybeHandle<Object> ConstantArrayBuilder::At(size_t index,
- LocalIsolate* isolate) const {
+ IsolateT* isolate) const {
const ConstantArraySlice* slice = IndexToSlice(index);
DCHECK_LT(index, slice->capacity());
if (index < slice->start_index() + slice->size()) {
@@ -183,8 +183,8 @@ template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
MaybeHandle<Object> ConstantArrayBuilder::At(size_t index,
LocalIsolate* isolate) const;
-template <typename LocalIsolate>
-Handle<FixedArray> ConstantArrayBuilder::ToFixedArray(LocalIsolate* isolate) {
+template <typename IsolateT>
+Handle<FixedArray> ConstantArrayBuilder::ToFixedArray(IsolateT* isolate) {
Handle<FixedArray> fixed_array = isolate->factory()->NewFixedArrayWithHoles(
static_cast<int>(size()), AllocationType::kOld);
int array_index = 0;
@@ -372,9 +372,8 @@ void ConstantArrayBuilder::DiscardReservedEntry(OperandSize operand_size) {
OperandSizeToSlice(operand_size)->Unreserve();
}
-template <typename LocalIsolate>
-Handle<Object> ConstantArrayBuilder::Entry::ToHandle(
- LocalIsolate* isolate) const {
+template <typename IsolateT>
+Handle<Object> ConstantArrayBuilder::Entry::ToHandle(IsolateT* isolate) const {
switch (tag_) {
case Tag::kDeferred:
// We shouldn't have any deferred entries by now.
diff --git a/chromium/v8/src/interpreter/constant-array-builder.h b/chromium/v8/src/interpreter/constant-array-builder.h
index b17995f0a16..2120142a12b 100644
--- a/chromium/v8/src/interpreter/constant-array-builder.h
+++ b/chromium/v8/src/interpreter/constant-array-builder.h
@@ -52,16 +52,16 @@ class V8_EXPORT_PRIVATE ConstantArrayBuilder final {
explicit ConstantArrayBuilder(Zone* zone);
// Generate a fixed array of constant handles based on inserted objects.
- template <typename LocalIsolate>
+ template <typename IsolateT>
EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
- Handle<FixedArray> ToFixedArray(LocalIsolate* isolate);
+ Handle<FixedArray> ToFixedArray(IsolateT* isolate);
// Returns the object, as a handle in |isolate|, that is in the constant pool
// array at index |index|. Returns null if there is no handle at this index.
// Only expected to be used in tests.
- template <typename LocalIsolate>
+ template <typename IsolateT>
EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
- MaybeHandle<Object> At(size_t index, LocalIsolate* isolate) const;
+ MaybeHandle<Object> At(size_t index, IsolateT* isolate) const;
// Returns the number of elements in the array.
size_t size() const;
@@ -154,8 +154,8 @@ class V8_EXPORT_PRIVATE ConstantArrayBuilder final {
smi_ = smi;
}
- template <typename LocalIsolate>
- Handle<Object> ToHandle(LocalIsolate* isolate) const;
+ template <typename IsolateT>
+ Handle<Object> ToHandle(IsolateT* isolate) const;
private:
explicit Entry(Tag tag) : tag_(tag) {}
@@ -207,8 +207,8 @@ class V8_EXPORT_PRIVATE ConstantArrayBuilder final {
const Entry& At(size_t index) const;
#if DEBUG
- template <typename LocalIsolate>
- void CheckAllElementsAreUnique(LocalIsolate* isolate) const;
+ template <typename IsolateT>
+ void CheckAllElementsAreUnique(IsolateT* isolate) const;
#endif
inline size_t available() const { return capacity() - reserved() - size(); }
diff --git a/chromium/v8/src/interpreter/handler-table-builder.cc b/chromium/v8/src/interpreter/handler-table-builder.cc
index 56f5b849dc6..1a88b2ab073 100644
--- a/chromium/v8/src/interpreter/handler-table-builder.cc
+++ b/chromium/v8/src/interpreter/handler-table-builder.cc
@@ -15,8 +15,8 @@ namespace interpreter {
HandlerTableBuilder::HandlerTableBuilder(Zone* zone) : entries_(zone) {}
-template <typename LocalIsolate>
-Handle<ByteArray> HandlerTableBuilder::ToHandlerTable(LocalIsolate* isolate) {
+template <typename IsolateT>
+Handle<ByteArray> HandlerTableBuilder::ToHandlerTable(IsolateT* isolate) {
int handler_table_size = static_cast<int>(entries_.size());
Handle<ByteArray> table_byte_array = isolate->factory()->NewByteArray(
HandlerTable::LengthForRange(handler_table_size), AllocationType::kOld);
diff --git a/chromium/v8/src/interpreter/handler-table-builder.h b/chromium/v8/src/interpreter/handler-table-builder.h
index f5f264d7c7e..8670fc04924 100644
--- a/chromium/v8/src/interpreter/handler-table-builder.h
+++ b/chromium/v8/src/interpreter/handler-table-builder.h
@@ -30,8 +30,8 @@ class V8_EXPORT_PRIVATE HandlerTableBuilder final {
// Builds the actual handler table by copying the current values into a heap
// object. Any further mutations to the builder won't be reflected.
- template <typename LocalIsolate>
- Handle<ByteArray> ToHandlerTable(LocalIsolate* isolate);
+ template <typename IsolateT>
+ Handle<ByteArray> ToHandlerTable(IsolateT* isolate);
// Creates a new handler table entry and returns a {handler_id} identifying the
// entry, so that it can be referenced by below setter functions.
diff --git a/chromium/v8/src/interpreter/interpreter-assembler.cc b/chromium/v8/src/interpreter/interpreter-assembler.cc
index df5b525877d..f6733def8e6 100644
--- a/chromium/v8/src/interpreter/interpreter-assembler.cc
+++ b/chromium/v8/src/interpreter/interpreter-assembler.cc
@@ -8,7 +8,7 @@
#include <ostream>
#include "src/codegen/code-factory.h"
-#include "src/codegen/interface-descriptors.h"
+#include "src/codegen/interface-descriptors-inl.h"
#include "src/codegen/machine-type.h"
#include "src/execution/frames.h"
#include "src/interpreter/bytecodes.h"
@@ -803,7 +803,9 @@ void InterpreterAssembler::CallJSWithSpreadAndDispatch(
TNode<UintPtrT> slot_id, TNode<HeapObject> maybe_feedback_vector) {
DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
DCHECK_EQ(Bytecodes::GetReceiverMode(bytecode_), ConvertReceiverMode::kAny);
- CollectCallFeedback(function, context, maybe_feedback_vector, slot_id);
+ LazyNode<Object> receiver = [=] { return LoadRegisterAtOperandIndex(1); };
+ CollectCallFeedback(function, receiver, context, maybe_feedback_vector,
+ slot_id);
Comment("call using CallWithSpread builtin");
Callable callable = CodeFactory::InterpreterPushArgsThenCall(
isolate(), ConvertReceiverMode::kAny,
@@ -1310,26 +1312,6 @@ void InterpreterAssembler::AbortIfWordNotEqual(TNode<WordT> lhs,
BIND(&ok);
}
-void InterpreterAssembler::MaybeDropFrames(TNode<Context> context) {
- TNode<ExternalReference> restart_fp_address =
- ExternalConstant(ExternalReference::debug_restart_fp_address(isolate()));
-
- TNode<IntPtrT> restart_fp = Load<IntPtrT>(restart_fp_address);
- TNode<IntPtrT> null = IntPtrConstant(0);
-
- Label ok(this), drop_frames(this);
- Branch(IntPtrEqual(restart_fp, null), &ok, &drop_frames);
-
- BIND(&drop_frames);
- // We don't expect this call to return since the frame dropper tears down
- // the stack and jumps into the function on the target frame to restart it.
- CallStub(CodeFactory::FrameDropperTrampoline(isolate()), context, restart_fp);
- Abort(AbortReason::kUnexpectedReturnFromFrameDropper);
- Goto(&ok);
-
- BIND(&ok);
-}
-
void InterpreterAssembler::OnStackReplacement(TNode<Context> context,
TNode<IntPtrT> relative_jump) {
TNode<JSFunction> function = CAST(LoadRegister(Register::function_closure()));
diff --git a/chromium/v8/src/interpreter/interpreter-assembler.h b/chromium/v8/src/interpreter/interpreter-assembler.h
index 019fd40f3bf..bf4641200bb 100644
--- a/chromium/v8/src/interpreter/interpreter-assembler.h
+++ b/chromium/v8/src/interpreter/interpreter-assembler.h
@@ -241,9 +241,6 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
TNode<FixedArrayBase> parameters_and_registers,
TNode<IntPtrT> formal_parameter_count, TNode<UintPtrT> register_count);
- // Dispatch to frame dropper trampoline if necessary.
- void MaybeDropFrames(TNode<Context> context);
-
// Perform OnStackReplacement.
void OnStackReplacement(TNode<Context> context, TNode<IntPtrT> relative_jump);
diff --git a/chromium/v8/src/interpreter/interpreter-generator.cc b/chromium/v8/src/interpreter/interpreter-generator.cc
index cb01348a351..75027b96b7d 100644
--- a/chromium/v8/src/interpreter/interpreter-generator.cc
+++ b/chromium/v8/src/interpreter/interpreter-generator.cc
@@ -209,7 +209,7 @@ IGNITION_HANDLER(LdaGlobal, InterpreterLoadGlobalAssembler) {
static const int kNameOperandIndex = 0;
static const int kSlotOperandIndex = 1;
- LdaGlobal(kSlotOperandIndex, kNameOperandIndex, NOT_INSIDE_TYPEOF);
+ LdaGlobal(kSlotOperandIndex, kNameOperandIndex, TypeofMode::kNotInside);
}
// LdaGlobalInsideTypeof <name_index> <slot>
@@ -220,7 +220,7 @@ IGNITION_HANDLER(LdaGlobalInsideTypeof, InterpreterLoadGlobalAssembler) {
static const int kNameOperandIndex = 0;
static const int kSlotOperandIndex = 1;
- LdaGlobal(kSlotOperandIndex, kNameOperandIndex, INSIDE_TYPEOF);
+ LdaGlobal(kSlotOperandIndex, kNameOperandIndex, TypeofMode::kInside);
}
// StaGlobal <name_index> <slot>
@@ -418,8 +418,8 @@ class InterpreterLookupGlobalAssembler : public InterpreterLoadGlobalAssembler {
TypeofMode typeof_mode =
function_id == Runtime::kLoadLookupSlotInsideTypeof
- ? INSIDE_TYPEOF
- : NOT_INSIDE_TYPEOF;
+ ? TypeofMode::kInside
+ : TypeofMode::kNotInside;
LdaGlobal(kSlotOperandIndex, kNameOperandIndex, typeof_mode);
}
@@ -1365,13 +1365,19 @@ class InterpreterJSCallAssembler : public InterpreterAssembler {
// Generates code to perform a JS call that collects type feedback.
void JSCall(ConvertReceiverMode receiver_mode) {
TNode<Object> function = LoadRegisterAtOperandIndex(0);
+ LazyNode<Object> receiver = [=] {
+ return receiver_mode == ConvertReceiverMode::kNullOrUndefined
+ ? UndefinedConstant()
+ : LoadRegisterAtOperandIndex(1);
+ };
RegListNodePair args = GetRegisterListAtOperandIndex(1);
TNode<UintPtrT> slot_id = BytecodeOperandIdx(3);
TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
TNode<Context> context = GetContext();
// Collect the {function} feedback.
- CollectCallFeedback(function, context, maybe_feedback_vector, slot_id);
+ CollectCallFeedback(function, receiver, context, maybe_feedback_vector,
+ slot_id);
// Call the function and dispatch to the next handler.
CallJSAndDispatch(function, context, args, receiver_mode);
@@ -1399,12 +1405,18 @@ class InterpreterJSCallAssembler : public InterpreterAssembler {
kFirstArgumentOperandIndex + kReceiverAndArgOperandCount;
TNode<Object> function = LoadRegisterAtOperandIndex(0);
+ LazyNode<Object> receiver = [=] {
+ return receiver_mode == ConvertReceiverMode::kNullOrUndefined
+ ? UndefinedConstant()
+ : LoadRegisterAtOperandIndex(1);
+ };
TNode<UintPtrT> slot_id = BytecodeOperandIdx(kSlotOperandIndex);
TNode<HeapObject> maybe_feedback_vector = LoadFeedbackVector();
TNode<Context> context = GetContext();
// Collect the {function} feedback.
- CollectCallFeedback(function, context, maybe_feedback_vector, slot_id);
+ CollectCallFeedback(function, receiver, context, maybe_feedback_vector,
+ slot_id);
switch (kReceiverAndArgOperandCount) {
case 0:
@@ -2776,7 +2788,7 @@ IGNITION_HANDLER(ThrowIfNotSuperConstructor, InterpreterAssembler) {
// Call runtime to handle debugger statement.
IGNITION_HANDLER(Debugger, InterpreterAssembler) {
TNode<Context> context = GetContext();
- CallStub(CodeFactory::HandleDebuggerStatement(isolate()), context);
+ CallRuntime(Runtime::kHandleDebuggerStatement, context);
Dispatch();
}
@@ -2791,7 +2803,6 @@ IGNITION_HANDLER(Debugger, InterpreterAssembler) {
Runtime::kDebugBreakOnBytecode, context, accumulator); \
TNode<Object> return_value = Projection<0>(result_pair); \
TNode<IntPtrT> original_bytecode = SmiUntag(Projection<1>(result_pair)); \
- MaybeDropFrames(context); \
SetAccumulator(return_value); \
DispatchToBytecodeWithOptionalStarLookahead(original_bytecode); \
}
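
The LazyNode<Object> receiver introduced above defers loading the receiver register until CollectCallFeedback actually consumes it, so paths that never need the receiver skip the load. Outside the CodeStubAssembler the same idea is just a deferred computation; a sketch with std::function (all types and names here are illustrative):

#include <functional>
#include <string>

using LazyReceiver = std::function<std::string()>;

// Only evaluates 'receiver' on the path that records receiver feedback.
void CollectFeedbackSketch(const LazyReceiver& receiver, bool needs_receiver) {
  if (!needs_receiver) return;   // the register load never happens
  std::string r = receiver();    // evaluated lazily, exactly once, here
  (void)r;                       // ... record feedback about r ...
}
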
diff --git a/chromium/v8/src/interpreter/interpreter-intrinsics-generator.cc b/chromium/v8/src/interpreter/interpreter-intrinsics-generator.cc
index b6ea44f6e73..c98f78f8788 100644
--- a/chromium/v8/src/interpreter/interpreter-intrinsics-generator.cc
+++ b/chromium/v8/src/interpreter/interpreter-intrinsics-generator.cc
@@ -213,12 +213,6 @@ TNode<Object> IntrinsicsGenerator::HasProperty(
arg_count);
}
-TNode<Object> IntrinsicsGenerator::ToString(
- const InterpreterAssembler::RegListNodePair& args, TNode<Context> context,
- int arg_count) {
- return IntrinsicAsBuiltinCall(args, context, Builtins::kToString, arg_count);
-}
-
TNode<Object> IntrinsicsGenerator::ToLength(
const InterpreterAssembler::RegListNodePair& args, TNode<Context> context,
int arg_count) {
diff --git a/chromium/v8/src/interpreter/interpreter-intrinsics.h b/chromium/v8/src/interpreter/interpreter-intrinsics.h
index 07c60270c2a..85c54f67f11 100644
--- a/chromium/v8/src/interpreter/interpreter-intrinsics.h
+++ b/chromium/v8/src/interpreter/interpreter-intrinsics.h
@@ -36,7 +36,6 @@ namespace interpreter {
V(IsArray, is_array, 1) \
V(IsJSReceiver, is_js_receiver, 1) \
V(IsSmi, is_smi, 1) \
- V(ToString, to_string, 1) \
V(ToLength, to_length, 1) \
V(ToObject, to_object, 1)
diff --git a/chromium/v8/src/interpreter/interpreter.cc b/chromium/v8/src/interpreter/interpreter.cc
index ddce0f0e4eb..36f2c546146 100644
--- a/chromium/v8/src/interpreter/interpreter.cc
+++ b/chromium/v8/src/interpreter/interpreter.cc
@@ -18,7 +18,6 @@
#include "src/init/setup-isolate.h"
#include "src/interpreter/bytecode-generator.h"
#include "src/interpreter/bytecodes.h"
-#include "src/logging/counters-inl.h"
#include "src/objects/objects-inl.h"
#include "src/objects/shared-function-info.h"
#include "src/objects/slots.h"
@@ -50,14 +49,13 @@ class InterpreterCompilationJob final : public UnoptimizedCompilationJob {
private:
BytecodeGenerator* generator() { return &generator_; }
- template <typename LocalIsolate>
- void CheckAndPrintBytecodeMismatch(LocalIsolate* isolate,
- Handle<Script> script,
+ template <typename IsolateT>
+ void CheckAndPrintBytecodeMismatch(IsolateT* isolate, Handle<Script> script,
Handle<BytecodeArray> bytecode);
- template <typename LocalIsolate>
+ template <typename IsolateT>
Status DoFinalizeJobImpl(Handle<SharedFunctionInfo> shared_info,
- LocalIsolate* isolate);
+ IsolateT* isolate);
Zone zone_;
UnoptimizedCompilationInfo compilation_info_;
@@ -177,10 +175,9 @@ InterpreterCompilationJob::InterpreterCompilationJob(
eager_inner_literals) {}
InterpreterCompilationJob::Status InterpreterCompilationJob::ExecuteJobImpl() {
- RuntimeCallTimerScope runtimeTimerScope(
- parse_info()->runtime_call_stats(),
- RuntimeCallCounterId::kCompileIgnition,
- RuntimeCallStats::kThreadSpecific);
+ RCS_SCOPE(parse_info()->runtime_call_stats(),
+ RuntimeCallCounterId::kCompileIgnition,
+ RuntimeCallStats::kThreadSpecific);
// TODO(lpy): add support for background compilation RCS trace.
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.CompileIgnition");
@@ -203,10 +200,9 @@ InterpreterCompilationJob::Status InterpreterCompilationJob::ExecuteJobImpl() {
}
#ifdef DEBUG
-template <typename LocalIsolate>
+template <typename IsolateT>
void InterpreterCompilationJob::CheckAndPrintBytecodeMismatch(
- LocalIsolate* isolate, Handle<Script> script,
- Handle<BytecodeArray> bytecode) {
+ IsolateT* isolate, Handle<Script> script, Handle<BytecodeArray> bytecode) {
int first_mismatch = generator()->CheckBytecodeMatches(*bytecode);
if (first_mismatch >= 0) {
parse_info()->ast_value_factory()->Internalize(isolate);
@@ -243,9 +239,8 @@ void InterpreterCompilationJob::CheckAndPrintBytecodeMismatch(
InterpreterCompilationJob::Status InterpreterCompilationJob::FinalizeJobImpl(
Handle<SharedFunctionInfo> shared_info, Isolate* isolate) {
- RuntimeCallTimerScope runtimeTimerScope(
- parse_info()->runtime_call_stats(),
- RuntimeCallCounterId::kCompileIgnitionFinalization);
+ RCS_SCOPE(parse_info()->runtime_call_stats(),
+ RuntimeCallCounterId::kCompileIgnitionFinalization);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.CompileIgnitionFinalization");
return DoFinalizeJobImpl(shared_info, isolate);
@@ -253,17 +248,16 @@ InterpreterCompilationJob::Status InterpreterCompilationJob::FinalizeJobImpl(
InterpreterCompilationJob::Status InterpreterCompilationJob::FinalizeJobImpl(
Handle<SharedFunctionInfo> shared_info, LocalIsolate* isolate) {
- RuntimeCallTimerScope runtimeTimerScope(
- parse_info()->runtime_call_stats(),
- RuntimeCallCounterId::kCompileBackgroundIgnitionFinalization);
+ RCS_SCOPE(parse_info()->runtime_call_stats(),
+ RuntimeCallCounterId::kCompileBackgroundIgnitionFinalization);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.CompileIgnitionFinalization");
return DoFinalizeJobImpl(shared_info, isolate);
}
-template <typename LocalIsolate>
+template <typename IsolateT>
InterpreterCompilationJob::Status InterpreterCompilationJob::DoFinalizeJobImpl(
- Handle<SharedFunctionInfo> shared_info, LocalIsolate* isolate) {
+ Handle<SharedFunctionInfo> shared_info, IsolateT* isolate) {
Handle<BytecodeArray> bytecodes = compilation_info_.bytecode_array();
if (bytecodes.is_null()) {
bytecodes = generator()->FinalizeBytecode(
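
The RCS_SCOPE macro used above stands in for constructing a RuntimeCallTimerScope by hand at every call site. Its real definition lives elsewhere in the tree and may differ in detail; a plausible shape, stated as an assumption rather than the actual macro, is:

#ifdef V8_RUNTIME_CALL_STATS
// Assumed shape: declare a scoped timer; the real macro may also generate a
// unique variable name per expansion.
#define RCS_SCOPE(...) RuntimeCallTimerScope rcs_timer_scope(__VA_ARGS__)
#else
#define RCS_SCOPE(...)  // compiles away entirely when call stats are disabled
#endif
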
diff --git a/chromium/v8/src/json/json-parser.cc b/chromium/v8/src/json/json-parser.cc
index ccea49e89fa..7d350d02e54 100644
--- a/chromium/v8/src/json/json-parser.cc
+++ b/chromium/v8/src/json/json-parser.cc
@@ -10,6 +10,7 @@
#include "src/numbers/hash-seed-inl.h"
#include "src/objects/field-type.h"
#include "src/objects/hash-table-inl.h"
+#include "src/objects/map-updater.h"
#include "src/objects/objects-inl.h"
#include "src/objects/property-descriptor.h"
#include "src/strings/char-predicates-inl.h"
@@ -509,17 +510,18 @@ Handle<Object> JsonParser<Char>::BuildJsonObject(
}
Handle<FieldType> value_type =
value->OptimalType(isolate(), representation);
- Map::GeneralizeField(isolate(), target, descriptor_index,
- details.constness(), representation, value_type);
+ MapUpdater::GeneralizeField(isolate(), target, descriptor_index,
+ details.constness(), representation,
+ value_type);
} else if (expected_representation.IsHeapObject() &&
!target->instance_descriptors(isolate())
.GetFieldType(descriptor_index)
.NowContains(value)) {
Handle<FieldType> value_type =
value->OptimalType(isolate(), expected_representation);
- Map::GeneralizeField(isolate(), target, descriptor_index,
- details.constness(), expected_representation,
- value_type);
+ MapUpdater::GeneralizeField(isolate(), target, descriptor_index,
+ details.constness(), expected_representation,
+ value_type);
} else if (expected_representation.IsDouble() && value->IsSmi()) {
new_mutable_double++;
}
@@ -601,8 +603,8 @@ Handle<Object> JsonParser<Char>::BuildJsonObject(
mutable_double_address += kMutableDoubleSize;
} else {
DCHECK(value.IsHeapNumber());
- HeapObject::cast(value).synchronized_set_map(
- *factory()->heap_number_map());
+ HeapObject::cast(value).set_map(*factory()->heap_number_map(),
+ kReleaseStore);
}
}
object->RawFastInobjectPropertyAtPut(index, value, mode);
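
The switch from synchronized_set_map to set_map(..., kReleaseStore) above makes the map write an explicit release store, so a concurrent reader that acquire-loads the map also observes the heap number's payload. The same ordering expressed with std::atomic (an analogy, not V8 code; the struct is illustrative):

#include <atomic>

struct FakeHeapNumber {
  double value = 0.0;                     // payload, written first
  std::atomic<const void*> map{nullptr};  // "map" pointer, published last
};

void Publish(FakeHeapNumber& n, const void* heap_number_map) {
  n.value = 1.5;                                            // ordinary store
  n.map.store(heap_number_map, std::memory_order_release);  // publish
}

// A thread that sees the new map via map.load(std::memory_order_acquire)
// is guaranteed to also see value == 1.5.
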
diff --git a/chromium/v8/src/libplatform/tracing/recorder-default.cc b/chromium/v8/src/libplatform/tracing/recorder-default.cc
deleted file mode 100644
index 46e0cbb8e26..00000000000
--- a/chromium/v8/src/libplatform/tracing/recorder-default.cc
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-#ifndef V8_LIBPLATFORM_TRACING_RECORDER_DEFAULT_H_
-#define V8_LIBPLATFORM_TRACING_RECORDER_DEFAULT_H_
-
-#include "src/libplatform/tracing/recorder.h"
-
-namespace v8 {
-namespace platform {
-namespace tracing {
-
-Recorder::Recorder() {}
-Recorder::~Recorder() {}
-
-bool Recorder::IsEnabled() { return false; }
-bool Recorder::IsEnabled(const uint8_t level) { return false; }
-
-void Recorder::AddEvent(TraceObject* trace_event) {}
-
-} // namespace tracing
-} // namespace platform
-} // namespace v8
-
-#endif // V8_LIBPLATFORM_TRACING_RECORDER_DEFAULT_H_
diff --git a/chromium/v8/src/libplatform/tracing/recorder-mac.cc b/chromium/v8/src/libplatform/tracing/recorder-mac.cc
new file mode 100644
index 00000000000..6a6689f8cf1
--- /dev/null
+++ b/chromium/v8/src/libplatform/tracing/recorder-mac.cc
@@ -0,0 +1,43 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#ifndef V8_LIBPLATFORM_TRACING_RECORDER_MAC_H_
+#define V8_LIBPLATFORM_TRACING_RECORDER_MAC_H_
+
+#include "src/libplatform/tracing/recorder.h"
+
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wunguarded-availability"
+
+namespace v8 {
+namespace platform {
+namespace tracing {
+
+Recorder::Recorder() { v8Provider = os_log_create("v8", ""); }
+Recorder::~Recorder() {}
+
+bool Recorder::IsEnabled() {
+ return os_log_type_enabled(v8Provider, OS_LOG_TYPE_DEFAULT);
+}
+bool Recorder::IsEnabled(const uint8_t level) {
+ if (level == OS_LOG_TYPE_DEFAULT || level == OS_LOG_TYPE_INFO ||
+ level == OS_LOG_TYPE_DEBUG || level == OS_LOG_TYPE_ERROR ||
+ level == OS_LOG_TYPE_FAULT) {
+ return os_log_type_enabled(v8Provider, static_cast<os_log_type_t>(level));
+ }
+ return false;
+}
+
+void Recorder::AddEvent(TraceObject* trace_event) {
+ os_signpost_event_emit(v8Provider, OS_SIGNPOST_ID_EXCLUSIVE, "",
+ "%s, cpu_duration: %d", trace_event->name(),
+ static_cast<int>(trace_event->cpu_duration()));
+}
+
+} // namespace tracing
+} // namespace platform
+} // namespace v8
+
+#pragma clang diagnostic pop
+
+#endif // V8_LIBPLATFORM_TRACING_RECORDER_MAC_H_
diff --git a/chromium/v8/src/libplatform/tracing/recorder.h b/chromium/v8/src/libplatform/tracing/recorder.h
index 31cc75f9bd8..8b8eb0e0e91 100644
--- a/chromium/v8/src/libplatform/tracing/recorder.h
+++ b/chromium/v8/src/libplatform/tracing/recorder.h
@@ -9,6 +9,16 @@
#include "include/libplatform/v8-tracing.h"
+#if !defined(V8_ENABLE_SYSTEM_INSTRUMENTATION)
+#error "only include this file if V8_ENABLE_SYSTEM_INSTRUMENTATION"
+#endif
+
+#if V8_OS_MACOSX
+#include <os/signpost.h>
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wunguarded-availability"
+#endif
+
#if V8_OS_WIN
#ifndef V8_ETW_GUID
#define V8_ETW_GUID \
@@ -31,14 +41,23 @@ class V8_PLATFORM_EXPORT Recorder {
Recorder();
~Recorder();
- static bool IsEnabled();
- static bool IsEnabled(const uint8_t level);
+ bool IsEnabled();
+ bool IsEnabled(const uint8_t level);
void AddEvent(TraceObject* trace_event);
+
+ private:
+#if V8_OS_MACOSX
+ os_log_t v8Provider;
+#endif
};
} // namespace tracing
} // namespace platform
} // namespace v8
+#if V8_OS_MACOSX
+#pragma clang diagnostic pop
+#endif
+
#endif // V8_LIBPLATFORM_TRACING_RECORDER_H_
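
IsEnabled() stops being static in the hunk above because the macOS backend now keeps per-instance state: the os_log_t provider created in the new recorder-mac.cc. Reduced to a sketch with a stand-in handle type (illustrative only):

class RecorderSketch {
 public:
  // The real macOS constructor does: v8Provider = os_log_create("v8", "").
  RecorderSketch() : provider_(nullptr) {}
  bool IsEnabled() const { return provider_ != nullptr; }  // needs 'this'

 private:
  void* provider_;  // stands in for the platform handle (os_log_t on macOS)
};
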
diff --git a/chromium/v8/src/libplatform/tracing/trace-writer.cc b/chromium/v8/src/libplatform/tracing/trace-writer.cc
index 5740dabd8f2..1131522eec4 100644
--- a/chromium/v8/src/libplatform/tracing/trace-writer.cc
+++ b/chromium/v8/src/libplatform/tracing/trace-writer.cc
@@ -9,7 +9,10 @@
#include "base/trace_event/common/trace_event_common.h"
#include "include/v8-platform.h"
#include "src/base/platform/platform.h"
+
+#if defined(V8_ENABLE_SYSTEM_INSTRUMENTATION)
#include "src/libplatform/tracing/recorder.h"
+#endif
namespace v8 {
namespace platform {
@@ -191,6 +194,7 @@ TraceWriter* TraceWriter::CreateJSONTraceWriter(std::ostream& stream,
return new JSONTraceWriter(stream, tag);
}
+#if defined(V8_ENABLE_SYSTEM_INSTRUMENTATION)
SystemInstrumentationTraceWriter::SystemInstrumentationTraceWriter() {
recorder_ = std::make_unique<Recorder>();
}
@@ -211,6 +215,7 @@ void SystemInstrumentationTraceWriter::Flush() {}
TraceWriter* TraceWriter::CreateSystemInstrumentationTraceWriter() {
return new SystemInstrumentationTraceWriter();
}
+#endif
} // namespace tracing
} // namespace platform
diff --git a/chromium/v8/src/libplatform/tracing/trace-writer.h b/chromium/v8/src/libplatform/tracing/trace-writer.h
index 1f727b815a5..2fada451080 100644
--- a/chromium/v8/src/libplatform/tracing/trace-writer.h
+++ b/chromium/v8/src/libplatform/tracing/trace-writer.h
@@ -29,6 +29,7 @@ class JSONTraceWriter : public TraceWriter {
bool append_comma_ = false;
};
+#if defined(V8_ENABLE_SYSTEM_INSTRUMENTATION)
class SystemInstrumentationTraceWriter : public TraceWriter {
public:
SystemInstrumentationTraceWriter();
@@ -39,6 +40,7 @@ class SystemInstrumentationTraceWriter : public TraceWriter {
private:
std::unique_ptr<Recorder> recorder_;
};
+#endif
} // namespace tracing
} // namespace platform
diff --git a/chromium/v8/src/logging/code-events.h b/chromium/v8/src/logging/code-events.h
index c009ba0b15d..cda7c39fc7c 100644
--- a/chromium/v8/src/logging/code-events.h
+++ b/chromium/v8/src/logging/code-events.h
@@ -98,6 +98,7 @@ class CodeEventListener {
// Not handlified as this happens during GC. No allocation allowed.
virtual void CodeMoveEvent(AbstractCode from, AbstractCode to) = 0;
virtual void SharedFunctionInfoMoveEvent(Address from, Address to) = 0;
+ virtual void NativeContextMoveEvent(Address from, Address to) = 0;
virtual void CodeMovingGCEvent() = 0;
virtual void CodeDisableOptEvent(Handle<AbstractCode> code,
Handle<SharedFunctionInfo> shared) = 0;
@@ -217,6 +218,11 @@ class CodeEventDispatcher : public CodeEventListener {
listener->SharedFunctionInfoMoveEvent(from, to);
});
}
+ void NativeContextMoveEvent(Address from, Address to) override {
+ DispatchEventToListeners([=](CodeEventListener* listener) {
+ listener->NativeContextMoveEvent(from, to);
+ });
+ }
void CodeMovingGCEvent() override {
DispatchEventToListeners(
[](CodeEventListener* listener) { listener->CodeMovingGCEvent(); });
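
The new NativeContextMoveEvent hook above follows the dispatcher pattern already used for the other events: the dispatcher forwards each call to every registered listener through a small lambda. A reduced stand-alone sketch (registration and locking simplified; names illustrative):

#include <cstdint>
#include <vector>

class ListenerSketch {
 public:
  virtual ~ListenerSketch() = default;
  virtual void NativeContextMoveEvent(uintptr_t from, uintptr_t to) = 0;
};

class DispatcherSketch {
 public:
  void AddListener(ListenerSketch* listener) { listeners_.push_back(listener); }
  void NativeContextMoveEvent(uintptr_t from, uintptr_t to) {
    Dispatch([=](ListenerSketch* l) { l->NativeContextMoveEvent(from, to); });
  }

 private:
  template <typename Fn>
  void Dispatch(Fn fn) {
    for (ListenerSketch* listener : listeners_) fn(listener);
  }
  std::vector<ListenerSketch*> listeners_;
};
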
diff --git a/chromium/v8/src/logging/counters-definitions.h b/chromium/v8/src/logging/counters-definitions.h
index ffa9647719d..ebf6e3f430a 100644
--- a/chromium/v8/src/logging/counters-definitions.h
+++ b/chromium/v8/src/logging/counters-definitions.h
@@ -85,6 +85,11 @@ namespace internal {
HR(wasm_modules_per_engine, V8.WasmModulesPerEngine, 1, 1024, 30) \
/* bailout reason if Liftoff failed, or {kSuccess} (per function) */ \
HR(liftoff_bailout_reasons, V8.LiftoffBailoutReasons, 0, 20, 21) \
+ /* support for PKEYs/PKU by testing result of pkey_alloc() */ \
+ /* TODO(chromium:1207318): Only values 0 and 1 are actually used, but 3 */ \
+ /* buckets needed until {BooleanHistogram} is supported in Chromium UMA. */ \
+ HR(wasm_memory_protection_keys_support, V8.WasmMemoryProtectionKeysSupport, \
+ 0, 2, 3) \
/* number of thrown exceptions per isolate */ \
HR(wasm_throw_count, V8.WasmThrowCount, 0, 100000, 30) \
/* number of rethrown exceptions per isolate */ \
diff --git a/chromium/v8/src/logging/counters-inl.h b/chromium/v8/src/logging/counters-inl.h
deleted file mode 100644
index 3e067c7c060..00000000000
--- a/chromium/v8/src/logging/counters-inl.h
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_LOGGING_COUNTERS_INL_H_
-#define V8_LOGGING_COUNTERS_INL_H_
-
-#include "src/logging/counters.h"
-#include "src/logging/tracing-flags.h"
-
-namespace v8 {
-namespace internal {
-
-void RuntimeCallTimer::Start(RuntimeCallCounter* counter,
- RuntimeCallTimer* parent) {
- DCHECK(!IsStarted());
- counter_ = counter;
- parent_.SetValue(parent);
- if (TracingFlags::runtime_stats.load(std::memory_order_relaxed) ==
- v8::tracing::TracingCategoryObserver::ENABLED_BY_SAMPLING) {
- return;
- }
- base::TimeTicks now = RuntimeCallTimer::Now();
- if (parent) parent->Pause(now);
- Resume(now);
- DCHECK(IsStarted());
-}
-
-void RuntimeCallTimer::Pause(base::TimeTicks now) {
- DCHECK(IsStarted());
- elapsed_ += (now - start_ticks_);
- start_ticks_ = base::TimeTicks();
-}
-
-void RuntimeCallTimer::Resume(base::TimeTicks now) {
- DCHECK(!IsStarted());
- start_ticks_ = now;
-}
-
-RuntimeCallTimer* RuntimeCallTimer::Stop() {
- if (!IsStarted()) return parent();
- base::TimeTicks now = RuntimeCallTimer::Now();
- Pause(now);
- counter_->Increment();
- CommitTimeToCounter();
-
- RuntimeCallTimer* parent_timer = parent();
- if (parent_timer) {
- parent_timer->Resume(now);
- }
- return parent_timer;
-}
-
-void RuntimeCallTimer::CommitTimeToCounter() {
- counter_->Add(elapsed_);
- elapsed_ = base::TimeDelta();
-}
-
-bool RuntimeCallTimer::IsStarted() { return start_ticks_ != base::TimeTicks(); }
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_LOGGING_COUNTERS_INL_H_
diff --git a/chromium/v8/src/logging/counters.cc b/chromium/v8/src/logging/counters.cc
index c9c9aa0ebe5..3ffced08324 100644
--- a/chromium/v8/src/logging/counters.cc
+++ b/chromium/v8/src/logging/counters.cc
@@ -4,16 +4,13 @@
#include "src/logging/counters.h"
-#include <iomanip>
-
-#include "src/base/platform/platform.h"
+#include "src/base/atomic-utils.h"
+#include "src/base/platform/elapsed-timer.h"
#include "src/base/platform/time.h"
#include "src/builtins/builtins-definitions.h"
#include "src/execution/isolate.h"
-#include "src/logging/counters-inl.h"
#include "src/logging/log-inl.h"
#include "src/logging/log.h"
-#include "src/utils/ostreams.h"
namespace v8 {
namespace internal {
@@ -121,15 +118,16 @@ void TimedHistogram::RecordAbandon(base::ElapsedTimer* timer,
}
Counters::Counters(Isolate* isolate)
- : isolate_(isolate),
- stats_table_(this),
-// clang format off
+ :
#define SC(name, caption) name##_(this, "c:" #caption),
STATS_COUNTER_TS_LIST(SC)
#undef SC
- // clang format on
- runtime_call_stats_(RuntimeCallStats::kMainIsolateThread),
- worker_thread_runtime_call_stats_() {
+#ifdef V8_RUNTIME_CALL_STATS
+ runtime_call_stats_(RuntimeCallStats::kMainIsolateThread),
+ worker_thread_runtime_call_stats_(),
+#endif
+ isolate_(isolate),
+ stats_table_(this) {
static const struct {
Histogram Counters::*member;
const char* caption;
@@ -319,355 +317,5 @@ void Counters::ResetCreateHistogramFunction(CreateHistogramCallback f) {
#undef HM
}
-base::TimeTicks (*RuntimeCallTimer::Now)() =
- &base::TimeTicks::HighResolutionNow;
-
-base::TimeTicks RuntimeCallTimer::NowCPUTime() {
- base::ThreadTicks ticks = base::ThreadTicks::Now();
- return base::TimeTicks::FromInternalValue(ticks.ToInternalValue());
-}
-
-class RuntimeCallStatEntries {
- public:
- void Print(std::ostream& os) {
- if (total_call_count == 0) return;
- std::sort(entries.rbegin(), entries.rend());
- os << std::setw(50) << "Runtime Function/C++ Builtin" << std::setw(12)
- << "Time" << std::setw(18) << "Count" << std::endl
- << std::string(88, '=') << std::endl;
- for (Entry& entry : entries) {
- entry.SetTotal(total_time, total_call_count);
- entry.Print(os);
- }
- os << std::string(88, '-') << std::endl;
- Entry("Total", total_time, total_call_count).Print(os);
- }
-
- // By default, the compiler will usually inline this, which results in a large
- // binary size increase: std::vector::push_back expands to a large amount of
- // instructions, and this function is invoked repeatedly by macros.
- V8_NOINLINE void Add(RuntimeCallCounter* counter) {
- if (counter->count() == 0) return;
- entries.push_back(
- Entry(counter->name(), counter->time(), counter->count()));
- total_time += counter->time();
- total_call_count += counter->count();
- }
-
- private:
- class Entry {
- public:
- Entry(const char* name, base::TimeDelta time, uint64_t count)
- : name_(name),
- time_(time.InMicroseconds()),
- count_(count),
- time_percent_(100),
- count_percent_(100) {}
-
- bool operator<(const Entry& other) const {
- if (time_ < other.time_) return true;
- if (time_ > other.time_) return false;
- return count_ < other.count_;
- }
-
- V8_NOINLINE void Print(std::ostream& os) {
- os.precision(2);
- os << std::fixed << std::setprecision(2);
- os << std::setw(50) << name_;
- os << std::setw(10) << static_cast<double>(time_) / 1000 << "ms ";
- os << std::setw(6) << time_percent_ << "%";
- os << std::setw(10) << count_ << " ";
- os << std::setw(6) << count_percent_ << "%";
- os << std::endl;
- }
-
- V8_NOINLINE void SetTotal(base::TimeDelta total_time,
- uint64_t total_count) {
- if (total_time.InMicroseconds() == 0) {
- time_percent_ = 0;
- } else {
- time_percent_ = 100.0 * time_ / total_time.InMicroseconds();
- }
- count_percent_ = 100.0 * count_ / total_count;
- }
-
- private:
- const char* name_;
- int64_t time_;
- uint64_t count_;
- double time_percent_;
- double count_percent_;
- };
-
- uint64_t total_call_count = 0;
- base::TimeDelta total_time;
- std::vector<Entry> entries;
-};
-
-void RuntimeCallCounter::Reset() {
- count_ = 0;
- time_ = 0;
-}
-
-void RuntimeCallCounter::Dump(v8::tracing::TracedValue* value) {
- value->BeginArray(name_);
- value->AppendDouble(count_);
- value->AppendDouble(time_);
- value->EndArray();
-}
-
-void RuntimeCallCounter::Add(RuntimeCallCounter* other) {
- count_ += other->count();
- time_ += other->time().InMicroseconds();
-}
-
-void RuntimeCallTimer::Snapshot() {
- base::TimeTicks now = Now();
- // Pause only / topmost timer in the timer stack.
- Pause(now);
- // Commit all the timer's elapsed time to the counters.
- RuntimeCallTimer* timer = this;
- while (timer != nullptr) {
- timer->CommitTimeToCounter();
- timer = timer->parent();
- }
- Resume(now);
-}
-
-RuntimeCallStats::RuntimeCallStats(ThreadType thread_type)
- : in_use_(false), thread_type_(thread_type) {
- static const char* const kNames[] = {
-#define CALL_BUILTIN_COUNTER(name) "GC_" #name,
- FOR_EACH_GC_COUNTER(CALL_BUILTIN_COUNTER) //
-#undef CALL_BUILTIN_COUNTER
-#define CALL_RUNTIME_COUNTER(name) #name,
- FOR_EACH_MANUAL_COUNTER(CALL_RUNTIME_COUNTER) //
-#undef CALL_RUNTIME_COUNTER
-#define CALL_RUNTIME_COUNTER(name, nargs, ressize) #name,
- FOR_EACH_INTRINSIC(CALL_RUNTIME_COUNTER) //
-#undef CALL_RUNTIME_COUNTER
-#define CALL_BUILTIN_COUNTER(name) #name,
- BUILTIN_LIST_C(CALL_BUILTIN_COUNTER) //
-#undef CALL_BUILTIN_COUNTER
-#define CALL_BUILTIN_COUNTER(name) "API_" #name,
- FOR_EACH_API_COUNTER(CALL_BUILTIN_COUNTER) //
-#undef CALL_BUILTIN_COUNTER
-#define CALL_BUILTIN_COUNTER(name) #name,
- FOR_EACH_HANDLER_COUNTER(CALL_BUILTIN_COUNTER) //
-#undef CALL_BUILTIN_COUNTER
-#define THREAD_SPECIFIC_COUNTER(name) #name,
- FOR_EACH_THREAD_SPECIFIC_COUNTER(THREAD_SPECIFIC_COUNTER) //
-#undef THREAD_SPECIFIC_COUNTER
- };
- for (int i = 0; i < kNumberOfCounters; i++) {
- this->counters_[i] = RuntimeCallCounter(kNames[i]);
- }
- if (FLAG_rcs_cpu_time) {
- CHECK(base::ThreadTicks::IsSupported());
- base::ThreadTicks::WaitUntilInitialized();
- RuntimeCallTimer::Now = &RuntimeCallTimer::NowCPUTime;
- }
-}
-
-namespace {
-constexpr RuntimeCallCounterId FirstCounter(RuntimeCallCounterId first, ...) {
- return first;
-}
-
-#define THREAD_SPECIFIC_COUNTER(name) k##name,
-constexpr RuntimeCallCounterId kFirstThreadVariantCounter =
- FirstCounter(FOR_EACH_THREAD_SPECIFIC_COUNTER(THREAD_SPECIFIC_COUNTER) 0);
-#undef THREAD_SPECIFIC_COUNTER
-
-#define THREAD_SPECIFIC_COUNTER(name) +1
-constexpr int kThreadVariantCounterCount =
- 0 FOR_EACH_THREAD_SPECIFIC_COUNTER(THREAD_SPECIFIC_COUNTER);
-#undef THREAD_SPECIFIC_COUNTER
-
-constexpr auto kLastThreadVariantCounter = static_cast<RuntimeCallCounterId>(
- kFirstThreadVariantCounter + kThreadVariantCounterCount - 1);
-} // namespace
-
-bool RuntimeCallStats::HasThreadSpecificCounterVariants(
- RuntimeCallCounterId id) {
- // Check that it's in the range of the thread-specific variant counters and
- // also that it's one of the background counters.
- return id >= kFirstThreadVariantCounter && id <= kLastThreadVariantCounter;
-}
-
-bool RuntimeCallStats::IsBackgroundThreadSpecificVariant(
- RuntimeCallCounterId id) {
- return HasThreadSpecificCounterVariants(id) &&
- (id - kFirstThreadVariantCounter) % 2 == 1;
-}
-
-void RuntimeCallStats::Enter(RuntimeCallTimer* timer,
- RuntimeCallCounterId counter_id) {
- DCHECK(IsCalledOnTheSameThread());
- RuntimeCallCounter* counter = GetCounter(counter_id);
- DCHECK_NOT_NULL(counter->name());
- timer->Start(counter, current_timer());
- current_timer_.SetValue(timer);
- current_counter_.SetValue(counter);
-}
-
-void RuntimeCallStats::Leave(RuntimeCallTimer* timer) {
- DCHECK(IsCalledOnTheSameThread());
- RuntimeCallTimer* stack_top = current_timer();
- if (stack_top == nullptr) return; // Missing timer is a result of Reset().
- CHECK(stack_top == timer);
- current_timer_.SetValue(timer->Stop());
- RuntimeCallTimer* cur_timer = current_timer();
- current_counter_.SetValue(cur_timer ? cur_timer->counter() : nullptr);
-}
-
-void RuntimeCallStats::Add(RuntimeCallStats* other) {
- for (int i = 0; i < kNumberOfCounters; i++) {
- GetCounter(i)->Add(other->GetCounter(i));
- }
-}
-
-// static
-void RuntimeCallStats::CorrectCurrentCounterId(RuntimeCallCounterId counter_id,
- CounterMode mode) {
- DCHECK(IsCalledOnTheSameThread());
- if (mode == RuntimeCallStats::CounterMode::kThreadSpecific) {
- counter_id = CounterIdForThread(counter_id);
- }
- DCHECK(IsCounterAppropriateForThread(counter_id));
-
- RuntimeCallTimer* timer = current_timer();
- if (timer == nullptr) return;
- RuntimeCallCounter* counter = GetCounter(counter_id);
- timer->set_counter(counter);
- current_counter_.SetValue(counter);
-}
-
-bool RuntimeCallStats::IsCalledOnTheSameThread() {
- if (thread_id_.IsValid()) return thread_id_ == ThreadId::Current();
- thread_id_ = ThreadId::Current();
- return true;
-}
-
-void RuntimeCallStats::Print() {
- StdoutStream os;
- Print(os);
-}
-
-void RuntimeCallStats::Print(std::ostream& os) {
- RuntimeCallStatEntries entries;
- if (current_timer_.Value() != nullptr) {
- current_timer_.Value()->Snapshot();
- }
- for (int i = 0; i < kNumberOfCounters; i++) {
- entries.Add(GetCounter(i));
- }
- entries.Print(os);
-}
-
-void RuntimeCallStats::EnumerateCounters(
- debug::RuntimeCallCounterCallback callback) {
- if (current_timer_.Value() != nullptr) {
- current_timer_.Value()->Snapshot();
- }
- for (int i = 0; i < kNumberOfCounters; i++) {
- RuntimeCallCounter* counter = GetCounter(i);
- callback(counter->name(), counter->count(), counter->time());
- }
-}
-
-void RuntimeCallStats::Reset() {
- if (V8_LIKELY(!TracingFlags::is_runtime_stats_enabled())) return;
-
- // In tracing, we only what to trace the time spent on top level trace events,
- // if runtime counter stack is not empty, we should clear the whole runtime
- // counter stack, and then reset counters so that we can dump counters into
- // top level trace events accurately.
- while (current_timer_.Value()) {
- current_timer_.SetValue(current_timer_.Value()->Stop());
- }
-
- for (int i = 0; i < kNumberOfCounters; i++) {
- GetCounter(i)->Reset();
- }
-
- in_use_ = true;
-}
-
-void RuntimeCallStats::Dump(v8::tracing::TracedValue* value) {
- for (int i = 0; i < kNumberOfCounters; i++) {
- if (GetCounter(i)->count() > 0) GetCounter(i)->Dump(value);
- }
- in_use_ = false;
-}
-
-WorkerThreadRuntimeCallStats::WorkerThreadRuntimeCallStats()
- : isolate_thread_id_(ThreadId::Current()) {}
-
-WorkerThreadRuntimeCallStats::~WorkerThreadRuntimeCallStats() {
- if (tls_key_) base::Thread::DeleteThreadLocalKey(*tls_key_);
-}
-
-base::Thread::LocalStorageKey WorkerThreadRuntimeCallStats::GetKey() {
- base::MutexGuard lock(&mutex_);
- DCHECK(TracingFlags::is_runtime_stats_enabled());
- if (!tls_key_) tls_key_ = base::Thread::CreateThreadLocalKey();
- return *tls_key_;
-}
-
-RuntimeCallStats* WorkerThreadRuntimeCallStats::NewTable() {
- DCHECK(TracingFlags::is_runtime_stats_enabled());
- // Never create a new worker table on the isolate's main thread.
- DCHECK_NE(ThreadId::Current(), isolate_thread_id_);
- std::unique_ptr<RuntimeCallStats> new_table =
- std::make_unique<RuntimeCallStats>(RuntimeCallStats::kWorkerThread);
- RuntimeCallStats* result = new_table.get();
-
- base::MutexGuard lock(&mutex_);
- tables_.push_back(std::move(new_table));
- return result;
-}
-
-void WorkerThreadRuntimeCallStats::AddToMainTable(
- RuntimeCallStats* main_call_stats) {
- base::MutexGuard lock(&mutex_);
- for (auto& worker_stats : tables_) {
- DCHECK_NE(main_call_stats, worker_stats.get());
- main_call_stats->Add(worker_stats.get());
- worker_stats->Reset();
- }
-}
-
-WorkerThreadRuntimeCallStatsScope::WorkerThreadRuntimeCallStatsScope(
- WorkerThreadRuntimeCallStats* worker_stats)
- : table_(nullptr) {
- if (V8_LIKELY(!TracingFlags::is_runtime_stats_enabled())) return;
-
- table_ = reinterpret_cast<RuntimeCallStats*>(
- base::Thread::GetThreadLocal(worker_stats->GetKey()));
- if (table_ == nullptr) {
- table_ = worker_stats->NewTable();
- base::Thread::SetThreadLocal(worker_stats->GetKey(), table_);
- }
-
- if ((TracingFlags::runtime_stats.load(std::memory_order_relaxed) &
- v8::tracing::TracingCategoryObserver::ENABLED_BY_TRACING)) {
- table_->Reset();
- }
-}
-
-WorkerThreadRuntimeCallStatsScope::~WorkerThreadRuntimeCallStatsScope() {
- if (V8_LIKELY(table_ == nullptr)) return;
-
- if ((TracingFlags::runtime_stats.load(std::memory_order_relaxed) &
- v8::tracing::TracingCategoryObserver::ENABLED_BY_TRACING)) {
- auto value = v8::tracing::TracedValue::Create();
- table_->Dump(value.get());
- TRACE_EVENT_INSTANT1(TRACE_DISABLED_BY_DEFAULT("v8.runtime_stats"),
- "V8.RuntimeStats", TRACE_EVENT_SCOPE_THREAD,
- "runtime-call-stats", std::move(value));
- }
-}
-
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/logging/counters.h b/chromium/v8/src/logging/counters.h
index 89cda727542..0de6a8d0e66 100644
--- a/chromium/v8/src/logging/counters.h
+++ b/chromium/v8/src/logging/counters.h
@@ -13,16 +13,10 @@
#include "src/base/platform/elapsed-timer.h"
#include "src/base/platform/time.h"
#include "src/common/globals.h"
-#include "src/debug/debug-interface.h"
#include "src/execution/isolate.h"
-#include "src/init/heap-symbols.h"
#include "src/logging/counters-definitions.h"
-#include "src/logging/tracing-flags.h"
+#include "src/logging/runtime-call-stats.h"
#include "src/objects/objects.h"
-#include "src/runtime/runtime.h"
-#include "src/tracing/trace-event.h"
-#include "src/tracing/traced-value.h"
-#include "src/tracing/tracing-category-observer.h"
#include "src/utils/allocation.h"
namespace v8 {
@@ -648,668 +642,6 @@ double AggregatedMemoryHistogram<Histogram>::Aggregate(double current_ms,
value * ((current_ms - last_ms_) / interval_ms);
}
-class RuntimeCallCounter final {
- public:
- RuntimeCallCounter() : RuntimeCallCounter(nullptr) {}
- explicit RuntimeCallCounter(const char* name)
- : name_(name), count_(0), time_(0) {}
- V8_NOINLINE void Reset();
- V8_NOINLINE void Dump(v8::tracing::TracedValue* value);
- void Add(RuntimeCallCounter* other);
-
- const char* name() const { return name_; }
- int64_t count() const { return count_; }
- base::TimeDelta time() const {
- return base::TimeDelta::FromMicroseconds(time_);
- }
- void Increment() { count_++; }
- void Add(base::TimeDelta delta) { time_ += delta.InMicroseconds(); }
-
- private:
- friend class RuntimeCallStats;
-
- const char* name_;
- int64_t count_;
- // Stored as int64_t so that its initialization can be deferred.
- int64_t time_;
-};
-
-// RuntimeCallTimer is used to keep track of the stack of currently active
-// timers used for properly measuring the own time of a RuntimeCallCounter.
-class RuntimeCallTimer final {
- public:
- RuntimeCallCounter* counter() { return counter_; }
- void set_counter(RuntimeCallCounter* counter) { counter_ = counter; }
- RuntimeCallTimer* parent() const { return parent_.Value(); }
- void set_parent(RuntimeCallTimer* timer) { parent_.SetValue(timer); }
- const char* name() const { return counter_->name(); }
-
- inline bool IsStarted();
-
- inline void Start(RuntimeCallCounter* counter, RuntimeCallTimer* parent);
- void Snapshot();
- inline RuntimeCallTimer* Stop();
-
- // Make the time source configurable for testing purposes.
- V8_EXPORT_PRIVATE static base::TimeTicks (*Now)();
-
- // Helper to switch over to CPU time.
- static base::TimeTicks NowCPUTime();
-
- private:
- inline void Pause(base::TimeTicks now);
- inline void Resume(base::TimeTicks now);
- inline void CommitTimeToCounter();
-
- RuntimeCallCounter* counter_ = nullptr;
- base::AtomicValue<RuntimeCallTimer*> parent_;
- base::TimeTicks start_ticks_;
- base::TimeDelta elapsed_;
-};
-
-#define FOR_EACH_GC_COUNTER(V) \
- TRACER_SCOPES(V) \
- TRACER_BACKGROUND_SCOPES(V)
-
-#define FOR_EACH_API_COUNTER(V) \
- V(AccessorPair_New) \
- V(ArrayBuffer_Cast) \
- V(ArrayBuffer_Detach) \
- V(ArrayBuffer_New) \
- V(ArrayBuffer_NewBackingStore) \
- V(ArrayBuffer_BackingStore_Reallocate) \
- V(Array_CloneElementAt) \
- V(Array_New) \
- V(BigInt64Array_New) \
- V(BigInt_NewFromWords) \
- V(BigIntObject_BigIntValue) \
- V(BigIntObject_New) \
- V(BigUint64Array_New) \
- V(BooleanObject_BooleanValue) \
- V(BooleanObject_New) \
- V(Context_New) \
- V(Context_NewRemoteContext) \
- V(DataView_New) \
- V(Date_New) \
- V(Date_NumberValue) \
- V(Debug_Call) \
- V(debug_GetPrivateMembers) \
- V(Error_New) \
- V(External_New) \
- V(Float32Array_New) \
- V(Float64Array_New) \
- V(Function_Call) \
- V(Function_New) \
- V(Function_FunctionProtoToString) \
- V(Function_NewInstance) \
- V(FunctionTemplate_GetFunction) \
- V(FunctionTemplate_New) \
- V(FunctionTemplate_NewRemoteInstance) \
- V(FunctionTemplate_NewWithCache) \
- V(FunctionTemplate_NewWithFastHandler) \
- V(Int16Array_New) \
- V(Int32Array_New) \
- V(Int8Array_New) \
- V(Isolate_DateTimeConfigurationChangeNotification) \
- V(Isolate_LocaleConfigurationChangeNotification) \
- V(JSON_Parse) \
- V(JSON_Stringify) \
- V(Map_AsArray) \
- V(Map_Clear) \
- V(Map_Delete) \
- V(Map_Get) \
- V(Map_Has) \
- V(Map_New) \
- V(Map_Set) \
- V(Message_GetEndColumn) \
- V(Message_GetLineNumber) \
- V(Message_GetSourceLine) \
- V(Message_GetStartColumn) \
- V(Module_Evaluate) \
- V(Module_InstantiateModule) \
- V(Module_SetSyntheticModuleExport) \
- V(NumberObject_New) \
- V(NumberObject_NumberValue) \
- V(Object_CallAsConstructor) \
- V(Object_CallAsFunction) \
- V(Object_CreateDataProperty) \
- V(Object_DefineOwnProperty) \
- V(Object_DefineProperty) \
- V(Object_Delete) \
- V(Object_DeleteProperty) \
- V(Object_ForceSet) \
- V(Object_Get) \
- V(Object_GetOwnPropertyDescriptor) \
- V(Object_GetOwnPropertyNames) \
- V(Object_GetPropertyAttributes) \
- V(Object_GetPropertyNames) \
- V(Object_GetRealNamedProperty) \
- V(Object_GetRealNamedPropertyAttributes) \
- V(Object_GetRealNamedPropertyAttributesInPrototypeChain) \
- V(Object_GetRealNamedPropertyInPrototypeChain) \
- V(Object_Has) \
- V(Object_HasOwnProperty) \
- V(Object_HasRealIndexedProperty) \
- V(Object_HasRealNamedCallbackProperty) \
- V(Object_HasRealNamedProperty) \
- V(Object_IsCodeLike) \
- V(Object_New) \
- V(Object_ObjectProtoToString) \
- V(Object_Set) \
- V(Object_SetAccessor) \
- V(Object_SetIntegrityLevel) \
- V(Object_SetPrivate) \
- V(Object_SetPrototype) \
- V(ObjectTemplate_New) \
- V(ObjectTemplate_NewInstance) \
- V(Object_ToArrayIndex) \
- V(Object_ToBigInt) \
- V(Object_ToDetailString) \
- V(Object_ToInt32) \
- V(Object_ToInteger) \
- V(Object_ToNumber) \
- V(Object_ToObject) \
- V(Object_ToString) \
- V(Object_ToUint32) \
- V(Persistent_New) \
- V(Private_New) \
- V(Promise_Catch) \
- V(Promise_Chain) \
- V(Promise_HasRejectHandler) \
- V(Promise_Resolver_New) \
- V(Promise_Resolver_Reject) \
- V(Promise_Resolver_Resolve) \
- V(Promise_Result) \
- V(Promise_Status) \
- V(Promise_Then) \
- V(Proxy_New) \
- V(RangeError_New) \
- V(ReferenceError_New) \
- V(RegExp_Exec) \
- V(RegExp_New) \
- V(ScriptCompiler_Compile) \
- V(ScriptCompiler_CompileFunctionInContext) \
- V(ScriptCompiler_CompileUnbound) \
- V(Script_Run) \
- V(Set_Add) \
- V(Set_AsArray) \
- V(Set_Clear) \
- V(Set_Delete) \
- V(Set_Has) \
- V(Set_New) \
- V(SharedArrayBuffer_New) \
- V(SharedArrayBuffer_NewBackingStore) \
- V(String_Concat) \
- V(String_NewExternalOneByte) \
- V(String_NewExternalTwoByte) \
- V(String_NewFromOneByte) \
- V(String_NewFromTwoByte) \
- V(String_NewFromUtf8) \
- V(String_NewFromUtf8Literal) \
- V(StringObject_New) \
- V(StringObject_StringValue) \
- V(String_Write) \
- V(String_WriteUtf8) \
- V(Symbol_New) \
- V(SymbolObject_New) \
- V(SymbolObject_SymbolValue) \
- V(SyntaxError_New) \
- V(TracedGlobal_New) \
- V(TryCatch_StackTrace) \
- V(TypeError_New) \
- V(Uint16Array_New) \
- V(Uint32Array_New) \
- V(Uint8Array_New) \
- V(Uint8ClampedArray_New) \
- V(UnboundScript_GetId) \
- V(UnboundScript_GetLineNumber) \
- V(UnboundScript_GetName) \
- V(UnboundScript_GetSourceMappingURL) \
- V(UnboundScript_GetSourceURL) \
- V(ValueDeserializer_ReadHeader) \
- V(ValueDeserializer_ReadValue) \
- V(ValueSerializer_WriteValue) \
- V(Value_Equals) \
- V(Value_InstanceOf) \
- V(Value_Int32Value) \
- V(Value_IntegerValue) \
- V(Value_NumberValue) \
- V(Value_TypeOf) \
- V(Value_Uint32Value) \
- V(WasmCompileError_New) \
- V(WasmLinkError_New) \
- V(WasmRuntimeError_New) \
- V(WeakMap_Get) \
- V(WeakMap_New) \
- V(WeakMap_Set)
-
-#define ADD_THREAD_SPECIFIC_COUNTER(V, Prefix, Suffix) \
- V(Prefix##Suffix) \
- V(Prefix##Background##Suffix)
-
-#define FOR_EACH_THREAD_SPECIFIC_COUNTER(V) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Compile, Analyse) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Compile, Eval) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Compile, Function) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Compile, Ignition) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Compile, IgnitionFinalization) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Compile, RewriteReturnResult) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Compile, ScopeAnalysis) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Compile, Script) \
- \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, AllocateFPRegisters) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, AllocateGeneralRegisters) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, AssembleCode) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, AssignSpillSlots) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, BuildLiveRangeBundles) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, BuildLiveRanges) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, BytecodeGraphBuilder) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, CommitAssignment) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, ConnectRanges) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, ControlFlowOptimization) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, CSAEarlyOptimization) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, CSAOptimization) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, DecideSpillingMode) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, DecompressionOptimization) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, EarlyOptimization) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, EarlyTrimming) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, EffectLinearization) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, EscapeAnalysis) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, FinalizeCode) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, FrameElision) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, GenericLowering) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, Inlining) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, JumpThreading) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, MidTierPopulateReferenceMaps) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, MidTierRegisterAllocator) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, MidTierRegisterOutputDefinition) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, MidTierSpillSlotAllocator) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, LateGraphTrimming) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, LateOptimization) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, LoadElimination) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, LocateSpillSlots) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, LoopExitElimination) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, LoopPeeling) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, MachineOperatorOptimization) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, MeetRegisterConstraints) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, MemoryOptimization) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, OptimizeMoves) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, PopulatePointerMaps) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, PrintGraph) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, ResolveControlFlow) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, ResolvePhis) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, \
- ScheduledEffectControlLinearization) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, ScheduledMachineLowering) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, Scheduling) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, SelectInstructions) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, SimplifiedLowering) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, StoreStoreElimination) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, TypeAssertions) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, TypedLowering) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, Typer) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, Untyper) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, VerifyGraph) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, WasmBaseOptimization) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, WasmInlining) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, WasmLoopUnrolling) \
- \
- ADD_THREAD_SPECIFIC_COUNTER(V, Parse, ArrowFunctionLiteral) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Parse, FunctionLiteral) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Parse, Program) \
- ADD_THREAD_SPECIFIC_COUNTER(V, PreParse, ArrowFunctionLiteral) \
- ADD_THREAD_SPECIFIC_COUNTER(V, PreParse, WithVariableResolution)
-
-#define FOR_EACH_MANUAL_COUNTER(V) \
- V(AccessorGetterCallback) \
- V(AccessorSetterCallback) \
- V(ArrayLengthGetter) \
- V(ArrayLengthSetter) \
- V(BoundFunctionLengthGetter) \
- V(BoundFunctionNameGetter) \
- V(CodeGenerationFromStringsCallbacks) \
- V(CompileBackgroundCompileTask) \
- V(CompileBaseline) \
- V(CompileBaselineVisit) \
- V(CompileBaselinePreVisit) \
- V(CompileCollectSourcePositions) \
- V(CompileDeserialize) \
- V(CompileEnqueueOnDispatcher) \
- V(CompileFinalizeBackgroundCompileTask) \
- V(CompileFinishNowOnDispatcher) \
- V(CompileGetFromOptimizedCodeMap) \
- V(CompilePublishBackgroundFinalization) \
- V(CompileSerialize) \
- V(CompileWaitForDispatcher) \
- V(ConfigureInstance) \
- V(CreateApiFunction) \
- V(DeoptimizeCode) \
- V(DeserializeContext) \
- V(DeserializeIsolate) \
- V(FinalizationRegistryCleanupFromTask) \
- V(FunctionCallback) \
- V(FunctionLengthGetter) \
- V(FunctionPrototypeGetter) \
- V(FunctionPrototypeSetter) \
- V(GC_Custom_AllAvailableGarbage) \
- V(GC_Custom_IncrementalMarkingObserver) \
- V(GC_Custom_SlowAllocateRaw) \
- V(GCEpilogueCallback) \
- V(GCPrologueCallback) \
- V(Genesis) \
- V(GetCompatibleReceiver) \
- V(GetMoreDataCallback) \
- V(IndexedDefinerCallback) \
- V(IndexedDeleterCallback) \
- V(IndexedDescriptorCallback) \
- V(IndexedEnumeratorCallback) \
- V(IndexedGetterCallback) \
- V(IndexedQueryCallback) \
- V(IndexedSetterCallback) \
- V(InstantiateFunction) \
- V(InstantiateObject) \
- V(Invoke) \
- V(InvokeApiFunction) \
- V(InvokeApiInterruptCallbacks) \
- V(IsCompatibleReceiver) \
- V(IsCompatibleReceiverMap) \
- V(IsTemplateFor) \
- V(JS_Execution) \
- V(Map_SetPrototype) \
- V(Map_TransitionToAccessorProperty) \
- V(Map_TransitionToDataProperty) \
- V(MessageListenerCallback) \
- V(NamedDefinerCallback) \
- V(NamedDeleterCallback) \
- V(NamedDescriptorCallback) \
- V(NamedEnumeratorCallback) \
- V(NamedGetterCallback) \
- V(NamedQueryCallback) \
- V(NamedSetterCallback) \
- V(Object_DeleteProperty) \
- V(ObjectVerify) \
- V(OptimizeBackgroundDispatcherJob) \
- V(OptimizeCode) \
- V(OptimizeConcurrentFinalize) \
- V(OptimizeConcurrentPrepare) \
- V(OptimizeFinalizePipelineJob) \
- V(OptimizeHeapBrokerInitialization) \
- V(OptimizeNonConcurrent) \
- V(OptimizeSerialization) \
- V(OptimizeSerializeMetadata) \
- V(ParseEval) \
- V(ParseFunction) \
- V(PropertyCallback) \
- V(PrototypeMap_TransitionToAccessorProperty) \
- V(PrototypeMap_TransitionToDataProperty) \
- V(PrototypeObject_DeleteProperty) \
- V(ReconfigureToDataProperty) \
- V(UpdateProtector) \
- V(StringLengthGetter) \
- V(TestCounter1) \
- V(TestCounter2) \
- V(TestCounter3)
-
-#define FOR_EACH_HANDLER_COUNTER(V) \
- V(KeyedLoadIC_KeyedLoadSloppyArgumentsStub) \
- V(KeyedLoadIC_LoadElementDH) \
- V(KeyedLoadIC_LoadIndexedInterceptorStub) \
- V(KeyedLoadIC_LoadIndexedStringDH) \
- V(KeyedLoadIC_SlowStub) \
- V(KeyedStoreIC_ElementsTransitionAndStoreStub) \
- V(KeyedStoreIC_KeyedStoreSloppyArgumentsStub) \
- V(KeyedStoreIC_SlowStub) \
- V(KeyedStoreIC_StoreElementStub) \
- V(KeyedStoreIC_StoreFastElementStub) \
- V(LoadGlobalIC_LoadScriptContextField) \
- V(LoadGlobalIC_SlowStub) \
- V(LoadIC_FunctionPrototypeStub) \
- V(LoadIC_HandlerCacheHit_Accessor) \
- V(LoadIC_LoadAccessorDH) \
- V(LoadIC_LoadAccessorFromPrototypeDH) \
- V(LoadIC_LoadApiGetterFromPrototypeDH) \
- V(LoadIC_LoadCallback) \
- V(LoadIC_LoadConstantDH) \
- V(LoadIC_LoadConstantFromPrototypeDH) \
- V(LoadIC_LoadFieldDH) \
- V(LoadIC_LoadFieldFromPrototypeDH) \
- V(LoadIC_LoadGlobalDH) \
- V(LoadIC_LoadGlobalFromPrototypeDH) \
- V(LoadIC_LoadIntegerIndexedExoticDH) \
- V(LoadIC_LoadInterceptorDH) \
- V(LoadIC_LoadInterceptorFromPrototypeDH) \
- V(LoadIC_LoadNativeDataPropertyDH) \
- V(LoadIC_LoadNativeDataPropertyFromPrototypeDH) \
- V(LoadIC_LoadNonexistentDH) \
- V(LoadIC_LoadNonMaskingInterceptorDH) \
- V(LoadIC_LoadNormalDH) \
- V(LoadIC_LoadNormalFromPrototypeDH) \
- V(LoadIC_NonReceiver) \
- V(LoadIC_SlowStub) \
- V(LoadIC_StringLength) \
- V(LoadIC_StringWrapperLength) \
- V(StoreGlobalIC_SlowStub) \
- V(StoreGlobalIC_StoreScriptContextField) \
- V(StoreIC_HandlerCacheHit_Accessor) \
- V(StoreIC_NonReceiver) \
- V(StoreIC_SlowStub) \
- V(StoreIC_StoreAccessorDH) \
- V(StoreIC_StoreAccessorOnPrototypeDH) \
- V(StoreIC_StoreApiSetterOnPrototypeDH) \
- V(StoreIC_StoreFieldDH) \
- V(StoreIC_StoreGlobalDH) \
- V(StoreIC_StoreGlobalTransitionDH) \
- V(StoreIC_StoreInterceptorStub) \
- V(StoreIC_StoreNativeDataPropertyDH) \
- V(StoreIC_StoreNativeDataPropertyOnPrototypeDH) \
- V(StoreIC_StoreNormalDH) \
- V(StoreIC_StoreTransitionDH) \
- V(StoreInArrayLiteralIC_SlowStub)
-
-enum RuntimeCallCounterId {
-#define CALL_RUNTIME_COUNTER(name) kGC_##name,
- FOR_EACH_GC_COUNTER(CALL_RUNTIME_COUNTER) //
-#undef CALL_RUNTIME_COUNTER
-#define CALL_RUNTIME_COUNTER(name) k##name,
- FOR_EACH_MANUAL_COUNTER(CALL_RUNTIME_COUNTER) //
-#undef CALL_RUNTIME_COUNTER
-#define CALL_RUNTIME_COUNTER(name, nargs, ressize) kRuntime_##name,
- FOR_EACH_INTRINSIC(CALL_RUNTIME_COUNTER) //
-#undef CALL_RUNTIME_COUNTER
-#define CALL_BUILTIN_COUNTER(name) kBuiltin_##name,
- BUILTIN_LIST_C(CALL_BUILTIN_COUNTER) //
-#undef CALL_BUILTIN_COUNTER
-#define CALL_BUILTIN_COUNTER(name) kAPI_##name,
- FOR_EACH_API_COUNTER(CALL_BUILTIN_COUNTER) //
-#undef CALL_BUILTIN_COUNTER
-#define CALL_BUILTIN_COUNTER(name) kHandler_##name,
- FOR_EACH_HANDLER_COUNTER(CALL_BUILTIN_COUNTER) //
-#undef CALL_BUILTIN_COUNTER
-#define THREAD_SPECIFIC_COUNTER(name) k##name,
- FOR_EACH_THREAD_SPECIFIC_COUNTER(THREAD_SPECIFIC_COUNTER) //
-#undef THREAD_SPECIFIC_COUNTER
- kNumberOfCounters,
-};
-
-class RuntimeCallStats final {
- public:
- enum ThreadType { kMainIsolateThread, kWorkerThread };
-
- // If kExact is chosen the counter will be use as given. With kThreadSpecific,
- // if the RuntimeCallStats was created for a worker thread, then the
- // background specific version of the counter will be used instead.
- enum CounterMode { kExact, kThreadSpecific };
-
- explicit V8_EXPORT_PRIVATE RuntimeCallStats(ThreadType thread_type);
-
- // Starting measuring the time for a function. This will establish the
- // connection to the parent counter for properly calculating the own times.
- V8_EXPORT_PRIVATE void Enter(RuntimeCallTimer* timer,
- RuntimeCallCounterId counter_id);
-
- // Leave a scope for a measured runtime function. This will properly add
- // the time delta to the current_counter and subtract the delta from its
- // parent.
- V8_EXPORT_PRIVATE void Leave(RuntimeCallTimer* timer);
-
- // Set counter id for the innermost measurement. It can be used to refine
- // event kind when a runtime entry counter is too generic.
- V8_EXPORT_PRIVATE void CorrectCurrentCounterId(
- RuntimeCallCounterId counter_id, CounterMode mode = kExact);
-
- V8_EXPORT_PRIVATE void Reset();
- // Add all entries from another stats object.
- void Add(RuntimeCallStats* other);
- V8_EXPORT_PRIVATE void Print(std::ostream& os);
- V8_EXPORT_PRIVATE void Print();
- V8_NOINLINE void Dump(v8::tracing::TracedValue* value);
-
- V8_EXPORT_PRIVATE void EnumerateCounters(
- debug::RuntimeCallCounterCallback callback);
-
- ThreadId thread_id() const { return thread_id_; }
- RuntimeCallTimer* current_timer() { return current_timer_.Value(); }
- RuntimeCallCounter* current_counter() { return current_counter_.Value(); }
- bool InUse() { return in_use_; }
- bool IsCalledOnTheSameThread();
-
- V8_EXPORT_PRIVATE bool IsBackgroundThreadSpecificVariant(
- RuntimeCallCounterId id);
- V8_EXPORT_PRIVATE bool HasThreadSpecificCounterVariants(
- RuntimeCallCounterId id);
-
- // This should only be called for counters with a dual Background variant. If
- // on the main thread, this just returns the counter. If on a worker thread,
- // it returns Background variant of the counter.
- RuntimeCallCounterId CounterIdForThread(RuntimeCallCounterId id) {
- DCHECK(HasThreadSpecificCounterVariants(id));
- // All thread specific counters are laid out with the main thread variant
- // first followed by the background variant.
- return thread_type_ == kWorkerThread
- ? static_cast<RuntimeCallCounterId>(id + 1)
- : id;
- }
-
- bool IsCounterAppropriateForThread(RuntimeCallCounterId id) {
- // TODO(delphick): We should add background-only counters and ensure that
- // all counters (not just the thread-specific variants) are only invoked on
- // the correct thread.
- if (!HasThreadSpecificCounterVariants(id)) return true;
- return IsBackgroundThreadSpecificVariant(id) ==
- (thread_type_ == kWorkerThread);
- }
-
- static const int kNumberOfCounters =
- static_cast<int>(RuntimeCallCounterId::kNumberOfCounters);
- RuntimeCallCounter* GetCounter(RuntimeCallCounterId counter_id) {
- return &counters_[static_cast<int>(counter_id)];
- }
- RuntimeCallCounter* GetCounter(int counter_id) {
- return &counters_[counter_id];
- }
-
- private:
- // Top of a stack of active timers.
- base::AtomicValue<RuntimeCallTimer*> current_timer_;
- // Active counter object associated with current timer.
- base::AtomicValue<RuntimeCallCounter*> current_counter_;
- // Used to track nested tracing scopes.
- bool in_use_;
- ThreadType thread_type_;
- ThreadId thread_id_;
- RuntimeCallCounter counters_[kNumberOfCounters];
-};
-
-class WorkerThreadRuntimeCallStats final {
- public:
- WorkerThreadRuntimeCallStats();
- ~WorkerThreadRuntimeCallStats();
-
- // Returns the TLS key associated with this WorkerThreadRuntimeCallStats.
- base::Thread::LocalStorageKey GetKey();
-
- // Returns a new worker thread runtime call stats table managed by this
- // WorkerThreadRuntimeCallStats.
- RuntimeCallStats* NewTable();
-
- // Adds the counters from the worker thread tables to |main_call_stats|.
- void AddToMainTable(RuntimeCallStats* main_call_stats);
-
- private:
- base::Mutex mutex_;
- std::vector<std::unique_ptr<RuntimeCallStats>> tables_;
- base::Optional<base::Thread::LocalStorageKey> tls_key_;
- // Since this is for creating worker thread runtime-call stats, record the
- // main thread ID to ensure we never create a worker RCS table for the main
- // thread.
- ThreadId isolate_thread_id_;
-};
-
-// Creating a WorkerThreadRuntimeCallStatsScope will provide a thread-local
-// runtime call stats table, and will dump the table to an immediate trace event
-// when it is destroyed.
-class V8_NODISCARD WorkerThreadRuntimeCallStatsScope final {
- public:
- explicit WorkerThreadRuntimeCallStatsScope(
- WorkerThreadRuntimeCallStats* off_thread_stats);
- ~WorkerThreadRuntimeCallStatsScope();
-
- RuntimeCallStats* Get() const { return table_; }
-
- private:
- RuntimeCallStats* table_;
-};
-
-#define CHANGE_CURRENT_RUNTIME_COUNTER(runtime_call_stats, counter_id) \
- do { \
- if (V8_UNLIKELY(TracingFlags::is_runtime_stats_enabled()) && \
- runtime_call_stats) { \
- runtime_call_stats->CorrectCurrentCounterId(counter_id); \
- } \
- } while (false)
-
-#define TRACE_HANDLER_STATS(isolate, counter_name) \
- CHANGE_CURRENT_RUNTIME_COUNTER( \
- isolate->counters()->runtime_call_stats(), \
- RuntimeCallCounterId::kHandler_##counter_name)
-
-// A RuntimeCallTimerScopes wraps around a RuntimeCallTimer to measure the
-// the time of C++ scope.
-class V8_NODISCARD RuntimeCallTimerScope {
- public:
- inline RuntimeCallTimerScope(Isolate* isolate,
- RuntimeCallCounterId counter_id);
- inline RuntimeCallTimerScope(RuntimeCallStats* stats,
- RuntimeCallCounterId counter_id,
- RuntimeCallStats::CounterMode mode =
- RuntimeCallStats::CounterMode::kExact) {
- if (V8_LIKELY(!TracingFlags::is_runtime_stats_enabled() ||
- stats == nullptr)) {
- return;
- }
- stats_ = stats;
- if (mode == RuntimeCallStats::CounterMode::kThreadSpecific) {
- counter_id = stats->CounterIdForThread(counter_id);
- }
-
- DCHECK(stats->IsCounterAppropriateForThread(counter_id));
- stats_->Enter(&timer_, counter_id);
- }
-
- inline ~RuntimeCallTimerScope() {
- if (V8_UNLIKELY(stats_ != nullptr)) {
- stats_->Leave(&timer_);
- }
- }
-
- RuntimeCallTimerScope(const RuntimeCallTimerScope&) = delete;
- RuntimeCallTimerScope& operator=(const RuntimeCallTimerScope&) = delete;
-
- private:
- RuntimeCallStats* stats_ = nullptr;
- RuntimeCallTimer timer_;
-};
-
// This file contains all the v8 counters that are in use.
class Counters : public std::enable_shared_from_this<Counters> {
public:
@@ -1410,11 +742,19 @@ class Counters : public std::enable_shared_from_this<Counters> {
};
// clang-format on
+#ifdef V8_RUNTIME_CALL_STATS
RuntimeCallStats* runtime_call_stats() { return &runtime_call_stats_; }
WorkerThreadRuntimeCallStats* worker_thread_runtime_call_stats() {
return &worker_thread_runtime_call_stats_;
}
+#else // V8_RUNTIME_CALL_STATS
+ RuntimeCallStats* runtime_call_stats() { return nullptr; }
+
+ WorkerThreadRuntimeCallStats* worker_thread_runtime_call_stats() {
+ return nullptr;
+ }
+#endif // V8_RUNTIME_CALL_STATS
private:
friend class StatsTable;
@@ -1422,9 +762,6 @@ class Counters : public std::enable_shared_from_this<Counters> {
friend class Histogram;
friend class HistogramTimer;
- Isolate* isolate_;
- StatsTable stats_table_;
-
int* FindLocation(const char* name) {
return stats_table_.FindLocation(name);
}
@@ -1491,8 +828,12 @@ class Counters : public std::enable_shared_from_this<Counters> {
FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(SC)
#undef SC
+#ifdef V8_RUNTIME_CALL_STATS
RuntimeCallStats runtime_call_stats_;
WorkerThreadRuntimeCallStats worker_thread_runtime_call_stats_;
+#endif
+ Isolate* isolate_;
+ StatsTable stats_table_;
DISALLOW_IMPLICIT_CONSTRUCTORS(Counters);
};
@@ -1505,12 +846,14 @@ void HistogramTimer::Stop() {
TimedHistogram::Stop(&timer_, counters()->isolate());
}
+#ifdef V8_RUNTIME_CALL_STATS
RuntimeCallTimerScope::RuntimeCallTimerScope(Isolate* isolate,
RuntimeCallCounterId counter_id) {
if (V8_LIKELY(!TracingFlags::is_runtime_stats_enabled())) return;
stats_ = isolate->counters()->runtime_call_stats();
stats_->Enter(&timer_, counter_id);
}
+#endif // defined(V8_RUNTIME_CALL_STATS)
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/logging/log.cc b/chromium/v8/src/logging/log.cc
index 110152c56bf..1e76a66a66b 100644
--- a/chromium/v8/src/logging/log.cc
+++ b/chromium/v8/src/logging/log.cc
@@ -1728,12 +1728,14 @@ bool Logger::EnsureLogScriptSource(Script script) {
}
void Logger::RuntimeCallTimerEvent() {
+#ifdef V8_RUNTIME_CALL_STATS
RuntimeCallStats* stats = isolate_->counters()->runtime_call_stats();
RuntimeCallCounter* counter = stats->current_counter();
if (counter == nullptr) return;
MSG_BUILDER();
msg << "active-runtime-timer" << kNext << counter->name();
msg.WriteToLogFile();
+#endif // V8_RUNTIME_CALL_STATS
}
void Logger::TickEvent(TickSample* sample, bool overflow) {
@@ -2127,7 +2129,6 @@ void ExistingCodeLogger::LogCodeObject(Object object) {
case CodeKind::INTERPRETED_FUNCTION:
case CodeKind::TURBOFAN:
case CodeKind::BASELINE:
- case CodeKind::NATIVE_CONTEXT_INDEPENDENT:
case CodeKind::TURBOPROP:
return; // We log this later using LogCompiledFunctions.
case CodeKind::BYTECODE_HANDLER:
@@ -2273,9 +2274,10 @@ void ExistingCodeLogger::LogExistingFunction(
CALL_CODE_EVENT_HANDLER(CallbackEvent(fun_name, entry_point))
// Fast API function.
- Address c_function = v8::ToCData<Address>(fun_data->GetCFunction());
- if (c_function != kNullAddress) {
- CALL_CODE_EVENT_HANDLER(CallbackEvent(fun_name, c_function))
+ int c_functions_count = fun_data->GetCFunctionsCount();
+ for (int i = 0; i < c_functions_count; i++) {
+ CALL_CODE_EVENT_HANDLER(
+ CallbackEvent(fun_name, fun_data->GetCFunction(i)))
}
}
}
diff --git a/chromium/v8/src/logging/log.h b/chromium/v8/src/logging/log.h
index e52f9f28338..ec190320601 100644
--- a/chromium/v8/src/logging/log.h
+++ b/chromium/v8/src/logging/log.h
@@ -208,6 +208,7 @@ class Logger : public CodeEventListener {
Handle<String> source) override;
void CodeMoveEvent(AbstractCode from, AbstractCode to) override;
void SharedFunctionInfoMoveEvent(Address from, Address to) override;
+ void NativeContextMoveEvent(Address from, Address to) override {}
void CodeMovingGCEvent() override;
void CodeDisableOptEvent(Handle<AbstractCode> code,
Handle<SharedFunctionInfo> shared) override;
@@ -410,6 +411,7 @@ class V8_EXPORT_PRIVATE CodeEventLogger : public CodeEventListener {
void GetterCallbackEvent(Handle<Name> name, Address entry_point) override {}
void SetterCallbackEvent(Handle<Name> name, Address entry_point) override {}
void SharedFunctionInfoMoveEvent(Address from, Address to) override {}
+ void NativeContextMoveEvent(Address from, Address to) override {}
void CodeMovingGCEvent() override {}
void CodeDeoptEvent(Handle<Code> code, DeoptimizeKind kind, Address pc,
int fp_to_sp_delta, bool reuse_code) override {}
@@ -475,6 +477,7 @@ class ExternalCodeEventListener : public CodeEventListener {
void GetterCallbackEvent(Handle<Name> name, Address entry_point) override {}
void SetterCallbackEvent(Handle<Name> name, Address entry_point) override {}
void SharedFunctionInfoMoveEvent(Address from, Address to) override {}
+ void NativeContextMoveEvent(Address from, Address to) override {}
void CodeMoveEvent(AbstractCode from, AbstractCode to) override;
void CodeDisableOptEvent(Handle<AbstractCode> code,
Handle<SharedFunctionInfo> shared) override {}
diff --git a/chromium/v8/src/logging/runtime-call-stats.cc b/chromium/v8/src/logging/runtime-call-stats.cc
new file mode 100644
index 00000000000..86e3215f74e
--- /dev/null
+++ b/chromium/v8/src/logging/runtime-call-stats.cc
@@ -0,0 +1,370 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifdef V8_RUNTIME_CALL_STATS
+
+#include "src/logging/runtime-call-stats.h"
+
+#include <iomanip>
+
+#include "src/tracing/tracing-category-observer.h"
+#include "src/utils/ostreams.h"
+
+namespace v8 {
+namespace internal {
+
+base::TimeTicks (*RuntimeCallTimer::Now)() =
+ &base::TimeTicks::HighResolutionNow;
+
+base::TimeTicks RuntimeCallTimer::NowCPUTime() {
+ base::ThreadTicks ticks = base::ThreadTicks::Now();
+ return base::TimeTicks::FromInternalValue(ticks.ToInternalValue());
+}
+
+class RuntimeCallStatEntries {
+ public:
+ void Print(std::ostream& os) {
+ if (total_call_count == 0) return;
+ std::sort(entries.rbegin(), entries.rend());
+ os << std::setw(50) << "Runtime Function/C++ Builtin" << std::setw(12)
+ << "Time" << std::setw(18) << "Count" << std::endl
+ << std::string(88, '=') << std::endl;
+ for (Entry& entry : entries) {
+ entry.SetTotal(total_time, total_call_count);
+ entry.Print(os);
+ }
+ os << std::string(88, '-') << std::endl;
+ Entry("Total", total_time, total_call_count).Print(os);
+ }
+
+ // By default, the compiler will usually inline this, which results in a large
+ // binary size increase: std::vector::push_back expands to a large number of
+ // instructions, and this function is invoked repeatedly by macros.
+ V8_NOINLINE void Add(RuntimeCallCounter* counter) {
+ if (counter->count() == 0) return;
+ entries.push_back(
+ Entry(counter->name(), counter->time(), counter->count()));
+ total_time += counter->time();
+ total_call_count += counter->count();
+ }
+
+ private:
+ class Entry {
+ public:
+ Entry(const char* name, base::TimeDelta time, uint64_t count)
+ : name_(name),
+ time_(time.InMicroseconds()),
+ count_(count),
+ time_percent_(100),
+ count_percent_(100) {}
+
+ bool operator<(const Entry& other) const {
+ if (time_ < other.time_) return true;
+ if (time_ > other.time_) return false;
+ return count_ < other.count_;
+ }
+
+ V8_NOINLINE void Print(std::ostream& os) {
+ os.precision(2);
+ os << std::fixed << std::setprecision(2);
+ os << std::setw(50) << name_;
+ os << std::setw(10) << static_cast<double>(time_) / 1000 << "ms ";
+ os << std::setw(6) << time_percent_ << "%";
+ os << std::setw(10) << count_ << " ";
+ os << std::setw(6) << count_percent_ << "%";
+ os << std::endl;
+ }
+
+ V8_NOINLINE void SetTotal(base::TimeDelta total_time,
+ uint64_t total_count) {
+ if (total_time.InMicroseconds() == 0) {
+ time_percent_ = 0;
+ } else {
+ time_percent_ = 100.0 * time_ / total_time.InMicroseconds();
+ }
+ count_percent_ = 100.0 * count_ / total_count;
+ }
+
+ private:
+ const char* name_;
+ int64_t time_;
+ uint64_t count_;
+ double time_percent_;
+ double count_percent_;
+ };
+
+ uint64_t total_call_count = 0;
+ base::TimeDelta total_time;
+ std::vector<Entry> entries;
+};
+
+void RuntimeCallCounter::Reset() {
+ count_ = 0;
+ time_ = 0;
+}
+
+void RuntimeCallCounter::Dump(v8::tracing::TracedValue* value) {
+ value->BeginArray(name_);
+ value->AppendDouble(count_);
+ value->AppendDouble(time_);
+ value->EndArray();
+}
+
+void RuntimeCallCounter::Add(RuntimeCallCounter* other) {
+ count_ += other->count();
+ time_ += other->time().InMicroseconds();
+}
+
+void RuntimeCallTimer::Snapshot() {
+ base::TimeTicks now = Now();
+ // Pause only the topmost timer in the timer stack.
+ Pause(now);
+ // Commit all the timer's elapsed time to the counters.
+ RuntimeCallTimer* timer = this;
+ while (timer != nullptr) {
+ timer->CommitTimeToCounter();
+ timer = timer->parent();
+ }
+ Resume(now);
+}
+
+RuntimeCallStats::RuntimeCallStats(ThreadType thread_type)
+ : in_use_(false), thread_type_(thread_type) {
+ static const char* const kNames[] = {
+#define CALL_BUILTIN_COUNTER(name) "GC_" #name,
+ FOR_EACH_GC_COUNTER(CALL_BUILTIN_COUNTER) //
+#undef CALL_BUILTIN_COUNTER
+#define CALL_RUNTIME_COUNTER(name) #name,
+ FOR_EACH_MANUAL_COUNTER(CALL_RUNTIME_COUNTER) //
+#undef CALL_RUNTIME_COUNTER
+#define CALL_RUNTIME_COUNTER(name, nargs, ressize) #name,
+ FOR_EACH_INTRINSIC(CALL_RUNTIME_COUNTER) //
+#undef CALL_RUNTIME_COUNTER
+#define CALL_BUILTIN_COUNTER(name) #name,
+ BUILTIN_LIST_C(CALL_BUILTIN_COUNTER) //
+#undef CALL_BUILTIN_COUNTER
+#define CALL_BUILTIN_COUNTER(name) "API_" #name,
+ FOR_EACH_API_COUNTER(CALL_BUILTIN_COUNTER) //
+#undef CALL_BUILTIN_COUNTER
+#define CALL_BUILTIN_COUNTER(name) #name,
+ FOR_EACH_HANDLER_COUNTER(CALL_BUILTIN_COUNTER) //
+#undef CALL_BUILTIN_COUNTER
+#define THREAD_SPECIFIC_COUNTER(name) #name,
+ FOR_EACH_THREAD_SPECIFIC_COUNTER(THREAD_SPECIFIC_COUNTER) //
+#undef THREAD_SPECIFIC_COUNTER
+ };
+ for (int i = 0; i < kNumberOfCounters; i++) {
+ this->counters_[i] = RuntimeCallCounter(kNames[i]);
+ }
+ if (FLAG_rcs_cpu_time) {
+ CHECK(base::ThreadTicks::IsSupported());
+ base::ThreadTicks::WaitUntilInitialized();
+ RuntimeCallTimer::Now = &RuntimeCallTimer::NowCPUTime;
+ }
+}
+
+namespace {
+constexpr RuntimeCallCounterId FirstCounter(RuntimeCallCounterId first, ...) {
+ return first;
+}
+
+#define THREAD_SPECIFIC_COUNTER(name) k##name,
+constexpr RuntimeCallCounterId kFirstThreadVariantCounter =
+ FirstCounter(FOR_EACH_THREAD_SPECIFIC_COUNTER(THREAD_SPECIFIC_COUNTER) 0);
+#undef THREAD_SPECIFIC_COUNTER
+
+#define THREAD_SPECIFIC_COUNTER(name) +1
+constexpr int kThreadVariantCounterCount =
+ 0 FOR_EACH_THREAD_SPECIFIC_COUNTER(THREAD_SPECIFIC_COUNTER);
+#undef THREAD_SPECIFIC_COUNTER
+
+constexpr auto kLastThreadVariantCounter = static_cast<RuntimeCallCounterId>(
+ kFirstThreadVariantCounter + kThreadVariantCounterCount - 1);
+} // namespace
+
+bool RuntimeCallStats::HasThreadSpecificCounterVariants(
+ RuntimeCallCounterId id) {
+ // Check that it's in the range of the thread-specific variant counters and
+ // also that it's one of the background counters.
+ return id >= kFirstThreadVariantCounter && id <= kLastThreadVariantCounter;
+}
+
+bool RuntimeCallStats::IsBackgroundThreadSpecificVariant(
+ RuntimeCallCounterId id) {
+ return HasThreadSpecificCounterVariants(id) &&
+ (id - kFirstThreadVariantCounter) % 2 == 1;
+}
+
+void RuntimeCallStats::Enter(RuntimeCallTimer* timer,
+ RuntimeCallCounterId counter_id) {
+ DCHECK(IsCalledOnTheSameThread());
+ RuntimeCallCounter* counter = GetCounter(counter_id);
+ DCHECK_NOT_NULL(counter->name());
+ timer->Start(counter, current_timer());
+ current_timer_.SetValue(timer);
+ current_counter_.SetValue(counter);
+}
+
+void RuntimeCallStats::Leave(RuntimeCallTimer* timer) {
+ DCHECK(IsCalledOnTheSameThread());
+ RuntimeCallTimer* stack_top = current_timer();
+ if (stack_top == nullptr) return; // Missing timer is a result of Reset().
+ CHECK(stack_top == timer);
+ current_timer_.SetValue(timer->Stop());
+ RuntimeCallTimer* cur_timer = current_timer();
+ current_counter_.SetValue(cur_timer ? cur_timer->counter() : nullptr);
+}
+
+void RuntimeCallStats::Add(RuntimeCallStats* other) {
+ for (int i = 0; i < kNumberOfCounters; i++) {
+ GetCounter(i)->Add(other->GetCounter(i));
+ }
+}
+
+// static
+void RuntimeCallStats::CorrectCurrentCounterId(RuntimeCallCounterId counter_id,
+ CounterMode mode) {
+ DCHECK(IsCalledOnTheSameThread());
+ if (mode == RuntimeCallStats::CounterMode::kThreadSpecific) {
+ counter_id = CounterIdForThread(counter_id);
+ }
+ DCHECK(IsCounterAppropriateForThread(counter_id));
+
+ RuntimeCallTimer* timer = current_timer();
+ if (timer == nullptr) return;
+ RuntimeCallCounter* counter = GetCounter(counter_id);
+ timer->set_counter(counter);
+ current_counter_.SetValue(counter);
+}
+
+bool RuntimeCallStats::IsCalledOnTheSameThread() {
+ if (thread_id_.IsValid()) return thread_id_ == ThreadId::Current();
+ thread_id_ = ThreadId::Current();
+ return true;
+}
+
+void RuntimeCallStats::Print() {
+ StdoutStream os;
+ Print(os);
+}
+
+void RuntimeCallStats::Print(std::ostream& os) {
+ RuntimeCallStatEntries entries;
+ if (current_timer_.Value() != nullptr) {
+ current_timer_.Value()->Snapshot();
+ }
+ for (int i = 0; i < kNumberOfCounters; i++) {
+ entries.Add(GetCounter(i));
+ }
+ entries.Print(os);
+}
+
+void RuntimeCallStats::EnumerateCounters(
+ debug::RuntimeCallCounterCallback callback) {
+ if (current_timer_.Value() != nullptr) {
+ current_timer_.Value()->Snapshot();
+ }
+ for (int i = 0; i < kNumberOfCounters; i++) {
+ RuntimeCallCounter* counter = GetCounter(i);
+ callback(counter->name(), counter->count(), counter->time());
+ }
+}
+
+void RuntimeCallStats::Reset() {
+ if (V8_LIKELY(!TracingFlags::is_runtime_stats_enabled())) return;
+
+ // In tracing, we only want to trace the time spent on top-level trace events.
+ // If the runtime counter stack is not empty, we should clear the whole runtime
+ // counter stack, and then reset the counters so that we can dump counters into
+ // top-level trace events accurately.
+ while (current_timer_.Value()) {
+ current_timer_.SetValue(current_timer_.Value()->Stop());
+ }
+
+ for (int i = 0; i < kNumberOfCounters; i++) {
+ GetCounter(i)->Reset();
+ }
+
+ in_use_ = true;
+}
+
+void RuntimeCallStats::Dump(v8::tracing::TracedValue* value) {
+ for (int i = 0; i < kNumberOfCounters; i++) {
+ if (GetCounter(i)->count() > 0) GetCounter(i)->Dump(value);
+ }
+ in_use_ = false;
+}
+
+WorkerThreadRuntimeCallStats::WorkerThreadRuntimeCallStats()
+ : isolate_thread_id_(ThreadId::Current()) {}
+
+WorkerThreadRuntimeCallStats::~WorkerThreadRuntimeCallStats() {
+ if (tls_key_) base::Thread::DeleteThreadLocalKey(*tls_key_);
+}
+
+base::Thread::LocalStorageKey WorkerThreadRuntimeCallStats::GetKey() {
+ base::MutexGuard lock(&mutex_);
+ DCHECK(TracingFlags::is_runtime_stats_enabled());
+ if (!tls_key_) tls_key_ = base::Thread::CreateThreadLocalKey();
+ return *tls_key_;
+}
+
+RuntimeCallStats* WorkerThreadRuntimeCallStats::NewTable() {
+ DCHECK(TracingFlags::is_runtime_stats_enabled());
+ // Never create a new worker table on the isolate's main thread.
+ DCHECK_NE(ThreadId::Current(), isolate_thread_id_);
+ std::unique_ptr<RuntimeCallStats> new_table =
+ std::make_unique<RuntimeCallStats>(RuntimeCallStats::kWorkerThread);
+ RuntimeCallStats* result = new_table.get();
+
+ base::MutexGuard lock(&mutex_);
+ tables_.push_back(std::move(new_table));
+ return result;
+}
+
+void WorkerThreadRuntimeCallStats::AddToMainTable(
+ RuntimeCallStats* main_call_stats) {
+ base::MutexGuard lock(&mutex_);
+ for (auto& worker_stats : tables_) {
+ DCHECK_NE(main_call_stats, worker_stats.get());
+ main_call_stats->Add(worker_stats.get());
+ worker_stats->Reset();
+ }
+}
+
+WorkerThreadRuntimeCallStatsScope::WorkerThreadRuntimeCallStatsScope(
+ WorkerThreadRuntimeCallStats* worker_stats)
+ : table_(nullptr) {
+ if (V8_LIKELY(!TracingFlags::is_runtime_stats_enabled())) return;
+
+ table_ = reinterpret_cast<RuntimeCallStats*>(
+ base::Thread::GetThreadLocal(worker_stats->GetKey()));
+ if (table_ == nullptr) {
+ table_ = worker_stats->NewTable();
+ base::Thread::SetThreadLocal(worker_stats->GetKey(), table_);
+ }
+
+ if ((TracingFlags::runtime_stats.load(std::memory_order_relaxed) &
+ v8::tracing::TracingCategoryObserver::ENABLED_BY_TRACING)) {
+ table_->Reset();
+ }
+}
+
+WorkerThreadRuntimeCallStatsScope::~WorkerThreadRuntimeCallStatsScope() {
+ if (V8_LIKELY(table_ == nullptr)) return;
+
+ if ((TracingFlags::runtime_stats.load(std::memory_order_relaxed) &
+ v8::tracing::TracingCategoryObserver::ENABLED_BY_TRACING)) {
+ auto value = v8::tracing::TracedValue::Create();
+ table_->Dump(value.get());
+ TRACE_EVENT_INSTANT1(TRACE_DISABLED_BY_DEFAULT("v8.runtime_stats"),
+ "V8.RuntimeStats", TRACE_EVENT_SCOPE_THREAD,
+ "runtime-call-stats", std::move(value));
+ }
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_RUNTIME_CALL_STATS
diff --git a/chromium/v8/src/logging/runtime-call-stats.h b/chromium/v8/src/logging/runtime-call-stats.h
new file mode 100644
index 00000000000..7593170d865
--- /dev/null
+++ b/chromium/v8/src/logging/runtime-call-stats.h
@@ -0,0 +1,763 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_LOGGING_RUNTIME_CALL_STATS_H_
+#define V8_LOGGING_RUNTIME_CALL_STATS_H_
+
+#include "include/v8.h"
+
+#ifdef V8_RUNTIME_CALL_STATS
+
+#include "src/base/atomic-utils.h"
+#include "src/base/optional.h"
+#include "src/base/platform/elapsed-timer.h"
+#include "src/base/platform/time.h"
+#include "src/builtins/builtins-definitions.h"
+#include "src/debug/debug-interface.h"
+#include "src/execution/thread-id.h"
+#include "src/init/heap-symbols.h"
+#include "src/logging/tracing-flags.h"
+#include "src/runtime/runtime.h"
+#include "src/tracing/traced-value.h"
+#include "src/tracing/tracing-category-observer.h"
+
+#endif // V8_RUNTIME_CALL_STATS
+
+namespace v8 {
+namespace internal {
+
+#ifdef V8_RUNTIME_CALL_STATS
+
+#define RCS_SCOPE(...) \
+ v8::internal::RuntimeCallTimerScope rcs_timer_scope(__VA_ARGS__)
+
+class RuntimeCallCounter final {
+ public:
+ RuntimeCallCounter() : RuntimeCallCounter(nullptr) {}
+ explicit RuntimeCallCounter(const char* name)
+ : name_(name), count_(0), time_(0) {}
+ V8_NOINLINE void Reset();
+ V8_NOINLINE void Dump(v8::tracing::TracedValue* value);
+ void Add(RuntimeCallCounter* other);
+
+ const char* name() const { return name_; }
+ int64_t count() const { return count_; }
+ base::TimeDelta time() const {
+ return base::TimeDelta::FromMicroseconds(time_);
+ }
+ void Increment() { count_++; }
+ void Add(base::TimeDelta delta) { time_ += delta.InMicroseconds(); }
+
+ private:
+ friend class RuntimeCallStats;
+
+ const char* name_;
+ int64_t count_;
+ // Stored as int64_t so that its initialization can be deferred.
+ int64_t time_;
+};
+
+// RuntimeCallTimer is used to keep track of the stack of currently active
+// timers used for properly measuring the own time of a RuntimeCallCounter.
+class RuntimeCallTimer final {
+ public:
+ RuntimeCallCounter* counter() { return counter_; }
+ void set_counter(RuntimeCallCounter* counter) { counter_ = counter; }
+ RuntimeCallTimer* parent() const { return parent_.Value(); }
+ void set_parent(RuntimeCallTimer* timer) { parent_.SetValue(timer); }
+ const char* name() const { return counter_->name(); }
+
+ inline bool IsStarted() const { return start_ticks_ != base::TimeTicks(); }
+
+ inline void Start(RuntimeCallCounter* counter, RuntimeCallTimer* parent) {
+ DCHECK(!IsStarted());
+ counter_ = counter;
+ parent_.SetValue(parent);
+ if (TracingFlags::runtime_stats.load(std::memory_order_relaxed) ==
+ v8::tracing::TracingCategoryObserver::ENABLED_BY_SAMPLING) {
+ return;
+ }
+ base::TimeTicks now = RuntimeCallTimer::Now();
+ if (parent) parent->Pause(now);
+ Resume(now);
+ DCHECK(IsStarted());
+ }
+
+ void Snapshot();
+
+ inline RuntimeCallTimer* Stop() {
+ if (!IsStarted()) return parent();
+ base::TimeTicks now = RuntimeCallTimer::Now();
+ Pause(now);
+ counter_->Increment();
+ CommitTimeToCounter();
+
+ RuntimeCallTimer* parent_timer = parent();
+ if (parent_timer) {
+ parent_timer->Resume(now);
+ }
+ return parent_timer;
+ }
+
+ // Make the time source configurable for testing purposes.
+ V8_EXPORT_PRIVATE static base::TimeTicks (*Now)();
+
+ // Helper to switch over to CPU time.
+ static base::TimeTicks NowCPUTime();
+
+ private:
+ inline void Pause(base::TimeTicks now) {
+ DCHECK(IsStarted());
+ elapsed_ += (now - start_ticks_);
+ start_ticks_ = base::TimeTicks();
+ }
+
+ inline void Resume(base::TimeTicks now) {
+ DCHECK(!IsStarted());
+ start_ticks_ = now;
+ }
+
+ inline void CommitTimeToCounter() {
+ counter_->Add(elapsed_);
+ elapsed_ = base::TimeDelta();
+ }
+
+ RuntimeCallCounter* counter_ = nullptr;
+ base::AtomicValue<RuntimeCallTimer*> parent_;
+ base::TimeTicks start_ticks_;
+ base::TimeDelta elapsed_;
+};
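
The Start/Stop protocol above is what attributes "own time": starting a nested timer pauses its parent, and stopping it commits the elapsed time to its counter before resuming the parent. A minimal sketch of that interaction, assuming a V8-internal build where these declarations are visible (illustrative only, not part of the patch):

void MeasureNested(v8::internal::RuntimeCallCounter* outer_counter,
                   v8::internal::RuntimeCallCounter* inner_counter) {
  // The time source is swappable for tests, e.g.:
  //   v8::internal::RuntimeCallTimer::Now =
  //       &v8::internal::RuntimeCallTimer::NowCPUTime;
  v8::internal::RuntimeCallTimer outer;
  outer.Start(outer_counter, /*parent=*/nullptr);  // outer begins ticking
  {
    v8::internal::RuntimeCallTimer inner;
    inner.Start(inner_counter, &outer);  // pauses |outer|
    // ... work attributed to |inner_counter| ...
    inner.Stop();                        // commits inner time, resumes |outer|
  }
  outer.Stop();  // only time spent outside the inner scope is committed here
}
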
+
+#define FOR_EACH_GC_COUNTER(V) \
+ TRACER_SCOPES(V) \
+ TRACER_BACKGROUND_SCOPES(V)
+
+#define FOR_EACH_API_COUNTER(V) \
+ V(AccessorPair_New) \
+ V(ArrayBuffer_Cast) \
+ V(ArrayBuffer_Detach) \
+ V(ArrayBuffer_New) \
+ V(ArrayBuffer_NewBackingStore) \
+ V(ArrayBuffer_BackingStore_Reallocate) \
+ V(Array_CloneElementAt) \
+ V(Array_New) \
+ V(BigInt64Array_New) \
+ V(BigInt_NewFromWords) \
+ V(BigIntObject_BigIntValue) \
+ V(BigIntObject_New) \
+ V(BigUint64Array_New) \
+ V(BooleanObject_BooleanValue) \
+ V(BooleanObject_New) \
+ V(Context_New) \
+ V(Context_NewRemoteContext) \
+ V(DataView_New) \
+ V(Date_New) \
+ V(Date_NumberValue) \
+ V(Debug_Call) \
+ V(debug_GetPrivateMembers) \
+ V(Error_New) \
+ V(External_New) \
+ V(Float32Array_New) \
+ V(Float64Array_New) \
+ V(Function_Call) \
+ V(Function_New) \
+ V(Function_FunctionProtoToString) \
+ V(Function_NewInstance) \
+ V(FunctionTemplate_GetFunction) \
+ V(FunctionTemplate_New) \
+ V(FunctionTemplate_NewRemoteInstance) \
+ V(FunctionTemplate_NewWithCache) \
+ V(FunctionTemplate_NewWithFastHandler) \
+ V(Int16Array_New) \
+ V(Int32Array_New) \
+ V(Int8Array_New) \
+ V(Isolate_DateTimeConfigurationChangeNotification) \
+ V(Isolate_LocaleConfigurationChangeNotification) \
+ V(JSON_Parse) \
+ V(JSON_Stringify) \
+ V(Map_AsArray) \
+ V(Map_Clear) \
+ V(Map_Delete) \
+ V(Map_Get) \
+ V(Map_Has) \
+ V(Map_New) \
+ V(Map_Set) \
+ V(Message_GetEndColumn) \
+ V(Message_GetLineNumber) \
+ V(Message_GetSourceLine) \
+ V(Message_GetStartColumn) \
+ V(Module_Evaluate) \
+ V(Module_InstantiateModule) \
+ V(Module_SetSyntheticModuleExport) \
+ V(NumberObject_New) \
+ V(NumberObject_NumberValue) \
+ V(Object_CallAsConstructor) \
+ V(Object_CallAsFunction) \
+ V(Object_CreateDataProperty) \
+ V(Object_DefineOwnProperty) \
+ V(Object_DefineProperty) \
+ V(Object_Delete) \
+ V(Object_DeleteProperty) \
+ V(Object_ForceSet) \
+ V(Object_Get) \
+ V(Object_GetOwnPropertyDescriptor) \
+ V(Object_GetOwnPropertyNames) \
+ V(Object_GetPropertyAttributes) \
+ V(Object_GetPropertyNames) \
+ V(Object_GetRealNamedProperty) \
+ V(Object_GetRealNamedPropertyAttributes) \
+ V(Object_GetRealNamedPropertyAttributesInPrototypeChain) \
+ V(Object_GetRealNamedPropertyInPrototypeChain) \
+ V(Object_Has) \
+ V(Object_HasOwnProperty) \
+ V(Object_HasRealIndexedProperty) \
+ V(Object_HasRealNamedCallbackProperty) \
+ V(Object_HasRealNamedProperty) \
+ V(Object_IsCodeLike) \
+ V(Object_New) \
+ V(Object_ObjectProtoToString) \
+ V(Object_Set) \
+ V(Object_SetAccessor) \
+ V(Object_SetIntegrityLevel) \
+ V(Object_SetPrivate) \
+ V(Object_SetPrototype) \
+ V(ObjectTemplate_New) \
+ V(ObjectTemplate_NewInstance) \
+ V(Object_ToArrayIndex) \
+ V(Object_ToBigInt) \
+ V(Object_ToDetailString) \
+ V(Object_ToInt32) \
+ V(Object_ToInteger) \
+ V(Object_ToNumber) \
+ V(Object_ToObject) \
+ V(Object_ToString) \
+ V(Object_ToUint32) \
+ V(Persistent_New) \
+ V(Private_New) \
+ V(Promise_Catch) \
+ V(Promise_Chain) \
+ V(Promise_HasRejectHandler) \
+ V(Promise_Resolver_New) \
+ V(Promise_Resolver_Reject) \
+ V(Promise_Resolver_Resolve) \
+ V(Promise_Result) \
+ V(Promise_Status) \
+ V(Promise_Then) \
+ V(Proxy_New) \
+ V(RangeError_New) \
+ V(ReferenceError_New) \
+ V(RegExp_Exec) \
+ V(RegExp_New) \
+ V(ScriptCompiler_Compile) \
+ V(ScriptCompiler_CompileFunctionInContext) \
+ V(ScriptCompiler_CompileUnbound) \
+ V(Script_Run) \
+ V(Set_Add) \
+ V(Set_AsArray) \
+ V(Set_Clear) \
+ V(Set_Delete) \
+ V(Set_Has) \
+ V(Set_New) \
+ V(SharedArrayBuffer_New) \
+ V(SharedArrayBuffer_NewBackingStore) \
+ V(String_Concat) \
+ V(String_NewExternalOneByte) \
+ V(String_NewExternalTwoByte) \
+ V(String_NewFromOneByte) \
+ V(String_NewFromTwoByte) \
+ V(String_NewFromUtf8) \
+ V(String_NewFromUtf8Literal) \
+ V(StringObject_New) \
+ V(StringObject_StringValue) \
+ V(String_Write) \
+ V(String_WriteUtf8) \
+ V(Symbol_New) \
+ V(SymbolObject_New) \
+ V(SymbolObject_SymbolValue) \
+ V(SyntaxError_New) \
+ V(TracedGlobal_New) \
+ V(TryCatch_StackTrace) \
+ V(TypeError_New) \
+ V(Uint16Array_New) \
+ V(Uint32Array_New) \
+ V(Uint8Array_New) \
+ V(Uint8ClampedArray_New) \
+ V(UnboundScript_GetId) \
+ V(UnboundScript_GetLineNumber) \
+ V(UnboundScript_GetName) \
+ V(UnboundScript_GetSourceMappingURL) \
+ V(UnboundScript_GetSourceURL) \
+ V(ValueDeserializer_ReadHeader) \
+ V(ValueDeserializer_ReadValue) \
+ V(ValueSerializer_WriteValue) \
+ V(Value_Equals) \
+ V(Value_InstanceOf) \
+ V(Value_Int32Value) \
+ V(Value_IntegerValue) \
+ V(Value_NumberValue) \
+ V(Value_TypeOf) \
+ V(Value_Uint32Value) \
+ V(WasmCompileError_New) \
+ V(WasmLinkError_New) \
+ V(WasmRuntimeError_New) \
+ V(WeakMap_Get) \
+ V(WeakMap_New) \
+ V(WeakMap_Set)
+
+#define ADD_THREAD_SPECIFIC_COUNTER(V, Prefix, Suffix) \
+ V(Prefix##Suffix) \
+ V(Prefix##Background##Suffix)
+
+#define FOR_EACH_THREAD_SPECIFIC_COUNTER(V) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Compile, Analyse) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Compile, Eval) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Compile, Function) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Compile, Ignition) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Compile, IgnitionFinalization) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Compile, RewriteReturnResult) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Compile, ScopeAnalysis) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Compile, Script) \
+ \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, AllocateFPRegisters) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, AllocateGeneralRegisters) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, AssembleCode) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, AssignSpillSlots) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, BuildLiveRangeBundles) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, BuildLiveRanges) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, BytecodeGraphBuilder) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, CommitAssignment) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, ConnectRanges) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, ControlFlowOptimization) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, CSAEarlyOptimization) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, CSAOptimization) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, DecideSpillingMode) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, DecompressionOptimization) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, EarlyOptimization) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, EarlyTrimming) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, EffectLinearization) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, EscapeAnalysis) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, FinalizeCode) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, FrameElision) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, GenericLowering) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, Inlining) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, JumpThreading) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, MidTierPopulateReferenceMaps) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, MidTierRegisterAllocator) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, MidTierRegisterOutputDefinition) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, MidTierSpillSlotAllocator) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, LateGraphTrimming) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, LateOptimization) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, LoadElimination) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, LocateSpillSlots) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, LoopExitElimination) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, LoopPeeling) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, MachineOperatorOptimization) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, MeetRegisterConstraints) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, MemoryOptimization) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, OptimizeMoves) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, PopulatePointerMaps) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, PrintGraph) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, ResolveControlFlow) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, ResolvePhis) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, \
+ ScheduledEffectControlLinearization) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, ScheduledMachineLowering) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, Scheduling) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, SelectInstructions) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, SimplifiedLowering) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, StoreStoreElimination) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, TypeAssertions) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, TypedLowering) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, Typer) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, Untyper) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, VerifyGraph) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, WasmBaseOptimization) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, WasmInlining) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, WasmLoopUnrolling) \
+ \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Parse, ArrowFunctionLiteral) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Parse, FunctionLiteral) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Parse, Program) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, PreParse, ArrowFunctionLiteral) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, PreParse, WithVariableResolution)
+
+#define FOR_EACH_MANUAL_COUNTER(V) \
+ V(AccessorGetterCallback) \
+ V(AccessorSetterCallback) \
+ V(ArrayLengthGetter) \
+ V(ArrayLengthSetter) \
+ V(BoundFunctionLengthGetter) \
+ V(BoundFunctionNameGetter) \
+ V(CodeGenerationFromStringsCallbacks) \
+ V(CompileBackgroundCompileTask) \
+ V(CompileBaseline) \
+ V(CompileBaselineVisit) \
+ V(CompileBaselinePreVisit) \
+ V(CompileCollectSourcePositions) \
+ V(CompileDeserialize) \
+ V(CompileEnqueueOnDispatcher) \
+ V(CompileFinalizeBackgroundCompileTask) \
+ V(CompileFinishNowOnDispatcher) \
+ V(CompileGetFromOptimizedCodeMap) \
+ V(CompilePublishBackgroundFinalization) \
+ V(CompileSerialize) \
+ V(CompileWaitForDispatcher) \
+ V(ConfigureInstance) \
+ V(CreateApiFunction) \
+ V(DeoptimizeCode) \
+ V(DeserializeContext) \
+ V(DeserializeIsolate) \
+ V(FinalizationRegistryCleanupFromTask) \
+ V(FunctionCallback) \
+ V(FunctionLengthGetter) \
+ V(FunctionPrototypeGetter) \
+ V(FunctionPrototypeSetter) \
+ V(GC_Custom_AllAvailableGarbage) \
+ V(GC_Custom_IncrementalMarkingObserver) \
+ V(GC_Custom_SlowAllocateRaw) \
+ V(GCEpilogueCallback) \
+ V(GCPrologueCallback) \
+ V(Genesis) \
+ V(GetCompatibleReceiver) \
+ V(GetMoreDataCallback) \
+ V(IndexedDefinerCallback) \
+ V(IndexedDeleterCallback) \
+ V(IndexedDescriptorCallback) \
+ V(IndexedEnumeratorCallback) \
+ V(IndexedGetterCallback) \
+ V(IndexedQueryCallback) \
+ V(IndexedSetterCallback) \
+ V(InstantiateFunction) \
+ V(InstantiateObject) \
+ V(Invoke) \
+ V(InvokeApiFunction) \
+ V(InvokeApiInterruptCallbacks) \
+ V(IsCompatibleReceiver) \
+ V(IsCompatibleReceiverMap) \
+ V(IsTemplateFor) \
+ V(JS_Execution) \
+ V(Map_SetPrototype) \
+ V(Map_TransitionToAccessorProperty) \
+ V(Map_TransitionToDataProperty) \
+ V(MessageListenerCallback) \
+ V(NamedDefinerCallback) \
+ V(NamedDeleterCallback) \
+ V(NamedDescriptorCallback) \
+ V(NamedEnumeratorCallback) \
+ V(NamedGetterCallback) \
+ V(NamedQueryCallback) \
+ V(NamedSetterCallback) \
+ V(Object_DeleteProperty) \
+ V(ObjectVerify) \
+ V(OptimizeBackgroundDispatcherJob) \
+ V(OptimizeCode) \
+ V(OptimizeConcurrentFinalize) \
+ V(OptimizeConcurrentPrepare) \
+ V(OptimizeFinalizePipelineJob) \
+ V(OptimizeHeapBrokerInitialization) \
+ V(OptimizeNonConcurrent) \
+ V(OptimizeSerialization) \
+ V(OptimizeSerializeMetadata) \
+ V(ParseEval) \
+ V(ParseFunction) \
+ V(PropertyCallback) \
+ V(PrototypeMap_TransitionToAccessorProperty) \
+ V(PrototypeMap_TransitionToDataProperty) \
+ V(PrototypeObject_DeleteProperty) \
+ V(ReconfigureToDataProperty) \
+ V(UpdateProtector) \
+ V(StringLengthGetter) \
+ V(TestCounter1) \
+ V(TestCounter2) \
+ V(TestCounter3)
+
+#define FOR_EACH_HANDLER_COUNTER(V) \
+ V(KeyedLoadIC_KeyedLoadSloppyArgumentsStub) \
+ V(KeyedLoadIC_LoadElementDH) \
+ V(KeyedLoadIC_LoadIndexedInterceptorStub) \
+ V(KeyedLoadIC_LoadIndexedStringDH) \
+ V(KeyedLoadIC_SlowStub) \
+ V(KeyedStoreIC_ElementsTransitionAndStoreStub) \
+ V(KeyedStoreIC_KeyedStoreSloppyArgumentsStub) \
+ V(KeyedStoreIC_SlowStub) \
+ V(KeyedStoreIC_StoreElementStub) \
+ V(KeyedStoreIC_StoreFastElementStub) \
+ V(LoadGlobalIC_LoadScriptContextField) \
+ V(LoadGlobalIC_SlowStub) \
+ V(LoadIC_FunctionPrototypeStub) \
+ V(LoadIC_HandlerCacheHit_Accessor) \
+ V(LoadIC_LoadAccessorDH) \
+ V(LoadIC_LoadAccessorFromPrototypeDH) \
+ V(LoadIC_LoadApiGetterFromPrototypeDH) \
+ V(LoadIC_LoadCallback) \
+ V(LoadIC_LoadConstantDH) \
+ V(LoadIC_LoadConstantFromPrototypeDH) \
+ V(LoadIC_LoadFieldDH) \
+ V(LoadIC_LoadFieldFromPrototypeDH) \
+ V(LoadIC_LoadGlobalDH) \
+ V(LoadIC_LoadGlobalFromPrototypeDH) \
+ V(LoadIC_LoadIntegerIndexedExoticDH) \
+ V(LoadIC_LoadInterceptorDH) \
+ V(LoadIC_LoadInterceptorFromPrototypeDH) \
+ V(LoadIC_LoadNativeDataPropertyDH) \
+ V(LoadIC_LoadNativeDataPropertyFromPrototypeDH) \
+ V(LoadIC_LoadNonexistentDH) \
+ V(LoadIC_LoadNonMaskingInterceptorDH) \
+ V(LoadIC_LoadNormalDH) \
+ V(LoadIC_LoadNormalFromPrototypeDH) \
+ V(LoadIC_NonReceiver) \
+ V(LoadIC_SlowStub) \
+ V(LoadIC_StringLength) \
+ V(LoadIC_StringWrapperLength) \
+ V(StoreGlobalIC_SlowStub) \
+ V(StoreGlobalIC_StoreScriptContextField) \
+ V(StoreIC_HandlerCacheHit_Accessor) \
+ V(StoreIC_NonReceiver) \
+ V(StoreIC_SlowStub) \
+ V(StoreIC_StoreAccessorDH) \
+ V(StoreIC_StoreAccessorOnPrototypeDH) \
+ V(StoreIC_StoreApiSetterOnPrototypeDH) \
+ V(StoreIC_StoreFieldDH) \
+ V(StoreIC_StoreGlobalDH) \
+ V(StoreIC_StoreGlobalTransitionDH) \
+ V(StoreIC_StoreInterceptorStub) \
+ V(StoreIC_StoreNativeDataPropertyDH) \
+ V(StoreIC_StoreNativeDataPropertyOnPrototypeDH) \
+ V(StoreIC_StoreNormalDH) \
+ V(StoreIC_StoreTransitionDH) \
+ V(StoreInArrayLiteralIC_SlowStub)
+
+enum RuntimeCallCounterId {
+#define CALL_RUNTIME_COUNTER(name) kGC_##name,
+ FOR_EACH_GC_COUNTER(CALL_RUNTIME_COUNTER)
+#undef CALL_RUNTIME_COUNTER
+#define CALL_RUNTIME_COUNTER(name) k##name,
+ FOR_EACH_MANUAL_COUNTER(CALL_RUNTIME_COUNTER)
+#undef CALL_RUNTIME_COUNTER
+#define CALL_RUNTIME_COUNTER(name, nargs, ressize) kRuntime_##name,
+ FOR_EACH_INTRINSIC(CALL_RUNTIME_COUNTER)
+#undef CALL_RUNTIME_COUNTER
+#define CALL_BUILTIN_COUNTER(name) kBuiltin_##name,
+ BUILTIN_LIST_C(CALL_BUILTIN_COUNTER)
+#undef CALL_BUILTIN_COUNTER
+#define CALL_BUILTIN_COUNTER(name) kAPI_##name,
+ FOR_EACH_API_COUNTER(CALL_BUILTIN_COUNTER)
+#undef CALL_BUILTIN_COUNTER
+#define CALL_BUILTIN_COUNTER(name) kHandler_##name,
+ FOR_EACH_HANDLER_COUNTER(CALL_BUILTIN_COUNTER)
+#undef CALL_BUILTIN_COUNTER
+#define THREAD_SPECIFIC_COUNTER(name) k##name,
+ FOR_EACH_THREAD_SPECIFIC_COUNTER(
+ THREAD_SPECIFIC_COUNTER)
+#undef THREAD_SPECIFIC_COUNTER
+ kNumberOfCounters,
+};
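
The enum is generated by feeding each counter list into a small adapter macro; a sketch of how the first FOR_EACH_API_COUNTER entries expand (illustrative expansion only, not generated output):

// #define CALL_BUILTIN_COUNTER(name) kAPI_##name,
// FOR_EACH_API_COUNTER(CALL_BUILTIN_COUNTER)
// expands, for the first few list entries, to
//   kAPI_AccessorPair_New,
//   kAPI_ArrayBuffer_Cast,
//   kAPI_ArrayBuffer_Detach,
//   ...
// so each name in the lists above becomes one enumerator, and kNumberOfCounters
// equals the total number of generated entries.
constexpr auto kExampleId =
    v8::internal::RuntimeCallCounterId::kAPI_ArrayBuffer_New;
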
+
+class RuntimeCallStats final {
+ public:
+ enum ThreadType { kMainIsolateThread, kWorkerThread };
+
+ // If kExact is chosen, the counter will be used as given. With kThreadSpecific,
+ // if the RuntimeCallStats was created for a worker thread, then the
+ // background specific version of the counter will be used instead.
+ enum CounterMode { kExact, kThreadSpecific };
+
+ explicit V8_EXPORT_PRIVATE RuntimeCallStats(ThreadType thread_type);
+
+ // Start measuring the time for a function. This will establish the
+ // connection to the parent counter for properly calculating the own times.
+ V8_EXPORT_PRIVATE void Enter(RuntimeCallTimer* timer,
+ RuntimeCallCounterId counter_id);
+
+ // Leave a scope for a measured runtime function. This will properly add
+ // the time delta to the current_counter and subtract the delta from its
+ // parent.
+ V8_EXPORT_PRIVATE void Leave(RuntimeCallTimer* timer);
+
+ // Set counter id for the innermost measurement. It can be used to refine
+ // event kind when a runtime entry counter is too generic.
+ V8_EXPORT_PRIVATE void CorrectCurrentCounterId(
+ RuntimeCallCounterId counter_id, CounterMode mode = kExact);
+
+ V8_EXPORT_PRIVATE void Reset();
+ // Add all entries from another stats object.
+ void Add(RuntimeCallStats* other);
+ V8_EXPORT_PRIVATE void Print(std::ostream& os);
+ V8_EXPORT_PRIVATE void Print();
+ V8_NOINLINE void Dump(v8::tracing::TracedValue* value);
+
+ V8_EXPORT_PRIVATE void EnumerateCounters(
+ debug::RuntimeCallCounterCallback callback);
+
+ ThreadId thread_id() const { return thread_id_; }
+ RuntimeCallTimer* current_timer() { return current_timer_.Value(); }
+ RuntimeCallCounter* current_counter() { return current_counter_.Value(); }
+ bool InUse() { return in_use_; }
+ bool IsCalledOnTheSameThread();
+
+ V8_EXPORT_PRIVATE bool IsBackgroundThreadSpecificVariant(
+ RuntimeCallCounterId id);
+ V8_EXPORT_PRIVATE bool HasThreadSpecificCounterVariants(
+ RuntimeCallCounterId id);
+
+ // This should only be called for counters with a dual Background variant. If
+ // on the main thread, this just returns the counter. If on a worker thread,
+ // it returns the Background variant of the counter.
+ RuntimeCallCounterId CounterIdForThread(RuntimeCallCounterId id) {
+ DCHECK(HasThreadSpecificCounterVariants(id));
+ // All thread specific counters are laid out with the main thread variant
+ // first followed by the background variant.
+ return thread_type_ == kWorkerThread
+ ? static_cast<RuntimeCallCounterId>(id + 1)
+ : id;
+ }
+
+ bool IsCounterAppropriateForThread(RuntimeCallCounterId id) {
+ // TODO(delphick): We should add background-only counters and ensure that
+ // all counters (not just the thread-specific variants) are only invoked on
+ // the correct thread.
+ if (!HasThreadSpecificCounterVariants(id)) return true;
+ return IsBackgroundThreadSpecificVariant(id) ==
+ (thread_type_ == kWorkerThread);
+ }
+
+ static const int kNumberOfCounters =
+ static_cast<int>(RuntimeCallCounterId::kNumberOfCounters);
+ RuntimeCallCounter* GetCounter(RuntimeCallCounterId counter_id) {
+ return &counters_[static_cast<int>(counter_id)];
+ }
+ RuntimeCallCounter* GetCounter(int counter_id) {
+ return &counters_[counter_id];
+ }
+
+ private:
+ // Top of a stack of active timers.
+ base::AtomicValue<RuntimeCallTimer*> current_timer_;
+ // Active counter object associated with current timer.
+ base::AtomicValue<RuntimeCallCounter*> current_counter_;
+ // Used to track nested tracing scopes.
+ bool in_use_;
+ ThreadType thread_type_;
+ ThreadId thread_id_;
+ RuntimeCallCounter counters_[kNumberOfCounters];
+};
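
Because ADD_THREAD_SPECIFIC_COUNTER emits the main-thread variant immediately followed by its Background twin, CounterIdForThread can map between them with a simple id + 1. A hedged sketch of resolving a counter on a worker-thread table (not part of the patch):

void BumpCompileCounter(v8::internal::RuntimeCallStats* worker_stats) {
  using v8::internal::RuntimeCallCounterId;
  // kCompileFunction is immediately followed by kCompileBackgroundFunction in
  // the enum, so a worker-thread table resolves to the Background id.
  RuntimeCallCounterId id =
      worker_stats->CounterIdForThread(RuntimeCallCounterId::kCompileFunction);
  worker_stats->GetCounter(id)->Increment();
}
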
+
+class WorkerThreadRuntimeCallStats final {
+ public:
+ WorkerThreadRuntimeCallStats();
+ ~WorkerThreadRuntimeCallStats();
+
+ // Returns the TLS key associated with this WorkerThreadRuntimeCallStats.
+ base::Thread::LocalStorageKey GetKey();
+
+ // Returns a new worker thread runtime call stats table managed by this
+ // WorkerThreadRuntimeCallStats.
+ RuntimeCallStats* NewTable();
+
+ // Adds the counters from the worker thread tables to |main_call_stats|.
+ void AddToMainTable(RuntimeCallStats* main_call_stats);
+
+ private:
+ base::Mutex mutex_;
+ std::vector<std::unique_ptr<RuntimeCallStats>> tables_;
+ base::Optional<base::Thread::LocalStorageKey> tls_key_;
+ // Since this is for creating worker thread runtime-call stats, record the
+ // main thread ID to ensure we never create a worker RCS table for the main
+ // thread.
+ ThreadId isolate_thread_id_;
+};
+
+// Creating a WorkerThreadRuntimeCallStatsScope will provide a thread-local
+// runtime call stats table, and will dump the table to an immediate trace event
+// when it is destroyed.
+class V8_NODISCARD WorkerThreadRuntimeCallStatsScope final {
+ public:
+ explicit WorkerThreadRuntimeCallStatsScope(
+ WorkerThreadRuntimeCallStats* off_thread_stats);
+ ~WorkerThreadRuntimeCallStatsScope();
+
+ RuntimeCallStats* Get() const { return table_; }
+
+ private:
+ RuntimeCallStats* table_;
+};
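
A hedged sketch of how a background task might use the scope: it borrows a per-thread table, attributes its work to a thread-specific counter, and the table is dumped to a trace event when the scope is destroyed (illustrative only, not part of the patch):

void RunBackgroundParseTask(
    v8::internal::WorkerThreadRuntimeCallStats* worker_rcs) {
  v8::internal::WorkerThreadRuntimeCallStatsScope rcs_scope(worker_rcs);
  RCS_SCOPE(rcs_scope.Get(),
            v8::internal::RuntimeCallCounterId::kParseProgram,
            v8::internal::RuntimeCallStats::CounterMode::kThreadSpecific);
  // ... parsing work; its time lands in the kParseBackgroundProgram counter ...
}
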
+
+#define CHANGE_CURRENT_RUNTIME_COUNTER(runtime_call_stats, counter_id) \
+ do { \
+ if (V8_UNLIKELY(TracingFlags::is_runtime_stats_enabled()) && \
+ runtime_call_stats) { \
+ runtime_call_stats->CorrectCurrentCounterId(counter_id); \
+ } \
+ } while (false)
+
+#define TRACE_HANDLER_STATS(isolate, counter_name) \
+ CHANGE_CURRENT_RUNTIME_COUNTER( \
+ isolate->counters()->runtime_call_stats(), \
+ RuntimeCallCounterId::kHandler_##counter_name)
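
For instance, an IC slow path can re-label the innermost active measurement with the macro above; a hedged sketch assuming runtime stats are enabled (not part of the patch):

void OnLoadICSlowPath(v8::internal::Isolate* isolate) {
  // Refines the current counter to kHandler_LoadIC_SlowStub.
  TRACE_HANDLER_STATS(isolate, LoadIC_SlowStub);
}
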
+
+// A RuntimeCallTimerScope wraps around a RuntimeCallTimer to measure the
+// time of a C++ scope.
+class V8_NODISCARD RuntimeCallTimerScope {
+ public:
+ inline RuntimeCallTimerScope(Isolate* isolate,
+ RuntimeCallCounterId counter_id);
+ inline RuntimeCallTimerScope(RuntimeCallStats* stats,
+ RuntimeCallCounterId counter_id,
+ RuntimeCallStats::CounterMode mode =
+ RuntimeCallStats::CounterMode::kExact) {
+ if (V8_LIKELY(!TracingFlags::is_runtime_stats_enabled() ||
+ stats == nullptr)) {
+ return;
+ }
+ stats_ = stats;
+ if (mode == RuntimeCallStats::CounterMode::kThreadSpecific) {
+ counter_id = stats->CounterIdForThread(counter_id);
+ }
+
+ DCHECK(stats->IsCounterAppropriateForThread(counter_id));
+ stats_->Enter(&timer_, counter_id);
+ }
+
+ inline ~RuntimeCallTimerScope() {
+ if (V8_UNLIKELY(stats_ != nullptr)) {
+ stats_->Leave(&timer_);
+ }
+ }
+
+ RuntimeCallTimerScope(const RuntimeCallTimerScope&) = delete;
+ RuntimeCallTimerScope& operator=(const RuntimeCallTimerScope&) = delete;
+
+ private:
+ RuntimeCallStats* stats_ = nullptr;
+ RuntimeCallTimer timer_;
+};
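
Call sites typically go through the RCS_SCOPE macro rather than naming the scope class directly; when runtime stats are disabled the constructor bails out immediately. A minimal usage sketch, assuming an internal Isolate* is at hand (not part of the patch):

void InstrumentedOperation(v8::internal::Isolate* isolate) {
  RCS_SCOPE(isolate, v8::internal::RuntimeCallCounterId::kJS_Execution);
  // Time spent in this scope is attributed to kJS_Execution; any enclosing
  // counter is paused until the scope is destroyed.
}
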
+
+#else  // V8_RUNTIME_CALL_STATS
+
+#define RCS_SCOPE(...)
+#define TRACE_HANDLER_STATS(...)
+#define CHANGE_CURRENT_RUNTIME_COUNTER(...)
+
+// Create dummy types to limit code changes
+class WorkerThreadRuntimeCallStats {};
+
+class RuntimeCallStats {
+ public:
+ enum ThreadType { kMainIsolateThread, kWorkerThread };
+ explicit V8_EXPORT_PRIVATE RuntimeCallStats(ThreadType thread_type) {}
+};
+
+class WorkerThreadRuntimeCallStatsScope {
+ public:
+ explicit WorkerThreadRuntimeCallStatsScope(
+ WorkerThreadRuntimeCallStats* off_thread_stats) {}
+ RuntimeCallStats* Get() const { return nullptr; }
+};
+
+#endif  // V8_RUNTIME_CALL_STATS
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_LOGGING_RUNTIME_CALL_STATS_H_
diff --git a/chromium/v8/src/numbers/conversions.cc b/chromium/v8/src/numbers/conversions.cc
index d7c31c2e71c..4a018854dd8 100644
--- a/chromium/v8/src/numbers/conversions.cc
+++ b/chromium/v8/src/numbers/conversions.cc
@@ -182,23 +182,23 @@ enum class Sign { kNegative, kPositive, kNone };
// ES6 18.2.5 parseInt(string, radix) (with NumberParseIntHelper subclass);
// and BigInt parsing cases from https://tc39.github.io/proposal-bigint/
// (with StringToBigIntHelper subclass).
-template <typename LocalIsolate>
+template <typename IsolateT>
class StringToIntHelper {
public:
- StringToIntHelper(LocalIsolate* isolate, Handle<String> subject, int radix)
+ StringToIntHelper(IsolateT* isolate, Handle<String> subject, int radix)
: isolate_(isolate), subject_(subject), radix_(radix) {
DCHECK(subject->IsFlat());
}
// Used for the StringToBigInt operation.
- StringToIntHelper(LocalIsolate* isolate, Handle<String> subject)
+ StringToIntHelper(IsolateT* isolate, Handle<String> subject)
: isolate_(isolate), subject_(subject) {
DCHECK(subject->IsFlat());
}
// Used for parsing BigInt literals, where the input is a Zone-allocated
// buffer of one-byte digits, along with an optional radix prefix.
- StringToIntHelper(LocalIsolate* isolate, const uint8_t* subject, int length)
+ StringToIntHelper(IsolateT* isolate, const uint8_t* subject, int length)
: isolate_(isolate), raw_one_byte_subject_(subject), length_(length) {}
virtual ~StringToIntHelper() = default;
@@ -239,7 +239,7 @@ class StringToIntHelper {
return subject_->GetFlatContent(no_gc).ToUC16Vector();
}
- LocalIsolate* isolate() { return isolate_; }
+ IsolateT* isolate() { return isolate_; }
int radix() { return radix_; }
int cursor() { return cursor_; }
int length() { return length_; }
@@ -254,7 +254,7 @@ class StringToIntHelper {
template <class Char>
bool ParseChunkInternal(Char start);
- LocalIsolate* isolate_;
+ IsolateT* isolate_;
Handle<String> subject_;
const uint8_t* raw_one_byte_subject_ = nullptr;
int radix_ = 0;
@@ -267,8 +267,8 @@ class StringToIntHelper {
State state_ = State::kRunning;
};
-template <typename LocalIsolate>
-void StringToIntHelper<LocalIsolate>::ParseInt() {
+template <typename IsolateT>
+void StringToIntHelper<IsolateT>::ParseInt() {
{
DisallowGarbageCollection no_gc;
if (IsOneByte()) {
@@ -311,10 +311,10 @@ void StringToIntHelper<LocalIsolate>::ParseInt() {
DCHECK_NE(state_, State::kRunning);
}
-template <typename LocalIsolate>
+template <typename IsolateT>
template <class Char>
-void StringToIntHelper<LocalIsolate>::DetectRadixInternal(Char current,
- int length) {
+void StringToIntHelper<IsolateT>::DetectRadixInternal(Char current,
+ int length) {
Char start = current;
length_ = length;
Char end = start + length;
@@ -391,9 +391,9 @@ void StringToIntHelper<LocalIsolate>::DetectRadixInternal(Char current,
cursor_ = static_cast<int>(current - start);
}
-template <typename LocalIsolate>
+template <typename IsolateT>
template <class Char>
-bool StringToIntHelper<LocalIsolate>::ParseChunkInternal(Char start) {
+bool StringToIntHelper<IsolateT>::ParseChunkInternal(Char start) {
const int kChunkSize = 10240;
Char current = start + cursor_;
Char end = start + length_;
@@ -860,14 +860,14 @@ double StringToInt(Isolate* isolate, Handle<String> string, int radix) {
return helper.GetResult();
}
-template <typename LocalIsolate>
-class StringToBigIntHelper : public StringToIntHelper<LocalIsolate> {
+template <typename IsolateT>
+class StringToBigIntHelper : public StringToIntHelper<IsolateT> {
public:
enum class Behavior { kStringToBigInt, kLiteral };
// Used for StringToBigInt operation (BigInt constructor and == operator).
- StringToBigIntHelper(LocalIsolate* isolate, Handle<String> string)
- : StringToIntHelper<LocalIsolate>(isolate, string),
+ StringToBigIntHelper(IsolateT* isolate, Handle<String> string)
+ : StringToIntHelper<IsolateT>(isolate, string),
behavior_(Behavior::kStringToBigInt) {
this->set_allow_binary_and_octal_prefixes();
this->set_disallow_trailing_junk();
@@ -875,8 +875,8 @@ class StringToBigIntHelper : public StringToIntHelper<LocalIsolate> {
// Used for parsing BigInt literals, where the input is a buffer of
// one-byte ASCII digits, along with an optional radix prefix.
- StringToBigIntHelper(LocalIsolate* isolate, const uint8_t* string, int length)
- : StringToIntHelper<LocalIsolate>(isolate, string, length),
+ StringToBigIntHelper(IsolateT* isolate, const uint8_t* string, int length)
+ : StringToIntHelper<IsolateT>(isolate, string, length),
behavior_(Behavior::kLiteral) {
this->set_allow_binary_and_octal_prefixes();
}
@@ -943,8 +943,8 @@ class StringToBigIntHelper : public StringToIntHelper<LocalIsolate> {
Behavior behavior_;
};
-template <typename LocalIsolate>
-bool StringToBigIntHelper<LocalIsolate>::CheckTermination() {
+template <typename IsolateT>
+bool StringToBigIntHelper<IsolateT>::CheckTermination() {
return false;
}
@@ -961,9 +961,9 @@ MaybeHandle<BigInt> StringToBigInt(Isolate* isolate, Handle<String> string) {
return helper.GetResult();
}
-template <typename LocalIsolate>
-MaybeHandle<BigInt> BigIntLiteral(LocalIsolate* isolate, const char* string) {
- StringToBigIntHelper<LocalIsolate> helper(
+template <typename IsolateT>
+MaybeHandle<BigInt> BigIntLiteral(IsolateT* isolate, const char* string) {
+ StringToBigIntHelper<IsolateT> helper(
isolate, reinterpret_cast<const uint8_t*>(string),
static_cast<int>(strlen(string)));
return helper.GetResult();
diff --git a/chromium/v8/src/numbers/conversions.h b/chromium/v8/src/numbers/conversions.h
index d4a09e1b20c..935d6f57e28 100644
--- a/chromium/v8/src/numbers/conversions.h
+++ b/chromium/v8/src/numbers/conversions.h
@@ -101,9 +101,9 @@ MaybeHandle<BigInt> StringToBigInt(Isolate* isolate, Handle<String> string);
// 0x -> hex
// 0o -> octal
// 0b -> binary
-template <typename LocalIsolate>
+template <typename IsolateT>
EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
-MaybeHandle<BigInt> BigIntLiteral(LocalIsolate* isolate, const char* string);
+MaybeHandle<BigInt> BigIntLiteral(IsolateT* isolate, const char* string);
const int kDoubleToCStringMinBufferSize = 100;
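
For illustration, a hedged sketch of calling BigIntLiteral with the radix prefixes listed above (V8-internal API, error handling elided; not part of the patch):

v8::internal::MaybeHandle<v8::internal::BigInt> ParseLiteral(
    v8::internal::Isolate* isolate) {
  // "0x" selects hex, "0o" octal, "0b" binary; no prefix means decimal.
  return v8::internal::BigIntLiteral(isolate, "0xDEADBEEF");
}
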
diff --git a/chromium/v8/src/numbers/dtoa.h b/chromium/v8/src/numbers/dtoa.h
index da67c180e8c..cf540f1213a 100644
--- a/chromium/v8/src/numbers/dtoa.h
+++ b/chromium/v8/src/numbers/dtoa.h
@@ -23,9 +23,14 @@ enum DtoaMode {
DTOA_PRECISION
};
-// The maximal length of digits a double can have in base 10.
-// Note that DoubleToAscii null-terminates its input. So the given buffer should
-// be at least kBase10MaximalLength + 1 characters long.
+// The maximal length of digits a double can have in base 10 as returned by
+// 'DoubleToAscii'. This includes neither sign, decimal point, nor exponent.
+// For example, DoubleToAscii(-3.5844466002796428e+298, ..., buffer, ...) will
+// fill buffer with the string "35844466002796428", while sign and decimal point
+// position will be provided through additional output arguments.
+// kBase10MaximalLength refers to the maximal length of this string. Note that
+// DoubleToAscii null-terminates its input. So the given buffer should be at
+// least kBase10MaximalLength + 1 characters long.
const int kBase10MaximalLength = 17;
// Converts the given double 'v' to ASCII.
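
A small sketch of the buffer sizing the new comment prescribes (signature details of DoubleToAscii elided; not part of the patch):

void BufferSizingSketch() {
  // 17 significant digits plus the terminating '\0' that DoubleToAscii writes.
  char digits[v8::internal::kBase10MaximalLength + 1];
  // For -3.5844466002796428e+298 the buffer would receive "35844466002796428";
  // the sign and decimal-point position come back via separate output arguments.
  (void)digits;
}
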
diff --git a/chromium/v8/src/objects/all-objects-inl.h b/chromium/v8/src/objects/all-objects-inl.h
index 78c239fd773..5a78cefc8d0 100644
--- a/chromium/v8/src/objects/all-objects-inl.h
+++ b/chromium/v8/src/objects/all-objects-inl.h
@@ -52,6 +52,7 @@
#include "src/objects/lookup-inl.h"
#include "src/objects/map-inl.h"
#include "src/objects/maybe-object-inl.h"
+#include "src/objects/megadom-handler-inl.h"
#include "src/objects/microtask-inl.h"
#include "src/objects/module-inl.h"
#include "src/objects/name-inl.h"
diff --git a/chromium/v8/src/objects/allocation-site-inl.h b/chromium/v8/src/objects/allocation-site-inl.h
index d9911bc8266..68e91e68a5a 100644
--- a/chromium/v8/src/objects/allocation-site-inl.h
+++ b/chromium/v8/src/objects/allocation-site-inl.h
@@ -27,6 +27,8 @@ CAST_ACCESSOR(AllocationSite)
ACCESSORS(AllocationSite, transition_info_or_boilerplate, Object,
kTransitionInfoOrBoilerplateOffset)
+RELEASE_ACQUIRE_ACCESSORS(AllocationSite, transition_info_or_boilerplate,
+ Object, kTransitionInfoOrBoilerplateOffset)
ACCESSORS(AllocationSite, nested_site, Object, kNestedSiteOffset)
INT32_ACCESSORS(AllocationSite, pretenure_data, kPretenureDataOffset)
INT32_ACCESSORS(AllocationSite, pretenure_create_count,
@@ -41,8 +43,14 @@ JSObject AllocationSite::boilerplate() const {
return JSObject::cast(transition_info_or_boilerplate());
}
-void AllocationSite::set_boilerplate(JSObject object, WriteBarrierMode mode) {
- set_transition_info_or_boilerplate(object, mode);
+JSObject AllocationSite::boilerplate(AcquireLoadTag tag) const {
+ DCHECK(PointsToLiteral());
+ return JSObject::cast(transition_info_or_boilerplate(tag));
+}
+
+void AllocationSite::set_boilerplate(JSObject value, ReleaseStoreTag tag,
+ WriteBarrierMode mode) {
+ set_transition_info_or_boilerplate(value, tag, mode);
}
int AllocationSite::transition_info() const {
diff --git a/chromium/v8/src/objects/allocation-site.h b/chromium/v8/src/objects/allocation-site.h
index 437876d94cc..549e82a4da9 100644
--- a/chromium/v8/src/objects/allocation-site.h
+++ b/chromium/v8/src/objects/allocation-site.h
@@ -40,7 +40,9 @@ class AllocationSite : public Struct {
// Contains either a Smi-encoded bitfield or a boilerplate. If it's a Smi the
// AllocationSite is for a constructed Array.
DECL_ACCESSORS(transition_info_or_boilerplate, Object)
- DECL_ACCESSORS(boilerplate, JSObject)
+ DECL_RELEASE_ACQUIRE_ACCESSORS(transition_info_or_boilerplate, Object)
+ DECL_GETTER(boilerplate, JSObject)
+ DECL_RELEASE_ACQUIRE_ACCESSORS(boilerplate, JSObject)
DECL_INT_ACCESSORS(transition_info)
// nested_site threads a list of sites that represent nested literals
@@ -68,9 +70,9 @@ class AllocationSite : public Struct {
bool IsNested();
// transition_info bitfields, for constructed array transition info.
- using ElementsKindBits = base::BitField<ElementsKind, 0, 5>;
- using DoNotInlineBit = base::BitField<bool, 5, 1>;
- // Unused bits 6-30.
+ using ElementsKindBits = base::BitField<ElementsKind, 0, 6>;
+ using DoNotInlineBit = base::BitField<bool, 6, 1>;
+ // Unused bits 7-30.
// Bitfields for pretenure_data
using MementoFoundCountBits = base::BitField<int, 0, 26>;
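
With ElementsKindBits widened to six bits, packing transition_info still goes through the base::BitField helpers; a hedged sketch of encoding a value under the new layout (hypothetical helper, not part of the patch):

uint32_t EncodeTransitionInfo(v8::internal::ElementsKind kind,
                              bool do_not_inline) {
  uint32_t bits = 0;
  bits = v8::internal::AllocationSite::ElementsKindBits::update(bits, kind);
  bits = v8::internal::AllocationSite::DoNotInlineBit::update(bits,
                                                              do_not_inline);
  return bits;  // kind occupies bits 0-5, the do-not-inline flag bit 6
}
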
diff --git a/chromium/v8/src/objects/api-callbacks.h b/chromium/v8/src/objects/api-callbacks.h
index f5d81dd9861..a55ca5eb62f 100644
--- a/chromium/v8/src/objects/api-callbacks.h
+++ b/chromium/v8/src/objects/api-callbacks.h
@@ -64,6 +64,8 @@ class AccessorInfo : public TorqueGeneratedAccessorInfo<AccessorInfo, Struct> {
static int AppendUnique(Isolate* isolate, Handle<Object> descriptors,
Handle<FixedArray> array, int valid_descriptors);
+ DECL_PRINTER(AccessorInfo)
+
private:
inline bool HasExpectedReceiverType();
diff --git a/chromium/v8/src/objects/arguments.tq b/chromium/v8/src/objects/arguments.tq
index b65464688a7..6f8e02fdbc9 100644
--- a/chromium/v8/src/objects/arguments.tq
+++ b/chromium/v8/src/objects/arguments.tq
@@ -80,7 +80,7 @@ extern shape JSStrictArgumentsObject extends JSArgumentsObject {
class SloppyArgumentsElements extends FixedArrayBase {
context: Context;
arguments: FixedArray|NumberDictionary;
- @relaxedRead mapped_entries[length]: Smi|TheHole;
+ @cppRelaxedLoad mapped_entries[length]: Smi|TheHole;
}
macro NewSloppyArgumentsElements<Iterator: type>(
diff --git a/chromium/v8/src/objects/backing-store.cc b/chromium/v8/src/objects/backing-store.cc
index 08288ef62c0..77eaf3a722c 100644
--- a/chromium/v8/src/objects/backing-store.cc
+++ b/chromium/v8/src/objects/backing-store.cc
@@ -37,6 +37,14 @@ namespace {
constexpr size_t kPlatformMaxPages =
std::numeric_limits<size_t>::max() / wasm::kWasmPageSize;
+constexpr uint64_t kNegativeGuardSize = uint64_t{2} * GB;
+
+#if V8_TARGET_ARCH_64_BIT
+constexpr uint64_t kFullGuardSize = uint64_t{10} * GB;
+#endif
+
+#endif // V8_ENABLE_WEBASSEMBLY
+
#if V8_TARGET_ARCH_MIPS64
// MIPS64 has a user space of 2^40 bytes on most processors,
// address space limits needs to be smaller.
@@ -50,12 +58,6 @@ constexpr size_t kAddressSpaceLimit = 0x10100000000L; // 1 TiB + 4 GiB
constexpr size_t kAddressSpaceLimit = 0xC0000000; // 3 GiB
#endif
-constexpr uint64_t kNegativeGuardSize = uint64_t{2} * GB;
-
-#if V8_TARGET_ARCH_64_BIT
-constexpr uint64_t kFullGuardSize = uint64_t{10} * GB;
-#endif
-
std::atomic<uint64_t> reserved_address_space_{0};
// Allocation results are reported to UMA
@@ -75,7 +77,7 @@ enum class AllocationStatus {
base::AddressRegion GetReservedRegion(bool has_guard_regions,
void* buffer_start,
size_t byte_capacity) {
-#if V8_TARGET_ARCH_64_BIT
+#if V8_TARGET_ARCH_64_BIT && V8_ENABLE_WEBASSEMBLY
if (has_guard_regions) {
// Guard regions always look like this:
// |xxx(2GiB)xxx|.......(4GiB)..xxxxx|xxxxxx(4GiB)xxxxxx|
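
The reservation arithmetic behind that layout, restated as a sketch (constants as defined earlier in this file; the constant name below is hypothetical and not part of the patch):

// 2 GiB negative guard + 4 GiB addressable buffer span + 4 GiB trailing guard
// = 10 GiB, which matches kFullGuardSize (uint64_t{10} * GB) reserved when
// trap-handler guard regions are in use on 64-bit targets.
constexpr uint64_t kSketchReservation =
    uint64_t{2} * GB + uint64_t{4} * GB + uint64_t{4} * GB;
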
@@ -97,7 +99,7 @@ base::AddressRegion GetReservedRegion(bool has_guard_regions,
}
size_t GetReservationSize(bool has_guard_regions, size_t byte_capacity) {
-#if V8_TARGET_ARCH_64_BIT
+#if V8_TARGET_ARCH_64_BIT && V8_ENABLE_WEBASSEMBLY
if (has_guard_regions) return kFullGuardSize;
#else
DCHECK(!has_guard_regions);
@@ -110,7 +112,6 @@ void RecordStatus(Isolate* isolate, AllocationStatus status) {
isolate->counters()->wasm_memory_allocation_result()->AddSample(
static_cast<int>(status));
}
-#endif // V8_ENABLE_WEBASSEMBLY
inline void DebugCheckZero(void* start, size_t byte_length) {
#if DEBUG
@@ -160,6 +161,8 @@ BackingStore::~BackingStore() {
#if V8_ENABLE_WEBASSEMBLY
if (is_wasm_memory_) {
+ // TODO(v8:11111): RAB / GSAB - Wasm integration.
+ DCHECK(!is_resizable_);
DCHECK(free_on_destruct_);
DCHECK(!custom_deleter_);
size_t reservation_size =
@@ -189,6 +192,23 @@ BackingStore::~BackingStore() {
}
#endif // V8_ENABLE_WEBASSEMBLY
+ if (is_resizable_) {
+ DCHECK(free_on_destruct_);
+ DCHECK(!custom_deleter_);
+ size_t reservation_size =
+ GetReservationSize(has_guard_regions_, byte_capacity_);
+ auto region =
+ GetReservedRegion(has_guard_regions_, buffer_start_, byte_capacity_);
+
+ bool pages_were_freed =
+ region.size() == 0 /* no need to free any pages */ ||
+ FreePages(GetPlatformPageAllocator(),
+ reinterpret_cast<void*>(region.begin()), region.size());
+ CHECK(pages_were_freed);
+ BackingStore::ReleaseReservation(reservation_size);
+ Clear();
+ return;
+ }
if (custom_deleter_) {
DCHECK(free_on_destruct_);
TRACE_BS("BS:custom deleter bs=%p mem=%p (length=%zu, capacity=%zu)\n",
@@ -252,15 +272,16 @@ std::unique_ptr<BackingStore> BackingStore::Allocate(
}
}
- auto result = new BackingStore(buffer_start, // start
- byte_length, // length
- byte_length, // capacity
- shared, // shared
- false, // is_wasm_memory
- true, // free_on_destruct
- false, // has_guard_regions
- false, // custom_deleter
- false); // empty_deleter
+ auto result = new BackingStore(buffer_start, // start
+ byte_length, // length
+ byte_length, // capacity
+ shared, // shared
+ ResizableFlag::kNotResizable, // resizable
+ false, // is_wasm_memory
+ true, // free_on_destruct
+ false, // has_guard_regions
+ false, // custom_deleter
+ false); // empty_deleter
TRACE_BS("BS:alloc bs=%p mem=%p (length=%zu)\n", result,
result->buffer_start(), byte_length);
@@ -281,6 +302,27 @@ void BackingStore::SetAllocatorFromIsolate(Isolate* isolate) {
}
#if V8_ENABLE_WEBASSEMBLY
+// Allocate a backing store for a Wasm memory. Always use the page allocator
+// and add guard regions.
+std::unique_ptr<BackingStore> BackingStore::TryAllocateWasmMemory(
+ Isolate* isolate, size_t initial_pages, size_t maximum_pages,
+ SharedFlag shared) {
+ // Compute size of reserved memory.
+ size_t engine_max_pages = wasm::max_mem_pages();
+ maximum_pages = std::min(engine_max_pages, maximum_pages);
+
+ auto result = TryAllocateAndPartiallyCommitMemory(
+ isolate, initial_pages * wasm::kWasmPageSize, wasm::kWasmPageSize,
+ initial_pages, maximum_pages, true, shared);
+ // Shared Wasm memories need an anchor for the memory object list.
+ if (result && shared == SharedFlag::kShared) {
+ result->type_specific_data_.shared_wasm_memory_data =
+ new SharedWasmMemoryData();
+ }
+ return result;
+}
+#endif // V8_ENABLE_WEBASSEMBLY
+
bool BackingStore::ReserveAddressSpace(uint64_t num_bytes) {
uint64_t reservation_limit = kAddressSpaceLimit;
uint64_t old_count = reserved_address_space_.load(std::memory_order_relaxed);
@@ -300,17 +342,21 @@ void BackingStore::ReleaseReservation(uint64_t num_bytes) {
DCHECK_LE(num_bytes, old_reserved);
}
-// Allocate a backing store for a Wasm memory. Always use the page allocator
-// and add guard regions.
-std::unique_ptr<BackingStore> BackingStore::TryAllocateWasmMemory(
- Isolate* isolate, size_t initial_pages, size_t maximum_pages,
+std::unique_ptr<BackingStore> BackingStore::TryAllocateAndPartiallyCommitMemory(
+ Isolate* isolate, size_t byte_length, size_t page_size,
+ size_t initial_pages, size_t maximum_pages, bool is_wasm_memory,
SharedFlag shared) {
+ // Enforce engine limitation on the maximum number of pages.
+ if (maximum_pages > std::numeric_limits<size_t>::max() / page_size) {
+ return nullptr;
+ }
+
// Cannot reserve 0 pages on some OSes.
if (maximum_pages == 0) maximum_pages = 1;
TRACE_BS("BSw:try %zu pages, %zu max\n", initial_pages, maximum_pages);
- bool guards = trap_handler::IsTrapHandlerEnabled();
+ bool guards = is_wasm_memory && trap_handler::IsTrapHandlerEnabled();
// For accounting purposes, whether a GC was necessary.
bool did_retry = false;
@@ -329,16 +375,7 @@ std::unique_ptr<BackingStore> BackingStore::TryAllocateWasmMemory(
return false;
};
- // Compute size of reserved memory.
-
- size_t engine_max_pages = wasm::max_mem_pages();
- maximum_pages = std::min(engine_max_pages, maximum_pages);
- // If the platform doesn't support so many pages, attempting to allocate
- // is guaranteed to fail, so we don't even try.
- if (maximum_pages > kPlatformMaxPages) return {};
- CHECK_LE(maximum_pages,
- std::numeric_limits<size_t>::max() / wasm::kWasmPageSize);
- size_t byte_capacity = maximum_pages * wasm::kWasmPageSize;
+ size_t byte_capacity = maximum_pages * page_size;
size_t reservation_size = GetReservationSize(guards, byte_capacity);
//--------------------------------------------------------------------------
@@ -366,7 +403,7 @@ std::unique_ptr<BackingStore> BackingStore::TryAllocateWasmMemory(
auto allocate_pages = [&] {
allocation_base =
AllocatePages(GetPlatformPageAllocator(), nullptr, reservation_size,
- wasm::kWasmPageSize, PageAllocator::kNoAccess);
+ page_size, PageAllocator::kNoAccess);
return allocation_base != nullptr;
};
if (!gc_retry(allocate_pages)) {
@@ -379,23 +416,27 @@ std::unique_ptr<BackingStore> BackingStore::TryAllocateWasmMemory(
// Get a pointer to the start of the buffer, skipping negative guard region
// if necessary.
+#if V8_ENABLE_WEBASSEMBLY
byte* buffer_start = reinterpret_cast<byte*>(allocation_base) +
(guards ? kNegativeGuardSize : 0);
-
+#else
+ DCHECK(!guards);
+ byte* buffer_start = reinterpret_cast<byte*>(allocation_base);
+#endif
//--------------------------------------------------------------------------
// 3. Commit the initial pages (allow read/write).
//--------------------------------------------------------------------------
- size_t byte_length = initial_pages * wasm::kWasmPageSize;
+ size_t committed_byte_length = initial_pages * page_size;
auto commit_memory = [&] {
- return byte_length == 0 ||
- SetPermissions(GetPlatformPageAllocator(), buffer_start, byte_length,
- PageAllocator::kReadWrite);
+ return committed_byte_length == 0 ||
+ SetPermissions(GetPlatformPageAllocator(), buffer_start,
+ committed_byte_length, PageAllocator::kReadWrite);
};
if (!gc_retry(commit_memory)) {
TRACE_BS("BSw:try failed to set permissions (%p, %zu)\n", buffer_start,
- byte_length);
+ committed_byte_length);
// SetPermissions put us over the process memory limit.
- V8::FatalProcessOutOfMemory(nullptr, "BackingStore::AllocateWasmMemory()");
+ V8::FatalProcessOutOfMemory(nullptr, "BackingStore::AllocateMemory()");
}
DebugCheckZero(buffer_start, byte_length); // touch the bytes.
@@ -403,30 +444,29 @@ std::unique_ptr<BackingStore> BackingStore::TryAllocateWasmMemory(
RecordStatus(isolate, did_retry ? AllocationStatus::kSuccessAfterRetry
: AllocationStatus::kSuccess);
- auto result = new BackingStore(buffer_start, // start
- byte_length, // length
- byte_capacity, // capacity
- shared, // shared
- true, // is_wasm_memory
- true, // free_on_destruct
- guards, // has_guard_regions
- false, // custom_deleter
- false); // empty_deleter
+ ResizableFlag resizable =
+ is_wasm_memory ? ResizableFlag::kNotResizable : ResizableFlag::kResizable;
+
+ auto result = new BackingStore(buffer_start, // start
+ byte_length, // length
+ byte_capacity, // capacity
+ shared, // shared
+ resizable, // resizable
+ is_wasm_memory, // is_wasm_memory
+ true, // free_on_destruct
+ guards, // has_guard_regions
+ false, // custom_deleter
+ false); // empty_deleter
TRACE_BS(
"BSw:alloc bs=%p mem=%p (length=%zu, capacity=%zu, reservation=%zu)\n",
result, result->buffer_start(), byte_length, byte_capacity,
reservation_size);
- // Shared Wasm memories need an anchor for the memory object list.
- if (shared == SharedFlag::kShared) {
- result->type_specific_data_.shared_wasm_memory_data =
- new SharedWasmMemoryData();
- }
-
return std::unique_ptr<BackingStore>(result);
}
+#if V8_ENABLE_WEBASSEMBLY
// Allocate a backing store for a Wasm memory. Always use the page allocator
// and add guard regions.
std::unique_ptr<BackingStore> BackingStore::AllocateWasmMemory(
@@ -583,18 +623,102 @@ void BackingStore::UpdateSharedWasmMemoryObjects(Isolate* isolate) {
}
#endif // V8_ENABLE_WEBASSEMBLY
+// Commit already reserved memory (for RAB backing stores (not shared)).
+BackingStore::ResizeOrGrowResult BackingStore::ResizeInPlace(
+ Isolate* isolate, size_t new_byte_length, size_t new_committed_length) {
+ DCHECK_LE(new_byte_length, new_committed_length);
+ DCHECK(!is_shared());
+
+ if (new_byte_length < byte_length_) {
+ // TODO(v8:11111): Figure out a strategy for shrinking - when do we
+ // un-commit the memory?
+
+ // Zero the memory so that, if the buffer is grown later, the contents are
+ // already zeroed.
+ memset(reinterpret_cast<byte*>(buffer_start_) + new_byte_length, 0,
+ byte_length_ - new_byte_length);
+
+ // Changing the byte length wouldn't strictly speaking be needed, since
+ // the JSArrayBuffer already stores the updated length. This is to keep
+ // the BackingStore and JSArrayBuffer in sync.
+ byte_length_ = new_byte_length;
+ return kSuccess;
+ }
+ if (new_byte_length == byte_length_) {
+ // i::SetPermissions with size 0 fails on some platforms, so special
+ // handling for the case byte_length_ == new_byte_length == 0 is required.
+ return kSuccess;
+ }
+
+ // Try to adjust the permissions on the memory.
+ if (!i::SetPermissions(GetPlatformPageAllocator(), buffer_start_,
+ new_committed_length, PageAllocator::kReadWrite)) {
+ return kFailure;
+ }
+
+ // Do per-isolate accounting for non-shared backing stores.
+ DCHECK(free_on_destruct_);
+ reinterpret_cast<v8::Isolate*>(isolate)
+ ->AdjustAmountOfExternalAllocatedMemory(new_byte_length - byte_length_);
+ byte_length_ = new_byte_length;
+ return kSuccess;
+}
+
+// Commit already reserved memory (for GSAB backing stores (shared)).
+BackingStore::ResizeOrGrowResult BackingStore::GrowInPlace(
+ Isolate* isolate, size_t new_byte_length, size_t new_committed_length) {
+ DCHECK_LE(new_byte_length, new_committed_length);
+ DCHECK(is_shared());
+ // See comment in GrowWasmMemoryInPlace.
+ // GrowableSharedArrayBuffer.prototype.grow can be called from several
+ // threads. If two threads try to grow() in a racy way, the spec allows the
+ // larger grow to throw also if the smaller grow succeeds first. The
+ // implementation below doesn't throw in that case - instead, it retries and
+ // succeeds. If the larger grow finishes first though, the smaller grow must
+ // throw.
+ size_t old_byte_length = byte_length_.load(std::memory_order_seq_cst);
+ while (true) {
+ if (new_byte_length < old_byte_length) {
+ // The caller checks for the new_byte_length < old_byte_length case. This
+ // can only happen if another thread grew the memory after that.
+ return kRace;
+ }
+ if (new_byte_length == old_byte_length) {
+ // i::SetPermissions with size 0 fails on some platforms, so special
+ // handling for the case old_byte_length == new_byte_length == 0 is
+ // required.
+ return kSuccess;
+ }
+
+ // Try to adjust the permissions on the memory.
+ if (!i::SetPermissions(GetPlatformPageAllocator(), buffer_start_,
+ new_committed_length, PageAllocator::kReadWrite)) {
+ return kFailure;
+ }
+
+ // compare_exchange_weak updates old_byte_length.
+ if (byte_length_.compare_exchange_weak(old_byte_length, new_byte_length,
+ std::memory_order_seq_cst)) {
+ // Successfully updated both the length and permissions.
+ break;
+ }
+ }
+ return kSuccess;
+}
+
std::unique_ptr<BackingStore> BackingStore::WrapAllocation(
Isolate* isolate, void* allocation_base, size_t allocation_length,
SharedFlag shared, bool free_on_destruct) {
- auto result = new BackingStore(allocation_base, // start
- allocation_length, // length
- allocation_length, // capacity
- shared, // shared
- false, // is_wasm_memory
- free_on_destruct, // free_on_destruct
- false, // has_guard_regions
- false, // custom_deleter
- false); // empty_deleter
+ auto result = new BackingStore(allocation_base, // start
+ allocation_length, // length
+ allocation_length, // capacity
+ shared, // shared
+ ResizableFlag::kNotResizable, // resizable
+ false, // is_wasm_memory
+ free_on_destruct, // free_on_destruct
+ false, // has_guard_regions
+ false, // custom_deleter
+ false); // empty_deleter
result->SetAllocatorFromIsolate(isolate);
TRACE_BS("BS:wrap bs=%p mem=%p (length=%zu)\n", result,
result->buffer_start(), result->byte_length());
@@ -606,10 +730,11 @@ std::unique_ptr<BackingStore> BackingStore::WrapAllocation(
v8::BackingStore::DeleterCallback deleter, void* deleter_data,
SharedFlag shared) {
bool is_empty_deleter = (deleter == v8::BackingStore::EmptyDeleter);
- auto result = new BackingStore(allocation_base, // start
- allocation_length, // length
- allocation_length, // capacity
- shared, // shared
+ auto result = new BackingStore(allocation_base, // start
+ allocation_length, // length
+ allocation_length, // capacity
+ shared, // shared
+ ResizableFlag::kNotResizable, // resizable
false, // is_wasm_memory
true, // free_on_destruct
false, // has_guard_regions
@@ -623,15 +748,16 @@ std::unique_ptr<BackingStore> BackingStore::WrapAllocation(
std::unique_ptr<BackingStore> BackingStore::EmptyBackingStore(
SharedFlag shared) {
- auto result = new BackingStore(nullptr, // start
- 0, // length
- 0, // capacity
- shared, // shared
- false, // is_wasm_memory
- true, // free_on_destruct
- false, // has_guard_regions
- false, // custom_deleter
- false); // empty_deleter
+ auto result = new BackingStore(nullptr, // start
+ 0, // length
+ 0, // capacity
+ shared, // shared
+ ResizableFlag::kNotResizable, // resizable
+ false, // is_wasm_memory
+ true, // free_on_destruct
+ false, // has_guard_regions
+ false, // custom_deleter
+ false); // empty_deleter
return std::unique_ptr<BackingStore>(result);
}
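
To show how the new resize path is meant to be consumed, here is a hedged sketch of a caller interpreting ResizeOrGrowResult (hypothetical helper; JSArrayBuffer bookkeeping and page rounding elided, not part of the patch):

bool TryResize(v8::internal::Isolate* isolate,
               v8::internal::BackingStore* store, size_t new_byte_length,
               size_t new_committed_length) {
  using BS = v8::internal::BackingStore;
  switch (store->ResizeInPlace(isolate, new_byte_length,
                               new_committed_length)) {
    case BS::kSuccess:
      return true;
    case BS::kFailure:  // the pages could not be committed
      return false;
    case BS::kRace:     // only reported by the shared GrowInPlace path
      return false;
  }
  return false;
}
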
diff --git a/chromium/v8/src/objects/backing-store.h b/chromium/v8/src/objects/backing-store.h
index eb879d5e8ad..5598388d3b9 100644
--- a/chromium/v8/src/objects/backing-store.h
+++ b/chromium/v8/src/objects/backing-store.h
@@ -21,6 +21,9 @@ class WasmMemoryObject;
// Whether the backing store is shared or not.
enum class SharedFlag : uint8_t { kNotShared, kShared };
+// Whether the backing store is resizable or not.
+enum class ResizableFlag : uint8_t { kNotResizable, kResizable };
+
// Whether the backing store memory is initialied to zero or not.
enum class InitializedFlag : uint8_t { kUninitialized, kZeroInitialized };
@@ -56,6 +59,12 @@ class V8_EXPORT_PRIVATE BackingStore : public BackingStoreBase {
SharedFlag shared);
#endif // V8_ENABLE_WEBASSEMBLY
+ // Tries to allocate `maximum_pages` of memory and commit `initial_pages`.
+ static std::unique_ptr<BackingStore> TryAllocateAndPartiallyCommitMemory(
+ Isolate* isolate, size_t byte_length, size_t page_size,
+ size_t initial_pages, size_t maximum_pages, bool is_wasm_memory,
+ SharedFlag shared);
+
// Create a backing store that wraps existing allocated memory.
// If {free_on_destruct} is {true}, the memory will be freed using the
// ArrayBufferAllocator::Free() callback when this backing store is
@@ -77,15 +86,24 @@ class V8_EXPORT_PRIVATE BackingStore : public BackingStoreBase {
// Accessors.
void* buffer_start() const { return buffer_start_; }
- size_t byte_length() const {
- return byte_length_.load(std::memory_order_relaxed);
+ size_t byte_length(
+ std::memory_order memory_order = std::memory_order_relaxed) const {
+ return byte_length_.load(memory_order);
}
size_t byte_capacity() const { return byte_capacity_; }
bool is_shared() const { return is_shared_; }
+ bool is_resizable() const { return is_resizable_; }
bool is_wasm_memory() const { return is_wasm_memory_; }
bool has_guard_regions() const { return has_guard_regions_; }
bool free_on_destruct() const { return free_on_destruct_; }
+ enum ResizeOrGrowResult { kSuccess, kFailure, kRace };
+
+ ResizeOrGrowResult ResizeInPlace(Isolate* isolate, size_t new_byte_length,
+ size_t new_committed_length);
+ ResizeOrGrowResult GrowInPlace(Isolate* isolate, size_t new_byte_length,
+ size_t new_committed_length);
+
// Wrapper around ArrayBuffer::Allocator::Reallocate.
bool Reallocate(Isolate* isolate, size_t new_byte_length);
@@ -111,12 +129,6 @@ class V8_EXPORT_PRIVATE BackingStore : public BackingStoreBase {
static void BroadcastSharedWasmMemoryGrow(Isolate* isolate,
std::shared_ptr<BackingStore>);
- // TODO(wasm): address space limitations should be enforced in page alloc.
- // These methods enforce a limit on the total amount of address space,
- // which is used for both backing stores and wasm memory.
- static bool ReserveAddressSpace(uint64_t num_bytes);
- static void ReleaseReservation(uint64_t num_bytes);
-
// Remove all memory objects in the given isolate that refer to this
// backing store.
static void RemoveSharedWasmMemoryObjects(Isolate* isolate);
@@ -125,6 +137,12 @@ class V8_EXPORT_PRIVATE BackingStore : public BackingStoreBase {
static void UpdateSharedWasmMemoryObjects(Isolate* isolate);
#endif // V8_ENABLE_WEBASSEMBLY
+ // TODO(wasm): address space limitations should be enforced in page alloc.
+ // These methods enforce a limit on the total amount of address space,
+ // which is used for both backing stores and wasm memory.
+ static bool ReserveAddressSpace(uint64_t num_bytes);
+ static void ReleaseReservation(uint64_t num_bytes);
+
// Returns the size of the external memory owned by this backing store.
// It is used for triggering GCs based on the external memory pressure.
size_t PerIsolateAccountingLength() {
@@ -148,19 +166,26 @@ class V8_EXPORT_PRIVATE BackingStore : public BackingStoreBase {
friend class GlobalBackingStoreRegistry;
BackingStore(void* buffer_start, size_t byte_length, size_t byte_capacity,
- SharedFlag shared, bool is_wasm_memory, bool free_on_destruct,
- bool has_guard_regions, bool custom_deleter, bool empty_deleter)
+ SharedFlag shared, ResizableFlag resizable, bool is_wasm_memory,
+ bool free_on_destruct, bool has_guard_regions,
+ bool custom_deleter, bool empty_deleter)
: buffer_start_(buffer_start),
byte_length_(byte_length),
byte_capacity_(byte_capacity),
is_shared_(shared == SharedFlag::kShared),
+ is_resizable_(resizable == ResizableFlag::kResizable),
is_wasm_memory_(is_wasm_memory),
holds_shared_ptr_to_allocator_(false),
free_on_destruct_(free_on_destruct),
has_guard_regions_(has_guard_regions),
globally_registered_(false),
custom_deleter_(custom_deleter),
- empty_deleter_(empty_deleter) {}
+ empty_deleter_(empty_deleter) {
+ // TODO(v8:11111): RAB / GSAB - Wasm integration.
+ DCHECK_IMPLIES(is_wasm_memory_, !is_resizable_);
+ DCHECK_IMPLIES(is_resizable_, !custom_deleter_);
+ DCHECK_IMPLIES(is_resizable_, free_on_destruct_);
+ }
BackingStore(const BackingStore&) = delete;
BackingStore& operator=(const BackingStore&) = delete;
void SetAllocatorFromIsolate(Isolate* isolate);
@@ -199,6 +224,8 @@ class V8_EXPORT_PRIVATE BackingStore : public BackingStoreBase {
} type_specific_data_;
bool is_shared_ : 1;
+ // Backing stores for (Resizable|GrowableShared)ArrayBuffer
+ bool is_resizable_ : 1;
bool is_wasm_memory_ : 1;
bool holds_shared_ptr_to_allocator_ : 1;
bool free_on_destruct_ : 1;
diff --git a/chromium/v8/src/objects/bigint.cc b/chromium/v8/src/objects/bigint.cc
index cffac5af8e1..9614d4d1461 100644
--- a/chromium/v8/src/objects/bigint.cc
+++ b/chromium/v8/src/objects/bigint.cc
@@ -51,21 +51,20 @@ class MutableBigInt : public FreshlyAllocatedBigInt {
static void Canonicalize(MutableBigInt result);
// Allocation helpers.
- template <typename LocalIsolate>
+ template <typename IsolateT>
static MaybeHandle<MutableBigInt> New(
- LocalIsolate* isolate, int length,
+ IsolateT* isolate, int length,
AllocationType allocation = AllocationType::kYoung);
static Handle<BigInt> NewFromInt(Isolate* isolate, int value);
static Handle<BigInt> NewFromDouble(Isolate* isolate, double value);
void InitializeDigits(int length, byte value = 0);
static Handle<MutableBigInt> Copy(Isolate* isolate,
Handle<BigIntBase> source);
- template <typename LocalIsolate>
+ template <typename IsolateT>
static Handle<BigInt> Zero(
- LocalIsolate* isolate,
- AllocationType allocation = AllocationType::kYoung) {
+ IsolateT* isolate, AllocationType allocation = AllocationType::kYoung) {
// TODO(jkummerow): Consider caching a canonical zero-BigInt.
- return MakeImmutable<LocalIsolate>(
+ return MakeImmutable<IsolateT>(
New(isolate, 0, allocation).ToHandleChecked());
}
@@ -136,10 +135,6 @@ class MutableBigInt : public FreshlyAllocatedBigInt {
Isolate* isolate, Handle<BigIntBase> x, Handle<BigIntBase> y,
MutableBigInt result_storage = MutableBigInt());
- static void MultiplyAccumulate(Handle<BigIntBase> multiplicand,
- digit_t multiplier,
- Handle<MutableBigInt> accumulator,
- int accumulator_index);
static void InternalMultiplyAdd(BigIntBase source, digit_t factor,
digit_t summand, int n, MutableBigInt result);
void InplaceMultiplyAdd(uintptr_t factor, uintptr_t summand);
@@ -211,7 +206,7 @@ class MutableBigInt : public FreshlyAllocatedBigInt {
bitfield = SignBits::update(bitfield, new_sign);
RELAXED_WRITE_INT32_FIELD(*this, kBitfieldOffset, bitfield);
}
- inline void synchronized_set_length(int new_length) {
+ inline void set_length(int new_length, ReleaseStoreTag) {
int32_t bitfield = RELAXED_READ_INT32_FIELD(*this, kBitfieldOffset);
bitfield = LengthBits::update(bitfield, new_length);
RELEASE_WRITE_INT32_FIELD(*this, kBitfieldOffset, bitfield);
@@ -243,14 +238,25 @@ NEVER_READ_ONLY_SPACE_IMPL(MutableBigInt)
#include "src/base/platform/wrappers.h"
#include "src/objects/object-macros-undef.h"
-struct GetDigits : bigint::Digits {
- explicit GetDigits(Handle<BigIntBase> bigint) : GetDigits(*bigint) {}
- explicit GetDigits(BigIntBase bigint)
- : bigint::Digits(
- reinterpret_cast<bigint::digit_t*>(
- bigint.ptr() + BigIntBase::kDigitsOffset - kHeapObjectTag),
- bigint.length()) {}
-};
+bigint::Digits GetDigits(BigIntBase bigint) {
+ return bigint::Digits(
+ reinterpret_cast<bigint::digit_t*>(
+ bigint.ptr() + BigIntBase::kDigitsOffset - kHeapObjectTag),
+ bigint.length());
+}
+bigint::Digits GetDigits(Handle<BigIntBase> bigint) {
+ return GetDigits(*bigint);
+}
+
+bigint::RWDigits GetRWDigits(MutableBigInt bigint) {
+ return bigint::RWDigits(
+ reinterpret_cast<bigint::digit_t*>(
+ bigint.ptr() + BigIntBase::kDigitsOffset - kHeapObjectTag),
+ bigint.length());
+}
+bigint::RWDigits GetRWDigits(Handle<MutableBigInt> bigint) {
+ return GetRWDigits(*bigint);
+}
template <typename T, typename Isolate>
MaybeHandle<T> ThrowBigIntTooBig(Isolate* isolate) {
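The GetDigits/GetRWDigits helpers above replace the old GetDigits struct with free functions that hand the bigint library a non-owning (pointer, length) view over the digits stored inside the heap object. Below is a generic sketch of that view-returning pattern, with std::vector standing in for the on-heap digit storage; the Digits/RWDigits structs here are simplified stand-ins for the library types.

#include <cassert>
#include <cstdint>
#include <vector>

struct Digits {     // read-only view: pointer + length, no ownership
  const uint64_t* data;
  int length;
};

struct RWDigits {   // writable view over the same storage
  uint64_t* data;
  int length;
};

Digits GetDigits(const std::vector<uint64_t>& storage) {
  return {storage.data(), static_cast<int>(storage.size())};
}

RWDigits GetRWDigits(std::vector<uint64_t>& storage) {
  return {storage.data(), static_cast<int>(storage.size())};
}

int main() {
  std::vector<uint64_t> digits = {1, 2, 3};
  RWDigits rw = GetRWDigits(digits);
  rw.data[0] = 42;                         // writes through to the backing storage
  assert(GetDigits(digits).data[0] == 42);
}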
@@ -266,8 +272,8 @@ MaybeHandle<T> ThrowBigIntTooBig(Isolate* isolate) {
THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kBigIntTooBig), T);
}
-template <typename LocalIsolate>
-MaybeHandle<MutableBigInt> MutableBigInt::New(LocalIsolate* isolate, int length,
+template <typename IsolateT>
+MaybeHandle<MutableBigInt> MutableBigInt::New(IsolateT* isolate, int length,
AllocationType allocation) {
if (length > BigInt::kMaxLength) {
return ThrowBigIntTooBig<MutableBigInt>(isolate);
@@ -390,7 +396,7 @@ MaybeHandle<BigInt> MutableBigInt::MakeImmutable(
return MakeImmutable(result);
}
-template <typename LocalIsolate>
+template <typename IsolateT>
Handle<BigInt> MutableBigInt::MakeImmutable(Handle<MutableBigInt> result) {
MutableBigInt::Canonicalize(*result);
return Handle<BigInt>::cast(result);
@@ -412,7 +418,7 @@ void MutableBigInt::Canonicalize(MutableBigInt result) {
// of the object changed significantly.
heap->CreateFillerObjectAt(new_end, size_delta, ClearRecordedSlots::kNo);
}
- result.synchronized_set_length(new_length);
+ result.set_length(new_length, kReleaseStore);
// Canonicalize -0n.
if (new_length == 0) {
@@ -424,8 +430,8 @@ void MutableBigInt::Canonicalize(MutableBigInt result) {
result.digit(result.length() - 1) != 0); // MSD is non-zero.
}
-template <typename LocalIsolate>
-Handle<BigInt> BigInt::Zero(LocalIsolate* isolate, AllocationType allocation) {
+template <typename IsolateT>
+Handle<BigInt> BigInt::Zero(IsolateT* isolate, AllocationType allocation) {
return MutableBigInt::Zero(isolate, allocation);
}
template Handle<BigInt> BigInt::Zero(Isolate* isolate,
@@ -531,29 +537,18 @@ MaybeHandle<BigInt> BigInt::Multiply(Isolate* isolate, Handle<BigInt> x,
Handle<BigInt> y) {
if (x->is_zero()) return x;
if (y->is_zero()) return y;
- int result_length = x->length() + y->length();
+ int result_length = bigint::MultiplyResultLength(GetDigits(x), GetDigits(y));
Handle<MutableBigInt> result;
if (!MutableBigInt::New(isolate, result_length).ToHandle(&result)) {
return MaybeHandle<BigInt>();
}
- result->InitializeDigits(result_length);
- uintptr_t work_estimate = 0;
- for (int i = 0; i < x->length(); i++) {
- MutableBigInt::MultiplyAccumulate(y, x->digit(i), result, i);
-
- // Multiplication can take a long time. Check for interrupt requests
- // every now and then (roughly every 10-20 of milliseconds -- rarely
- // enough not to create noticeable overhead, frequently enough not to
- // appear frozen).
- work_estimate += y->length();
- if (work_estimate > 5000000) {
- work_estimate = 0;
- StackLimitCheck interrupt_check(isolate);
- if (interrupt_check.InterruptRequested() &&
- isolate->stack_guard()->HandleInterrupts().IsException(isolate)) {
- return MaybeHandle<BigInt>();
- }
- }
+ DisallowGarbageCollection no_gc;
+ bigint::Status status = isolate->bigint_processor()->Multiply(
+ GetRWDigits(result), GetDigits(x), GetDigits(y));
+ if (status == bigint::Status::kInterrupted) {
+ AllowGarbageCollection terminating_anyway;
+ isolate->TerminateExecution();
+ return {};
}
result->set_sign(x->sign() != y->sign());
return MutableBigInt::MakeImmutable(result);
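In the rewritten Multiply above, the digit-level loop and its interrupt polling move into the shared bigint library; the caller only inspects the returned Status and turns kInterrupted into TerminateExecution with no result. The following much-simplified sketch shows that status-driven control flow in standalone form; Status, Multiply and TerminateExecution here are toy stand-ins, not the V8 or bigint library API.

#include <atomic>
#include <cstdio>
#include <optional>

enum class Status { kOk, kInterrupted };

std::atomic<bool> interrupt_requested{false};

// Long-running kernel: does the work in chunks and polls the interrupt flag
// rarely enough that the check is essentially free.
Status Multiply(long long iterations) {
  for (long long i = 0; i < iterations; ++i) {
    if ((i & 0xFFFF) == 0 &&
        interrupt_requested.load(std::memory_order_relaxed)) {
      return Status::kInterrupted;
    }
    // ... per-digit multiply-accumulate work would go here ...
  }
  return Status::kOk;
}

// Caller: map an interrupted run onto "terminate, return no result".
std::optional<int> RunMultiply() {
  if (Multiply(1 << 22) == Status::kInterrupted) {
    std::puts("terminating execution");
    return std::nullopt;
  }
  return 42;  // placeholder for the finished product
}

int main() {
  interrupt_requested = true;  // simulate a pending interrupt request
  return RunMultiply().has_value() ? 0 : 1;
}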
@@ -1449,46 +1444,6 @@ Handle<MutableBigInt> MutableBigInt::AbsoluteXor(Isolate* isolate,
[](digit_t a, digit_t b) { return a ^ b; });
}
-// Multiplies {multiplicand} with {multiplier} and adds the result to
-// {accumulator}, starting at {accumulator_index} for the least-significant
-// digit.
-// Callers must ensure that {accumulator} is big enough to hold the result.
-void MutableBigInt::MultiplyAccumulate(Handle<BigIntBase> multiplicand,
- digit_t multiplier,
- Handle<MutableBigInt> accumulator,
- int accumulator_index) {
- // This is a minimum requirement; the DCHECK in the second loop below
- // will enforce more as needed.
- DCHECK(accumulator->length() > multiplicand->length() + accumulator_index);
- if (multiplier == 0L) return;
- digit_t carry = 0;
- digit_t high = 0;
- for (int i = 0; i < multiplicand->length(); i++, accumulator_index++) {
- digit_t acc = accumulator->digit(accumulator_index);
- digit_t new_carry = 0;
- // Add last round's carryovers.
- acc = digit_add(acc, high, &new_carry);
- acc = digit_add(acc, carry, &new_carry);
- // Compute this round's multiplication.
- digit_t m_digit = multiplicand->digit(i);
- digit_t low = digit_mul(multiplier, m_digit, &high);
- acc = digit_add(acc, low, &new_carry);
- // Store result and prepare for next round.
- accumulator->set_digit(accumulator_index, acc);
- carry = new_carry;
- }
- for (; carry != 0 || high != 0; accumulator_index++) {
- DCHECK(accumulator_index < accumulator->length());
- digit_t acc = accumulator->digit(accumulator_index);
- digit_t new_carry = 0;
- acc = digit_add(acc, high, &new_carry);
- high = 0;
- acc = digit_add(acc, carry, &new_carry);
- accumulator->set_digit(accumulator_index, acc);
- carry = new_carry;
- }
-}
-
// Multiplies {source} with {factor} and adds {summand} to the result.
// {result} and {source} may be the same BigInt for inplace modification.
void MutableBigInt::InternalMultiplyAdd(BigIntBase source, digit_t factor,
@@ -1928,9 +1883,9 @@ constexpr uint8_t kMaxBitsPerChar[] = {
static const int kBitsPerCharTableShift = 5;
static const size_t kBitsPerCharTableMultiplier = 1u << kBitsPerCharTableShift;
-template <typename LocalIsolate>
+template <typename IsolateT>
MaybeHandle<FreshlyAllocatedBigInt> BigInt::AllocateFor(
- LocalIsolate* isolate, int radix, int charcount, ShouldThrow should_throw,
+ IsolateT* isolate, int radix, int charcount, ShouldThrow should_throw,
AllocationType allocation) {
DCHECK(2 <= radix && radix <= 36);
DCHECK_GE(charcount, 0);
@@ -1966,7 +1921,7 @@ template MaybeHandle<FreshlyAllocatedBigInt> BigInt::AllocateFor(
LocalIsolate* isolate, int radix, int charcount, ShouldThrow should_throw,
AllocationType allocation);
-template <typename LocalIsolate>
+template <typename IsolateT>
Handle<BigInt> BigInt::Finalize(Handle<FreshlyAllocatedBigInt> x, bool sign) {
Handle<MutableBigInt> bigint = Handle<MutableBigInt>::cast(x);
bigint->set_sign(sign);
@@ -2249,7 +2204,7 @@ MaybeHandle<String> MutableBigInt::ToStringGeneric(Isolate* isolate,
if (sign) chars[pos++] = '-';
// Trim any over-allocation (which can happen due to conservative estimates).
if (pos < static_cast<int>(chars_required)) {
- result->synchronized_set_length(pos);
+ result->set_length(pos, kReleaseStore);
int string_size =
SeqOneByteString::SizeFor(static_cast<int>(chars_required));
int needed_size = SeqOneByteString::SizeFor(pos);
diff --git a/chromium/v8/src/objects/bigint.h b/chromium/v8/src/objects/bigint.h
index a7494a54c5a..19dfca348a8 100644
--- a/chromium/v8/src/objects/bigint.h
+++ b/chromium/v8/src/objects/bigint.h
@@ -38,7 +38,7 @@ class BigIntBase : public PrimitiveHeapObject {
}
// For use by the GC.
- inline int synchronized_length() const {
+ inline int length(AcquireLoadTag) const {
int32_t bitfield = ACQUIRE_READ_INT32_FIELD(*this, kBitfieldOffset);
return LengthBits::decode(static_cast<uint32_t>(bitfield));
}
@@ -242,22 +242,22 @@ class BigInt : public BigIntBase {
class BodyDescriptor;
private:
- template <typename LocalIsolate>
+ template <typename IsolateT>
friend class StringToBigIntHelper;
friend class ValueDeserializer;
friend class ValueSerializer;
// Special functions for StringToBigIntHelper:
- template <typename LocalIsolate>
- static Handle<BigInt> Zero(LocalIsolate* isolate, AllocationType allocation =
- AllocationType::kYoung);
- template <typename LocalIsolate>
+ template <typename IsolateT>
+ static Handle<BigInt> Zero(
+ IsolateT* isolate, AllocationType allocation = AllocationType::kYoung);
+ template <typename IsolateT>
static MaybeHandle<FreshlyAllocatedBigInt> AllocateFor(
- LocalIsolate* isolate, int radix, int charcount, ShouldThrow should_throw,
+ IsolateT* isolate, int radix, int charcount, ShouldThrow should_throw,
AllocationType allocation);
static void InplaceMultiplyAdd(FreshlyAllocatedBigInt x, uintptr_t factor,
uintptr_t summand);
- template <typename LocalIsolate>
+ template <typename IsolateT>
static Handle<BigInt> Finalize(Handle<FreshlyAllocatedBigInt> x, bool sign);
// Special functions for ValueSerializer/ValueDeserializer:
diff --git a/chromium/v8/src/objects/code-inl.h b/chromium/v8/src/objects/code-inl.h
index 6fd3355a42f..f53c585538d 100644
--- a/chromium/v8/src/objects/code-inl.h
+++ b/chromium/v8/src/objects/code-inl.h
@@ -450,8 +450,8 @@ inline bool Code::is_interpreter_trampoline_builtin() const {
const int index = builtin_index();
return index != Builtins::kNoBuiltinId &&
(index == Builtins::kInterpreterEntryTrampoline ||
- index == Builtins::kInterpreterEnterBytecodeAdvance ||
- index == Builtins::kInterpreterEnterBytecodeDispatch);
+ index == Builtins::kInterpreterEnterAtBytecode ||
+ index == Builtins::kInterpreterEnterAtNextBytecode);
}
inline bool Code::is_baseline_trampoline_builtin() const {
@@ -715,7 +715,7 @@ bool Code::IsWeakObject(HeapObject object) {
}
bool Code::IsWeakObjectInOptimizedCode(HeapObject object) {
- Map map = object.synchronized_map();
+ Map map = object.map(kAcquireLoad);
InstanceType instance_type = map.instance_type();
if (InstanceTypeChecker::IsMap(instance_type)) {
return Map::cast(object).CanTransition();
diff --git a/chromium/v8/src/objects/code-kind.cc b/chromium/v8/src/objects/code-kind.cc
index 8d480c86db9..5c4ab5d2991 100644
--- a/chromium/v8/src/objects/code-kind.cc
+++ b/chromium/v8/src/objects/code-kind.cc
@@ -24,8 +24,6 @@ const char* CodeKindToMarker(CodeKind kind) {
return "~";
case CodeKind::BASELINE:
return "^";
- case CodeKind::NATIVE_CONTEXT_INDEPENDENT:
- return "-";
case CodeKind::TURBOPROP:
return "+";
case CodeKind::TURBOFAN:
diff --git a/chromium/v8/src/objects/code-kind.h b/chromium/v8/src/objects/code-kind.h
index 12f65ba18aa..888e04c42b5 100644
--- a/chromium/v8/src/objects/code-kind.h
+++ b/chromium/v8/src/objects/code-kind.h
@@ -28,7 +28,6 @@ namespace internal {
V(C_WASM_ENTRY) \
V(INTERPRETED_FUNCTION) \
V(BASELINE) \
- V(NATIVE_CONTEXT_INDEPENDENT) \
V(TURBOPROP) \
V(TURBOFAN)
@@ -38,14 +37,10 @@ enum class CodeKind {
#undef DEFINE_CODE_KIND_ENUM
};
STATIC_ASSERT(CodeKind::INTERPRETED_FUNCTION < CodeKind::TURBOPROP &&
- CodeKind::INTERPRETED_FUNCTION <
- CodeKind::NATIVE_CONTEXT_INDEPENDENT &&
CodeKind::INTERPRETED_FUNCTION < CodeKind::BASELINE);
-STATIC_ASSERT(CodeKind::BASELINE < CodeKind::TURBOPROP &&
- CodeKind::BASELINE < CodeKind::NATIVE_CONTEXT_INDEPENDENT);
+STATIC_ASSERT(CodeKind::BASELINE < CodeKind::TURBOPROP);
STATIC_ASSERT(CodeKind::BASELINE < CodeKind::TURBOFAN &&
- CodeKind::TURBOPROP < CodeKind::TURBOFAN &&
- CodeKind::NATIVE_CONTEXT_INDEPENDENT < CodeKind::TURBOFAN);
+ CodeKind::TURBOPROP < CodeKind::TURBOFAN);
#define V(...) +1
static constexpr int kCodeKindCount = CODE_KIND_LIST(V);
@@ -70,18 +65,10 @@ inline constexpr bool CodeKindIsUnoptimizedJSFunction(CodeKind kind) {
CodeKind::BASELINE);
}
-inline constexpr bool CodeKindIsNativeContextIndependentJSFunction(
- CodeKind kind) {
- return kind == CodeKind::NATIVE_CONTEXT_INDEPENDENT;
-}
-
inline constexpr bool CodeKindIsOptimizedJSFunction(CodeKind kind) {
- STATIC_ASSERT(static_cast<int>(CodeKind::NATIVE_CONTEXT_INDEPENDENT) + 1 ==
- static_cast<int>(CodeKind::TURBOPROP));
STATIC_ASSERT(static_cast<int>(CodeKind::TURBOPROP) + 1 ==
static_cast<int>(CodeKind::TURBOFAN));
- return base::IsInRange(kind, CodeKind::NATIVE_CONTEXT_INDEPENDENT,
- CodeKind::TURBOFAN);
+ return base::IsInRange(kind, CodeKind::TURBOPROP, CodeKind::TURBOFAN);
}
inline constexpr bool CodeKindIsJSFunction(CodeKind kind) {
@@ -94,9 +81,6 @@ inline constexpr bool CodeKindIsBuiltinOrJSFunction(CodeKind kind) {
}
inline constexpr bool CodeKindCanDeoptimize(CodeKind kind) {
- // Even though NCI code does not deopt by itself at the time of writing,
- // tests may trigger deopts manually and thus we cannot make a narrower
- // distinction here.
return CodeKindIsOptimizedJSFunction(kind);
}
@@ -104,9 +88,8 @@ inline constexpr bool CodeKindCanOSR(CodeKind kind) {
return kind == CodeKind::TURBOFAN || kind == CodeKind::TURBOPROP;
}
-inline constexpr bool CodeKindIsOptimizedAndCanTierUp(CodeKind kind) {
- return kind == CodeKind::NATIVE_CONTEXT_INDEPENDENT ||
- (!FLAG_turboprop_as_toptier && kind == CodeKind::TURBOPROP);
+inline bool CodeKindIsOptimizedAndCanTierUp(CodeKind kind) {
+ return !FLAG_turboprop_as_toptier && kind == CodeKind::TURBOPROP;
}
inline constexpr bool CodeKindCanTierUp(CodeKind kind) {
@@ -116,8 +99,7 @@ inline constexpr bool CodeKindCanTierUp(CodeKind kind) {
// The optimization marker field on the feedback vector has a dual purpose of
// controlling the tier-up workflow, and caching the produced code object for
-// access from multiple closures. The marker is not used for all code kinds
-// though, in particular it is not used when generating NCI code.
+// access from multiple closures.
inline constexpr bool CodeKindIsStoredInOptimizedCodeCache(CodeKind kind) {
return kind == CodeKind::TURBOFAN || kind == CodeKind::TURBOPROP;
}
@@ -128,9 +110,6 @@ inline OptimizationTier GetTierForCodeKind(CodeKind kind) {
return FLAG_turboprop_as_toptier ? OptimizationTier::kTopTier
: OptimizationTier::kMidTier;
}
- if (kind == CodeKind::NATIVE_CONTEXT_INDEPENDENT) {
- return OptimizationTier::kTopTier;
- }
return OptimizationTier::kNone;
}
@@ -169,11 +148,9 @@ DEFINE_OPERATORS_FOR_FLAGS(CodeKinds)
static constexpr CodeKinds kJSFunctionCodeKindsMask{
CodeKindFlag::INTERPRETED_FUNCTION | CodeKindFlag::TURBOFAN |
- CodeKindFlag::NATIVE_CONTEXT_INDEPENDENT | CodeKindFlag::TURBOPROP |
- CodeKindFlag::BASELINE};
+ CodeKindFlag::TURBOPROP | CodeKindFlag::BASELINE};
static constexpr CodeKinds kOptimizedJSFunctionCodeKindsMask{
- CodeKindFlag::TURBOFAN | CodeKindFlag::NATIVE_CONTEXT_INDEPENDENT |
- CodeKindFlag::TURBOPROP};
+ CodeKindFlag::TURBOFAN | CodeKindFlag::TURBOPROP};
} // namespace internal
} // namespace v8
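With NATIVE_CONTEXT_INDEPENDENT gone, CodeKindIsOptimizedJSFunction above shrinks to a single IsInRange check over the two remaining optimizing tiers, and the surrounding STATIC_ASSERTs keep the enum ordering honest. The same range-check-plus-static-assert pattern in a self-contained form; the Kind values below are illustrative, not V8's CodeKind list.

#include <cassert>

enum class Kind : int { kInterpreted, kBaseline, kTurboprop, kTurbofan };

constexpr bool IsInRange(Kind value, Kind lo, Kind hi) {
  return static_cast<int>(value) >= static_cast<int>(lo) &&
         static_cast<int>(value) <= static_cast<int>(hi);
}

// If the enum is ever reordered, this fails at compile time instead of
// silently changing the predicate below.
static_assert(static_cast<int>(Kind::kTurboprop) + 1 ==
                  static_cast<int>(Kind::kTurbofan),
              "optimizing tiers must stay adjacent");

constexpr bool IsOptimized(Kind kind) {
  return IsInRange(kind, Kind::kTurboprop, Kind::kTurbofan);
}

int main() {
  assert(IsOptimized(Kind::kTurbofan));
  assert(!IsOptimized(Kind::kBaseline));
}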
diff --git a/chromium/v8/src/objects/code.cc b/chromium/v8/src/objects/code.cc
index 7268f001ce9..5b4ad4406d0 100644
--- a/chromium/v8/src/objects/code.cc
+++ b/chromium/v8/src/objects/code.cc
@@ -145,22 +145,42 @@ int Code::OffHeapInstructionSize() const {
return d.InstructionSizeOfBuiltin(builtin_index());
}
+namespace {
+
+// Helper function for getting an EmbeddedData that can handle un-embedded
+// builtins when short builtin calls are enabled.
+inline EmbeddedData EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(Code code) {
+#if defined(V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE)
+ // GetIsolateFromWritableObject(*this) works for both read-only and writable
+ // objects when pointer compression is enabled with a per-Isolate cage.
+ return EmbeddedData::FromBlob(GetIsolateFromWritableObject(code));
+#elif defined(V8_COMPRESS_POINTERS_IN_SHARED_CAGE)
+ // When pointer compression is enabled with a shared cage, there is also a
+ // shared CodeRange. When short builtin calls are enabled, there is a single
+ // copy of the re-embedded builtins in the shared CodeRange, so use that if
+ // it's present.
+ if (FLAG_jitless) return EmbeddedData::FromBlob();
+ CodeRange* code_range = CodeRange::GetProcessWideCodeRange().get();
+ return (code_range && code_range->embedded_blob_code_copy() != nullptr)
+ ? EmbeddedData::FromBlob(code_range)
+ : EmbeddedData::FromBlob();
+#else
+ // Otherwise there is a single copy of the blob across all Isolates, use the
+ // global atomic variables.
+ return EmbeddedData::FromBlob();
+#endif
+}
+
+} // namespace
+
Address Code::OffHeapInstructionStart() const {
DCHECK(is_off_heap_trampoline());
if (Isolate::CurrentEmbeddedBlobCode() == nullptr) {
return raw_instruction_size();
}
- // TODO(11527): pass Isolate as an argument.
- // GetIsolateFromWritableObject(*this) works for both read-only and writable
- // objects here because short builtin calls feature requires pointer
- // compression.
- // We don't have to check the Isolate::is_short_builtin_calls_enabled() value
- // because if the short builtin calls wasn't actually enabled because of not
- // enough memory, the FromBlob(isolate) would still be the correct one to use.
- EmbeddedData d =
- FLAG_short_builtin_calls
- ? EmbeddedData::FromBlob(GetIsolateFromWritableObject(*this))
- : EmbeddedData::FromBlob();
+
+ // TODO(11527): pass Isolate as an argument for getting the EmbeddedData.
+ EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(*this);
return d.InstructionStartOfBuiltin(builtin_index());
}
@@ -169,17 +189,9 @@ Address Code::OffHeapInstructionEnd() const {
if (Isolate::CurrentEmbeddedBlobCode() == nullptr) {
return raw_instruction_size();
}
- // TODO(11527): pass Isolate as an argument.
- // GetIsolateFromWritableObject(*this) works for both read-only and writable
- // objects here because short builtin calls feature requires pointer
- // compression.
- // We don't have to check the Isolate::is_short_builtin_calls_enabled() value
- // because if the short builtin calls wasn't actually enabled because of not
- // enough memory, the FromBlob(isolate) would still be the correct one to use.
- EmbeddedData d =
- FLAG_short_builtin_calls
- ? EmbeddedData::FromBlob(GetIsolateFromWritableObject(*this))
- : EmbeddedData::FromBlob();
+
+ // TODO(11527): pass Isolate as an argument for getting the EmbeddedData.
+ EmbeddedData d = EmbeddedDataWithMaybeRemappedEmbeddedBuiltins(*this);
return d.InstructionStartOfBuiltin(builtin_index()) +
d.InstructionSizeOfBuiltin(builtin_index());
}
@@ -299,11 +311,12 @@ bool Code::IsIsolateIndependent(Isolate* isolate) {
RelocInfo::ModeMask(RelocInfo::WASM_STUB_CALL)));
#if defined(V8_TARGET_ARCH_PPC) || defined(V8_TARGET_ARCH_PPC64) || \
- defined(V8_TARGET_ARCH_MIPS64) || defined(V8_TARGET_ARCH_RISCV64)
+ defined(V8_TARGET_ARCH_MIPS64)
return RelocIterator(*this, kModeMask).done();
#elif defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_ARM64) || \
defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS) || \
- defined(V8_TARGET_ARCH_S390) || defined(V8_TARGET_ARCH_IA32)
+ defined(V8_TARGET_ARCH_S390) || defined(V8_TARGET_ARCH_IA32) || \
+ defined(V8_TARGET_ARCH_RISCV64)
for (RelocIterator it(*this, kModeMask); !it.done(); it.next()) {
// On these platforms we emit relative builtin-to-builtin
// jumps for isolate independent builtins in the snapshot. They are later
@@ -325,67 +338,6 @@ bool Code::IsIsolateIndependent(Isolate* isolate) {
return true;
}
-// Multiple native contexts live on the same heap, and V8 currently
-// draws no clear distinction between native-context-dependent and
-// independent objects. A good guideline is "objects embedded into
-// bytecode are nc-independent", since bytecode is shared between
-// native contexts. Among others, this is the case for ScopeInfo,
-// SharedFunctionInfo, String, etc.
-bool Code::IsNativeContextIndependent(Isolate* isolate) {
- static constexpr int kModeMask =
- RelocInfo::AllRealModesMask() &
- ~RelocInfo::ModeMask(RelocInfo::CONST_POOL) &
- ~RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) &
- ~RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) &
- ~RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED) &
- ~RelocInfo::ModeMask(RelocInfo::OFF_HEAP_TARGET) &
- ~RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY) &
- ~RelocInfo::ModeMask(RelocInfo::VENEER_POOL);
- STATIC_ASSERT(kModeMask ==
- (RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
- RelocInfo::ModeMask(RelocInfo::RELATIVE_CODE_TARGET) |
- RelocInfo::ModeMask(RelocInfo::COMPRESSED_EMBEDDED_OBJECT) |
- RelocInfo::ModeMask(RelocInfo::FULL_EMBEDDED_OBJECT) |
- RelocInfo::ModeMask(RelocInfo::DATA_EMBEDDED_OBJECT) |
- RelocInfo::ModeMask(RelocInfo::WASM_CALL) |
- RelocInfo::ModeMask(RelocInfo::WASM_STUB_CALL)));
-
- bool is_independent = true;
- for (RelocIterator it(*this, kModeMask); !it.done(); it.next()) {
- if (RelocInfo::IsEmbeddedObjectMode(it.rinfo()->rmode())) {
- HeapObject o = it.rinfo()->target_object();
- // TODO(jgruber,v8:8888): Extend this with further NCI objects,
- // and define a more systematic
- // IsNativeContextIndependent<T>() predicate.
- if (o.IsString()) continue;
- if (o.IsScopeInfo()) continue;
- if (o.IsHeapNumber()) continue;
- if (o.IsBigInt()) continue;
- if (o.IsSharedFunctionInfo()) continue;
- if (o.IsArrayBoilerplateDescription()) continue;
- if (o.IsObjectBoilerplateDescription()) continue;
- if (o.IsTemplateObjectDescription()) continue;
- if (o.IsFixedArray()) {
- // Some uses of FixedArray are valid.
- // 1. Passed as arg to %DeclareGlobals, contains only strings
- // and SFIs.
- // 2. Passed as arg to %DefineClass. No well defined contents.
- // .. ?
- // TODO(jgruber): Consider assigning dedicated instance
- // types instead of assuming fixed arrays are okay.
- continue;
- }
- // Other objects are expected to be context-dependent.
- PrintF("Found native-context-dependent object:\n");
- o.Print();
- o.map().Print();
- }
- is_independent = false;
- }
-
- return is_independent;
-}
-
bool Code::Inlines(SharedFunctionInfo sfi) {
// We can only check for inlining for optimized code.
DCHECK(is_optimized_code());
@@ -474,7 +426,7 @@ void print_pc(std::ostream& os, int pc) {
}
} // anonymous namespace
-void DeoptimizationData::DeoptimizationDataPrint(std::ostream& os) { // NOLINT
+void DeoptimizationData::DeoptimizationDataPrint(std::ostream& os) {
if (length() == 0) {
os << "Deoptimization Input Data invalidated by lazy deoptimization\n";
return;
diff --git a/chromium/v8/src/objects/code.h b/chromium/v8/src/objects/code.h
index d431701936f..fbd52505ee2 100644
--- a/chromium/v8/src/objects/code.h
+++ b/chromium/v8/src/objects/code.h
@@ -447,7 +447,6 @@ class Code : public HeapObject {
inline HandlerTable::CatchPrediction GetBuiltinCatchPrediction();
bool IsIsolateIndependent(Isolate* isolate);
- bool IsNativeContextIndependent(Isolate* isolate);
inline bool CanContainWeakObjects();
@@ -982,7 +981,7 @@ class DeoptimizationData : public FixedArray {
DECL_CAST(DeoptimizationData)
#ifdef ENABLE_DISASSEMBLER
- void DeoptimizationDataPrint(std::ostream& os); // NOLINT
+ void DeoptimizationDataPrint(std::ostream& os);
#endif
private:
diff --git a/chromium/v8/src/objects/compilation-cache-table.cc b/chromium/v8/src/objects/compilation-cache-table.cc
index 9ef14689753..b9345541167 100644
--- a/chromium/v8/src/objects/compilation-cache-table.cc
+++ b/chromium/v8/src/objects/compilation-cache-table.cc
@@ -295,16 +295,6 @@ Handle<Object> CompilationCacheTable::LookupRegExp(Handle<String> src,
return Handle<Object>(get(EntryToIndex(entry) + 1), isolate);
}
-MaybeHandle<Code> CompilationCacheTable::LookupCode(
- Handle<SharedFunctionInfo> key) {
- Isolate* isolate = GetIsolate();
- DisallowGarbageCollection no_gc;
- CodeKey k(key);
- InternalIndex entry = FindEntry(isolate, &k);
- if (entry.is_not_found()) return {};
- return Handle<Code>(Code::cast(get(EntryToIndex(entry) + 1)), isolate);
-}
-
Handle<CompilationCacheTable> CompilationCacheTable::PutScript(
Handle<CompilationCacheTable> cache, Handle<String> src,
LanguageMode language_mode, Handle<SharedFunctionInfo> value,
@@ -373,30 +363,6 @@ Handle<CompilationCacheTable> CompilationCacheTable::PutRegExp(
return cache;
}
-Handle<CompilationCacheTable> CompilationCacheTable::PutCode(
- Isolate* isolate, Handle<CompilationCacheTable> cache,
- Handle<SharedFunctionInfo> key, Handle<Code> value) {
- CodeKey k(key);
-
- {
- InternalIndex entry = cache->FindEntry(isolate, &k);
- if (entry.is_found()) {
- // Update.
- cache->set(EntryToIndex(entry), *key);
- cache->set(EntryToIndex(entry) + 1, *value);
- return cache;
- }
- }
-
- // Insert.
- cache = EnsureCapacity(isolate, cache);
- InternalIndex entry = cache->FindInsertionEntry(isolate, k.Hash());
- cache->set(EntryToIndex(entry), *key);
- cache->set(EntryToIndex(entry) + 1, *value);
- cache->ElementAdded();
- return cache;
-}
-
void CompilationCacheTable::Age(Isolate* isolate) {
DisallowGarbageCollection no_gc;
for (InternalIndex entry : IterateEntries()) {
diff --git a/chromium/v8/src/objects/compilation-cache-table.h b/chromium/v8/src/objects/compilation-cache-table.h
index 2fd548bfe57..e39e013c61c 100644
--- a/chromium/v8/src/objects/compilation-cache-table.h
+++ b/chromium/v8/src/objects/compilation-cache-table.h
@@ -123,13 +123,6 @@ class CompilationCacheTable
Isolate* isolate, Handle<CompilationCacheTable> cache, Handle<String> src,
JSRegExp::Flags flags, Handle<FixedArray> value);
- // The Code cache shares native-context-independent (NCI) code between
- // contexts.
- MaybeHandle<Code> LookupCode(Handle<SharedFunctionInfo> key);
- static Handle<CompilationCacheTable> PutCode(
- Isolate* isolate, Handle<CompilationCacheTable> cache,
- Handle<SharedFunctionInfo> key, Handle<Code> value);
-
void Remove(Object value);
void Age(Isolate* isolate);
diff --git a/chromium/v8/src/objects/compressed-slots-inl.h b/chromium/v8/src/objects/compressed-slots-inl.h
index 54c828d919a..b550c9f8515 100644
--- a/chromium/v8/src/objects/compressed-slots-inl.h
+++ b/chromium/v8/src/objects/compressed-slots-inl.h
@@ -28,6 +28,13 @@ bool CompressedObjectSlot::contains_value(Address raw_value) const {
static_cast<uint32_t>(static_cast<Tagged_t>(raw_value));
}
+bool CompressedObjectSlot::contains_map_value(Address raw_value) const {
+ // Simply forward to contains_value because map packing is not supported with
+ // pointer compression.
+ DCHECK(!V8_MAP_PACKING_BOOL);
+ return contains_value(raw_value);
+}
+
Object CompressedObjectSlot::operator*() const {
Tagged_t value = *location();
return Object(DecompressTaggedAny(address(), value));
@@ -42,6 +49,20 @@ void CompressedObjectSlot::store(Object value) const {
*location() = CompressTagged(value.ptr());
}
+void CompressedObjectSlot::store_map(Map map) const {
+ // Simply forward to store because map packing is not supported with pointer
+ // compression.
+ DCHECK(!V8_MAP_PACKING_BOOL);
+ store(map);
+}
+
+Map CompressedObjectSlot::load_map() const {
+ // Simply forward to Relaxed_Load because map packing is not supported with
+ // pointer compression.
+ DCHECK(!V8_MAP_PACKING_BOOL);
+ return Map::unchecked_cast(Relaxed_Load());
+}
+
Object CompressedObjectSlot::Acquire_Load() const {
AtomicTagged_t value = AsAtomicTagged::Acquire_Load(location());
return Object(DecompressTaggedAny(address(), value));
diff --git a/chromium/v8/src/objects/compressed-slots.h b/chromium/v8/src/objects/compressed-slots.h
index 7737e685fe0..f13fc15adcf 100644
--- a/chromium/v8/src/objects/compressed-slots.h
+++ b/chromium/v8/src/objects/compressed-slots.h
@@ -37,12 +37,16 @@ class CompressedObjectSlot : public SlotBase<CompressedObjectSlot, Tagged_t> {
// Compares memory representation of a value stored in the slot with given
// raw value without decompression.
inline bool contains_value(Address raw_value) const;
+ inline bool contains_map_value(Address raw_value) const;
// TODO(leszeks): Consider deprecating the operator* load, and always pass the
// Isolate.
inline Object operator*() const;
inline Object load(PtrComprCageBase cage_base) const;
inline void store(Object value) const;
+ inline void store_map(Map map) const;
+
+ inline Map load_map() const;
inline Object Acquire_Load() const;
inline Object Relaxed_Load() const;
diff --git a/chromium/v8/src/objects/contexts-inl.h b/chromium/v8/src/objects/contexts-inl.h
index 356df687ded..d744ffc6818 100644
--- a/chromium/v8/src/objects/contexts-inl.h
+++ b/chromium/v8/src/objects/contexts-inl.h
@@ -55,38 +55,54 @@ NEVER_READ_ONLY_SPACE_IMPL(Context)
CAST_ACCESSOR(NativeContext)
-V8_INLINE Object Context::get(int index) const { return elements(index); }
-V8_INLINE Object Context::get(PtrComprCageBase cage_base, int index) const {
- return elements(cage_base, index);
-}
-V8_INLINE void Context::set(int index, Object value) {
- set_elements(index, value);
+Object Context::get(int index) const {
+ PtrComprCageBase cage_base = GetPtrComprCageBase(*this);
+ return get(cage_base, index);
}
-V8_INLINE void Context::set(int index, Object value, WriteBarrierMode mode) {
- set_elements(index, value, mode);
+
+Object Context::get(PtrComprCageBase cage_base, int index) const {
+ DCHECK_LT(static_cast<unsigned int>(index),
+ static_cast<unsigned int>(length()));
+ return TaggedField<Object>::Relaxed_Load(cage_base, *this,
+ OffsetOfElementAt(index));
}
-void Context::set_scope_info(ScopeInfo scope_info, WriteBarrierMode mode) {
- set(SCOPE_INFO_INDEX, scope_info, mode);
+void Context::set(int index, Object value, WriteBarrierMode mode) {
+ DCHECK_LT(static_cast<unsigned int>(index),
+ static_cast<unsigned int>(length()));
+ const int offset = OffsetOfElementAt(index);
+ RELAXED_WRITE_FIELD(*this, offset, value);
+ CONDITIONAL_WRITE_BARRIER(*this, offset, value, mode);
}
-Object Context::synchronized_get(int index) const {
+Object Context::get(int index, AcquireLoadTag tag) const {
PtrComprCageBase cage_base = GetPtrComprCageBase(*this);
- return synchronized_get(cage_base, index);
+ return get(cage_base, index, tag);
}
-Object Context::synchronized_get(PtrComprCageBase cage_base, int index) const {
+Object Context::get(PtrComprCageBase cage_base, int index,
+ AcquireLoadTag) const {
DCHECK_LT(static_cast<unsigned int>(index),
- static_cast<unsigned int>(this->length()));
+ static_cast<unsigned int>(length()));
return ACQUIRE_READ_FIELD(*this, OffsetOfElementAt(index));
}
-void Context::synchronized_set(int index, Object value) {
+void Context::set(int index, Object value, WriteBarrierMode mode,
+ ReleaseStoreTag) {
DCHECK_LT(static_cast<unsigned int>(index),
- static_cast<unsigned int>(this->length()));
+ static_cast<unsigned int>(length()));
const int offset = OffsetOfElementAt(index);
RELEASE_WRITE_FIELD(*this, offset, value);
- WRITE_BARRIER(*this, offset, value);
+ CONDITIONAL_WRITE_BARRIER(*this, offset, value, mode);
+}
+
+void NativeContext::set(int index, Object value, WriteBarrierMode mode,
+ ReleaseStoreTag tag) {
+ Context::set(index, value, mode, tag);
+}
+
+void Context::set_scope_info(ScopeInfo scope_info, WriteBarrierMode mode) {
+ set(SCOPE_INFO_INDEX, scope_info, mode);
}
Object Context::unchecked_previous() { return get(PREVIOUS_INDEX); }
@@ -161,18 +177,22 @@ bool Context::HasSameSecurityTokenAs(Context that) const {
that.native_context().security_token();
}
-#define NATIVE_CONTEXT_FIELD_ACCESSORS(index, type, name) \
- void Context::set_##name(type value) { \
- DCHECK(IsNativeContext()); \
- set(index, value); \
- } \
- bool Context::is_##name(type value) const { \
- DCHECK(IsNativeContext()); \
- return type::cast(get(index)) == value; \
- } \
- type Context::name() const { \
- DCHECK(IsNativeContext()); \
- return type::cast(get(index)); \
+#define NATIVE_CONTEXT_FIELD_ACCESSORS(index, type, name) \
+ void Context::set_##name(type value) { \
+ DCHECK(IsNativeContext()); \
+ set(index, value, UPDATE_WRITE_BARRIER, kReleaseStore); \
+ } \
+ bool Context::is_##name(type value) const { \
+ DCHECK(IsNativeContext()); \
+ return type::cast(get(index)) == value; \
+ } \
+ type Context::name() const { \
+ DCHECK(IsNativeContext()); \
+ return type::cast(get(index)); \
+ } \
+ type Context::name(AcquireLoadTag tag) const { \
+ DCHECK(IsNativeContext()); \
+ return type::cast(get(index, tag)); \
}
NATIVE_CONTEXT_FIELDS(NATIVE_CONTEXT_FIELD_ACCESSORS)
#undef NATIVE_CONTEXT_FIELD_ACCESSORS
@@ -242,8 +262,9 @@ Map Context::GetInitialJSArrayMap(ElementsKind kind) const {
}
DEF_GETTER(NativeContext, microtask_queue, MicrotaskQueue*) {
+ Isolate* isolate = GetIsolateForHeapSandbox(*this);
return reinterpret_cast<MicrotaskQueue*>(ReadExternalPointerField(
- kMicrotaskQueueOffset, cage_base, kNativeContextMicrotaskQueueTag));
+ kMicrotaskQueueOffset, isolate, kNativeContextMicrotaskQueueTag));
}
void NativeContext::AllocateExternalPointerEntries(Isolate* isolate) {
@@ -259,11 +280,13 @@ void NativeContext::set_microtask_queue(Isolate* isolate,
void NativeContext::synchronized_set_script_context_table(
ScriptContextTable script_context_table) {
- synchronized_set(SCRIPT_CONTEXT_TABLE_INDEX, script_context_table);
+ set(SCRIPT_CONTEXT_TABLE_INDEX, script_context_table, UPDATE_WRITE_BARRIER,
+ kReleaseStore);
}
ScriptContextTable NativeContext::synchronized_script_context_table() const {
- return ScriptContextTable::cast(synchronized_get(SCRIPT_CONTEXT_TABLE_INDEX));
+ return ScriptContextTable::cast(
+ get(SCRIPT_CONTEXT_TABLE_INDEX, kAcquireLoad));
}
OSROptimizedCodeCache NativeContext::GetOSROptimizedCodeCache() {
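The Context accessors above distinguish plain (relaxed) element accesses from acquire/release ones by overloading on empty tag types rather than by giving the synchronized variants different names. Here is a minimal standalone sketch of that tag-dispatch idea, using std::atomic for the storage; the Slot class is a stand-in, while AcquireLoadTag/ReleaseStoreTag mirror the V8 tag names.

#include <atomic>
#include <cassert>

struct AcquireLoadTag {};
struct ReleaseStoreTag {};
inline constexpr AcquireLoadTag kAcquireLoad{};
inline constexpr ReleaseStoreTag kReleaseStore{};

class Slot {
 public:
  // Plain accessors use relaxed semantics.
  int get() const { return value_.load(std::memory_order_relaxed); }
  void set(int v) { value_.store(v, std::memory_order_relaxed); }

  // Tag-selected accessors use acquire/release semantics.
  int get(AcquireLoadTag) const {
    return value_.load(std::memory_order_acquire);
  }
  void set(int v, ReleaseStoreTag) {
    value_.store(v, std::memory_order_release);
  }

 private:
  std::atomic<int> value_{0};
};

int main() {
  Slot slot;
  slot.set(42, kReleaseStore);           // publishing store
  assert(slot.get(kAcquireLoad) == 42);  // synchronizing load
}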
diff --git a/chromium/v8/src/objects/contexts.cc b/chromium/v8/src/objects/contexts.cc
index af73bf02568..eade27d934b 100644
--- a/chromium/v8/src/objects/contexts.cc
+++ b/chromium/v8/src/objects/contexts.cc
@@ -55,6 +55,7 @@ bool ScriptContextTable::Lookup(Isolate* isolate, ScriptContextTable table,
for (int i = 0; i < table.synchronized_used(); i++) {
Context context = table.get_context(i);
DCHECK(context.IsScriptContext());
+ result->is_repl_mode = context.scope_info().IsReplModeScope();
int slot_index = ScopeInfo::ContextSlotIndex(
context.scope_info(), name, &result->mode, &result->init_flag,
&result->maybe_assigned_flag, &is_static_flag);
@@ -415,11 +416,11 @@ void NativeContext::AddOptimizedCode(Code code) {
DCHECK(CodeKindCanDeoptimize(code.kind()));
DCHECK(code.next_code_link().IsUndefined());
code.set_next_code_link(get(OPTIMIZED_CODE_LIST));
- set(OPTIMIZED_CODE_LIST, code, UPDATE_WEAK_WRITE_BARRIER);
+ set(OPTIMIZED_CODE_LIST, code, UPDATE_WEAK_WRITE_BARRIER, kReleaseStore);
}
void NativeContext::SetOptimizedCodeListHead(Object head) {
- set(OPTIMIZED_CODE_LIST, head, UPDATE_WEAK_WRITE_BARRIER);
+ set(OPTIMIZED_CODE_LIST, head, UPDATE_WEAK_WRITE_BARRIER, kReleaseStore);
}
Object NativeContext::OptimizedCodeListHead() {
@@ -427,7 +428,7 @@ Object NativeContext::OptimizedCodeListHead() {
}
void NativeContext::SetDeoptimizedCodeListHead(Object head) {
- set(DEOPTIMIZED_CODE_LIST, head, UPDATE_WEAK_WRITE_BARRIER);
+ set(DEOPTIMIZED_CODE_LIST, head, UPDATE_WEAK_WRITE_BARRIER, kReleaseStore);
}
Object NativeContext::DeoptimizedCodeListHead() {
@@ -511,5 +512,53 @@ STATIC_ASSERT(NativeContext::kSize ==
(Context::SizeFor(NativeContext::NATIVE_CONTEXT_SLOTS) +
kSystemPointerSize));
+void NativeContext::RunPromiseHook(PromiseHookType type,
+ Handle<JSPromise> promise,
+ Handle<Object> parent) {
+ Isolate* isolate = promise->GetIsolate();
+ DCHECK(isolate->HasContextPromiseHooks());
+ int contextSlot;
+
+ switch (type) {
+ case PromiseHookType::kInit:
+ contextSlot = PROMISE_HOOK_INIT_FUNCTION_INDEX;
+ break;
+ case PromiseHookType::kResolve:
+ contextSlot = PROMISE_HOOK_RESOLVE_FUNCTION_INDEX;
+ break;
+ case PromiseHookType::kBefore:
+ contextSlot = PROMISE_HOOK_BEFORE_FUNCTION_INDEX;
+ break;
+ case PromiseHookType::kAfter:
+ contextSlot = PROMISE_HOOK_AFTER_FUNCTION_INDEX;
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ Handle<Object> hook(isolate->native_context()->get(contextSlot), isolate);
+ if (hook->IsUndefined()) return;
+
+ int argc = type == PromiseHookType::kInit ? 2 : 1;
+ Handle<Object> argv[2] = {
+ Handle<Object>::cast(promise),
+ parent
+ };
+
+ Handle<Object> receiver = isolate->global_proxy();
+
+ if (Execution::Call(isolate, hook, receiver, argc, argv).is_null()) {
+ DCHECK(isolate->has_pending_exception());
+ Handle<Object> exception(isolate->pending_exception(), isolate);
+
+ MessageLocation* no_location = nullptr;
+ Handle<JSMessageObject> message =
+ isolate->CreateMessageOrAbort(exception, no_location);
+ MessageHandler::ReportMessage(isolate, no_location, message);
+
+ isolate->clear_pending_exception();
+ }
+}
+
} // namespace internal
} // namespace v8
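RunPromiseHook above picks a native-context slot from the hook type, does nothing if the slot still holds undefined, and reports rather than propagates any exception thrown by the hook. A heavily reduced sketch of that slot-based dispatch follows, with std::function standing in for the stored JS callable and an empty function standing in for undefined; all names are stand-ins and error reporting is omitted.

#include <array>
#include <cstdio>
#include <functional>

enum class PromiseHookType { kInit, kResolve, kBefore, kAfter };

struct ToyNativeContext {
  // One slot per hook type; an empty std::function plays the role of the
  // undefined value stored in an unused context slot.
  std::array<std::function<void()>, 4> hook_slots;

  void RunPromiseHook(PromiseHookType type) {
    const auto& hook = hook_slots[static_cast<int>(type)];
    if (!hook) return;  // slot is "undefined": nothing to run
    hook();
  }
};

int main() {
  ToyNativeContext context;
  context.hook_slots[static_cast<int>(PromiseHookType::kInit)] = [] {
    std::puts("init hook ran");
  };
  context.RunPromiseHook(PromiseHookType::kInit);     // prints
  context.RunPromiseHook(PromiseHookType::kResolve);  // silently skipped
}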
diff --git a/chromium/v8/src/objects/contexts.h b/chromium/v8/src/objects/contexts.h
index 79aed5d40ff..2c8988f13ec 100644
--- a/chromium/v8/src/objects/contexts.h
+++ b/chromium/v8/src/objects/contexts.h
@@ -50,7 +50,8 @@ enum ContextLookupFlags {
V(MATH_POW_INDEX, JSFunction, math_pow) \
V(PROMISE_INTERNAL_CONSTRUCTOR_INDEX, JSFunction, \
promise_internal_constructor) \
- V(PROMISE_THEN_INDEX, JSFunction, promise_then)
+ V(PROMISE_THEN_INDEX, JSFunction, promise_then) \
+ V(FUNCTION_PROTOTYPE_APPLY_INDEX, JSFunction, function_prototype_apply)
#define NATIVE_CONTEXT_FIELDS(V) \
V(GLOBAL_PROXY_INDEX, JSGlobalProxy, global_proxy_object) \
@@ -111,6 +112,8 @@ enum ContextLookupFlags {
V(GENERATOR_OBJECT_PROTOTYPE_MAP_INDEX, Map, generator_object_prototype_map) \
V(ASYNC_GENERATOR_OBJECT_PROTOTYPE_MAP_INDEX, Map, \
async_generator_object_prototype_map) \
+ V(GROWABLE_SHARED_ARRAY_BUFFER_FUN_INDEX, JSFunction, \
+ growable_shared_array_buffer_fun) \
V(INITIAL_ARRAY_ITERATOR_MAP_INDEX, Map, initial_array_iterator_map) \
V(INITIAL_ARRAY_ITERATOR_PROTOTYPE_INDEX, JSObject, \
initial_array_iterator_prototype) \
@@ -198,12 +201,29 @@ enum ContextLookupFlags {
V(NUMBER_FUNCTION_INDEX, JSFunction, number_function) \
V(OBJECT_FUNCTION_INDEX, JSFunction, object_function) \
V(OBJECT_FUNCTION_PROTOTYPE_MAP_INDEX, Map, object_function_prototype_map) \
+ V(PROMISE_HOOK_INIT_FUNCTION_INDEX, Object, promise_hook_init_function) \
+ V(PROMISE_HOOK_BEFORE_FUNCTION_INDEX, Object, promise_hook_before_function) \
+ V(PROMISE_HOOK_AFTER_FUNCTION_INDEX, Object, promise_hook_after_function) \
+ V(PROMISE_HOOK_RESOLVE_FUNCTION_INDEX, Object, \
+ promise_hook_resolve_function) \
V(PROXY_CALLABLE_MAP_INDEX, Map, proxy_callable_map) \
V(PROXY_CONSTRUCTOR_MAP_INDEX, Map, proxy_constructor_map) \
V(PROXY_FUNCTION_INDEX, JSFunction, proxy_function) \
V(PROXY_MAP_INDEX, Map, proxy_map) \
V(PROXY_REVOCABLE_RESULT_MAP_INDEX, Map, proxy_revocable_result_map) \
V(PROMISE_PROTOTYPE_INDEX, JSObject, promise_prototype) \
+ V(RAB_GSAB_UINT8_ARRAY_MAP_INDEX, Map, rab_gsab_uint8_array_map) \
+ V(RAB_GSAB_INT8_ARRAY_MAP_INDEX, Map, rab_gsab_int8_array_map) \
+ V(RAB_GSAB_UINT16_ARRAY_MAP_INDEX, Map, rab_gsab_uint16_array_map) \
+ V(RAB_GSAB_INT16_ARRAY_MAP_INDEX, Map, rab_gsab_int16_array_map) \
+ V(RAB_GSAB_UINT32_ARRAY_MAP_INDEX, Map, rab_gsab_uint32_array_map) \
+ V(RAB_GSAB_INT32_ARRAY_MAP_INDEX, Map, rab_gsab_int32_array_map) \
+ V(RAB_GSAB_FLOAT32_ARRAY_MAP_INDEX, Map, rab_gsab_float32_array_map) \
+ V(RAB_GSAB_FLOAT64_ARRAY_MAP_INDEX, Map, rab_gsab_float64_array_map) \
+ V(RAB_GSAB_UINT8_CLAMPED_ARRAY_MAP_INDEX, Map, \
+ rab_gsab_uint8_clamped_array_map) \
+ V(RAB_GSAB_BIGUINT64_ARRAY_MAP_INDEX, Map, rab_gsab_biguint64_array_map) \
+ V(RAB_GSAB_BIGINT64_ARRAY_MAP_INDEX, Map, rab_gsab_bigint64_array_map) \
V(RECORDER_CONTEXT_ID, Object, recorder_context_id) \
V(REGEXP_EXEC_FUNCTION_INDEX, JSFunction, regexp_exec_function) \
V(REGEXP_FUNCTION_INDEX, JSFunction, regexp_function) \
@@ -220,6 +240,7 @@ enum ContextLookupFlags {
V(REGEXP_SPLIT_FUNCTION_INDEX, JSFunction, regexp_split_function) \
V(INITIAL_REGEXP_STRING_ITERATOR_PROTOTYPE_MAP_INDEX, Map, \
initial_regexp_string_iterator_prototype_map) \
+ V(RESIZABLE_ARRAY_BUFFER_FUN_INDEX, JSFunction, resizable_array_buffer_fun) \
V(SCRIPT_CONTEXT_TABLE_INDEX, ScriptContextTable, script_context_table) \
V(SCRIPT_EXECUTION_CALLBACK_INDEX, Object, script_execution_callback) \
V(SECURITY_TOKEN_INDEX, Object, security_token) \
@@ -337,6 +358,9 @@ class ScriptContextTable : public FixedArray {
struct LookupResult {
int context_index;
int slot_index;
+ // repl_mode flag is needed to disable inlining of 'const' variables in REPL
+ // mode.
+ bool is_repl_mode;
VariableMode mode;
InitializationFlag init_flag;
MaybeAssignedFlag maybe_assigned_flag;
@@ -421,16 +445,18 @@ class Context : public TorqueGeneratedContext<Context, HeapObject> {
NEVER_READ_ONLY_SPACE
// Setter and getter for elements.
+ // Note the plain accessors use relaxed semantics.
+ // TODO(jgruber): Make that explicit through tags.
V8_INLINE Object get(int index) const;
V8_INLINE Object get(PtrComprCageBase cage_base, int index) const;
- V8_INLINE void set(int index, Object value);
- // Setter with explicit barrier mode.
- V8_INLINE void set(int index, Object value, WriteBarrierMode mode);
- // Setter and getter with synchronization semantics.
- V8_INLINE Object synchronized_get(int index) const;
- V8_INLINE Object synchronized_get(PtrComprCageBase cage_base,
- int index) const;
- V8_INLINE void synchronized_set(int index, Object value);
+ V8_INLINE void set(int index, Object value,
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+ // Accessors with acquire-release semantics.
+ V8_INLINE Object get(int index, AcquireLoadTag) const;
+ V8_INLINE Object get(PtrComprCageBase cage_base, int index,
+ AcquireLoadTag) const;
+ V8_INLINE void set(int index, Object value, WriteBarrierMode mode,
+ ReleaseStoreTag);
static const int kScopeInfoOffset = kElementsOffset;
static const int kPreviousOffset = kScopeInfoOffset + kTaggedSize;
@@ -447,7 +473,7 @@ class Context : public TorqueGeneratedContext<Context, HeapObject> {
V8_INLINE static constexpr int SizeFor(int length) {
// TODO(v8:9287): This is a workaround for GCMole build failures.
int result = kElementsOffset + length * kTaggedSize;
- CONSTEXPR_DCHECK(TorqueGeneratedContext::SizeFor(length) == result);
+ DCHECK_EQ(TorqueGeneratedContext::SizeFor(length), result);
return result;
}
@@ -523,8 +549,6 @@ class Context : public TorqueGeneratedContext<Context, HeapObject> {
inline Object unchecked_previous();
inline Context previous();
- inline void set_previous(Context context,
- WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
inline Object next_context_link();
@@ -582,7 +606,8 @@ class Context : public TorqueGeneratedContext<Context, HeapObject> {
#define NATIVE_CONTEXT_FIELD_ACCESSORS(index, type, name) \
inline void set_##name(type value); \
inline bool is_##name(type value) const; \
- inline type name() const;
+ inline type name() const; \
+ inline type name(AcquireLoadTag) const;
NATIVE_CONTEXT_FIELDS(NATIVE_CONTEXT_FIELD_ACCESSORS)
#undef NATIVE_CONTEXT_FIELD_ACCESSORS
@@ -638,6 +663,10 @@ class Context : public TorqueGeneratedContext<Context, HeapObject> {
static bool IsBootstrappingOrValidParentContext(Object object, Context kid);
#endif
+ friend class Factory;
+ inline void set_previous(Context context,
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+
TQ_OBJECT_CONSTRUCTORS(Context)
};
@@ -648,6 +677,15 @@ class NativeContext : public Context {
inline void AllocateExternalPointerEntries(Isolate* isolate);
+ // NativeContext fields are read concurrently from background threads; any
+ // concurrent writes of affected fields must have acquire-release semantics,
+ // thus we hide the non-atomic setter. Note this doesn't protect fully since
+ // one could still use Context::set and/or write directly using offsets (e.g.
+ // from CSA/Torque).
+ void set(int index, Object value, WriteBarrierMode mode) = delete;
+ V8_INLINE void set(int index, Object value, WriteBarrierMode mode,
+ ReleaseStoreTag);
+
// [microtask_queue]: pointer to the MicrotaskQueue object.
DECL_GETTER(microtask_queue, MicrotaskQueue*)
inline void set_microtask_queue(Isolate* isolate, MicrotaskQueue* queue);
@@ -696,6 +734,9 @@ class NativeContext : public Context {
void IncrementErrorsThrown();
int GetErrorsThrown();
+ void RunPromiseHook(PromiseHookType type, Handle<JSPromise> promise,
+ Handle<Object> parent);
+
private:
STATIC_ASSERT(OffsetOfElementAt(EMBEDDER_DATA_INDEX) ==
Internals::kNativeContextEmbedderDataOffset);
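NativeContext above deletes the inherited non-atomic set() overload so that, at least through the C++ accessors, every write to a native-context slot has to go through the release-store variant. A compact sketch of that "hide the unsafe overload" technique; the class and tag names are stand-ins and the argument list is simplified.

#include <cassert>

struct ReleaseStoreTag {};
inline constexpr ReleaseStoreTag kReleaseStore{};

class ToyContext {
 public:
  int get(int index) const { return slots_[index]; }
  void set(int index, int value) { slots_[index] = value; }  // relaxed store
  void set(int index, int value, ReleaseStoreTag) { slots_[index] = value; }

 protected:
  int slots_[4] = {};
};

class ToyNativeContext : public ToyContext {
 public:
  // Slots are read concurrently from background threads, so the relaxed
  // setter is off limits; only the tagged release-store setter remains.
  void set(int index, int value) = delete;
  void set(int index, int value, ReleaseStoreTag tag) {
    ToyContext::set(index, value, tag);
  }
};

int main() {
  ToyNativeContext context;
  context.set(0, 7, kReleaseStore);  // OK: tagged setter
  // context.set(0, 7);              // would not compile: deleted overload
  assert(context.get(0) == 7);
}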
diff --git a/chromium/v8/src/objects/contexts.tq b/chromium/v8/src/objects/contexts.tq
index 604852c24ea..83c43cc7f57 100644
--- a/chromium/v8/src/objects/contexts.tq
+++ b/chromium/v8/src/objects/contexts.tq
@@ -13,7 +13,7 @@ class Context extends HeapObject {
return *ContextSlot(this, ContextSlot::SCOPE_INFO_INDEX);
}
const length: Smi;
- @relaxedRead @relaxedWrite elements[length]: Object;
+ elements[length]: Object;
}
extern class AwaitContext extends Context generates 'TNode<Context>';
@@ -118,12 +118,42 @@ extern enum ContextSlot extends intptr constexpr 'Context::Field' {
SLOPPY_ARGUMENTS_MAP_INDEX: Slot<NativeContext, Map>,
FAST_ALIASED_ARGUMENTS_MAP_INDEX: Slot<NativeContext, Map>,
FUNCTION_CONTEXT_MAP_INDEX: Slot<NativeContext, Map>,
+ FUNCTION_PROTOTYPE_APPLY_INDEX: Slot<NativeContext, JSFunction>,
+
+ UINT8_ARRAY_FUN_INDEX: Slot<NativeContext, JSFunction>,
+ INT8_ARRAY_FUN_INDEX: Slot<NativeContext, JSFunction>,
+ UINT16_ARRAY_FUN_INDEX: Slot<NativeContext, JSFunction>,
+ INT16_ARRAY_FUN_INDEX: Slot<NativeContext, JSFunction>,
+ UINT32_ARRAY_FUN_INDEX: Slot<NativeContext, JSFunction>,
+ INT32_ARRAY_FUN_INDEX: Slot<NativeContext, JSFunction>,
+ FLOAT32_ARRAY_FUN_INDEX: Slot<NativeContext, JSFunction>,
+ FLOAT64_ARRAY_FUN_INDEX: Slot<NativeContext, JSFunction>,
+ UINT8_CLAMPED_ARRAY_FUN_INDEX: Slot<NativeContext, JSFunction>,
+ BIGUINT64_ARRAY_FUN_INDEX: Slot<NativeContext, JSFunction>,
+ BIGINT64_ARRAY_FUN_INDEX: Slot<NativeContext, JSFunction>,
+
+ RAB_GSAB_UINT8_ARRAY_MAP_INDEX: Slot<NativeContext, Map>,
+ RAB_GSAB_INT8_ARRAY_MAP_INDEX: Slot<NativeContext, Map>,
+ RAB_GSAB_UINT16_ARRAY_MAP_INDEX: Slot<NativeContext, Map>,
+ RAB_GSAB_INT16_ARRAY_MAP_INDEX: Slot<NativeContext, Map>,
+ RAB_GSAB_UINT32_ARRAY_MAP_INDEX: Slot<NativeContext, Map>,
+ RAB_GSAB_INT32_ARRAY_MAP_INDEX: Slot<NativeContext, Map>,
+ RAB_GSAB_FLOAT32_ARRAY_MAP_INDEX: Slot<NativeContext, Map>,
+ RAB_GSAB_FLOAT64_ARRAY_MAP_INDEX: Slot<NativeContext, Map>,
+ RAB_GSAB_UINT8_CLAMPED_ARRAY_MAP_INDEX: Slot<NativeContext, Map>,
+ RAB_GSAB_BIGUINT64_ARRAY_MAP_INDEX: Slot<NativeContext, Map>,
+ RAB_GSAB_BIGINT64_ARRAY_MAP_INDEX: Slot<NativeContext, Map>,
PROMISE_FUNCTION_INDEX: Slot<NativeContext, JSFunction>,
PROMISE_THEN_INDEX: Slot<NativeContext, JSFunction>,
PROMISE_PROTOTYPE_INDEX: Slot<NativeContext, JSObject>,
STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX: Slot<NativeContext, Map>,
+ PROMISE_HOOK_INIT_FUNCTION_INDEX: Slot<NativeContext, Undefined|Callable>,
+ PROMISE_HOOK_BEFORE_FUNCTION_INDEX: Slot<NativeContext, Undefined|Callable>,
+ PROMISE_HOOK_AFTER_FUNCTION_INDEX: Slot<NativeContext, Undefined|Callable>,
+ PROMISE_HOOK_RESOLVE_FUNCTION_INDEX: Slot<NativeContext, Undefined|Callable>,
+
CONTINUATION_PRESERVED_EMBEDDER_DATA_INDEX: Slot<NativeContext, HeapObject>,
BOUND_FUNCTION_WITH_CONSTRUCTOR_MAP_INDEX: Slot<NativeContext, Map>,
diff --git a/chromium/v8/src/objects/debug-objects-inl.h b/chromium/v8/src/objects/debug-objects-inl.h
index 8c8b013e232..e9e7c08f4cd 100644
--- a/chromium/v8/src/objects/debug-objects-inl.h
+++ b/chromium/v8/src/objects/debug-objects-inl.h
@@ -36,12 +36,6 @@ BIT_FIELD_ACCESSORS(DebugInfo, debugger_hints, computed_debug_is_blackboxed,
BIT_FIELD_ACCESSORS(DebugInfo, debugger_hints, debugging_id,
DebugInfo::DebuggingIdBits)
-// TODO(nicohartmann@, v8:11122): Remove once Torque can generate them.
-RELEASE_ACQUIRE_ACCESSORS(DebugInfo, debug_bytecode_array, HeapObject,
- kDebugBytecodeArrayOffset)
-RELEASE_ACQUIRE_ACCESSORS(DebugInfo, original_bytecode_array, HeapObject,
- kOriginalBytecodeArrayOffset)
-
bool DebugInfo::HasInstrumentedBytecodeArray() {
return debug_bytecode_array(kAcquireLoad).IsBytecodeArray();
}
diff --git a/chromium/v8/src/objects/debug-objects.cc b/chromium/v8/src/objects/debug-objects.cc
index ecb24632ef2..d3caa122332 100644
--- a/chromium/v8/src/objects/debug-objects.cc
+++ b/chromium/v8/src/objects/debug-objects.cc
@@ -14,18 +14,23 @@ namespace v8 {
namespace internal {
bool DebugInfo::IsEmpty() const {
- return flags() == kNone && debugger_hints() == 0;
+ return flags(kRelaxedLoad) == kNone && debugger_hints() == 0;
}
-bool DebugInfo::HasBreakInfo() const { return (flags() & kHasBreakInfo) != 0; }
+bool DebugInfo::HasBreakInfo() const {
+ return (flags(kRelaxedLoad) & kHasBreakInfo) != 0;
+}
DebugInfo::ExecutionMode DebugInfo::DebugExecutionMode() const {
- return (flags() & kDebugExecutionMode) != 0 ? kSideEffects : kBreakpoints;
+ return (flags(kRelaxedLoad) & kDebugExecutionMode) != 0 ? kSideEffects
+ : kBreakpoints;
}
void DebugInfo::SetDebugExecutionMode(ExecutionMode value) {
- set_flags(value == kSideEffects ? (flags() | kDebugExecutionMode)
- : (flags() & ~kDebugExecutionMode));
+ set_flags(value == kSideEffects
+ ? (flags(kRelaxedLoad) | kDebugExecutionMode)
+ : (flags(kRelaxedLoad) & ~kDebugExecutionMode),
+ kRelaxedStore);
}
void DebugInfo::ClearBreakInfo(Isolate* isolate) {
@@ -45,27 +50,29 @@ void DebugInfo::ClearBreakInfo(Isolate* isolate) {
}
set_break_points(ReadOnlyRoots(isolate).empty_fixed_array());
- int new_flags = flags();
+ int new_flags = flags(kRelaxedLoad);
new_flags &= ~kHasBreakInfo & ~kPreparedForDebugExecution;
new_flags &= ~kBreakAtEntry & ~kCanBreakAtEntry;
new_flags &= ~kDebugExecutionMode;
- set_flags(new_flags);
+ set_flags(new_flags, kRelaxedStore);
}
void DebugInfo::SetBreakAtEntry() {
DCHECK(CanBreakAtEntry());
- set_flags(flags() | kBreakAtEntry);
+ set_flags(flags(kRelaxedLoad) | kBreakAtEntry, kRelaxedStore);
}
void DebugInfo::ClearBreakAtEntry() {
DCHECK(CanBreakAtEntry());
- set_flags(flags() & ~kBreakAtEntry);
+ set_flags(flags(kRelaxedLoad) & ~kBreakAtEntry, kRelaxedStore);
}
-bool DebugInfo::BreakAtEntry() const { return (flags() & kBreakAtEntry) != 0; }
+bool DebugInfo::BreakAtEntry() const {
+ return (flags(kRelaxedLoad) & kBreakAtEntry) != 0;
+}
bool DebugInfo::CanBreakAtEntry() const {
- return (flags() & kCanBreakAtEntry) != 0;
+ return (flags(kRelaxedLoad) & kCanBreakAtEntry) != 0;
}
// Check if there is a break point at this source position.
@@ -199,15 +206,15 @@ Handle<Object> DebugInfo::FindBreakPointInfo(Isolate* isolate,
}
bool DebugInfo::HasCoverageInfo() const {
- return (flags() & kHasCoverageInfo) != 0;
+ return (flags(kRelaxedLoad) & kHasCoverageInfo) != 0;
}
void DebugInfo::ClearCoverageInfo(Isolate* isolate) {
if (HasCoverageInfo()) {
set_coverage_info(ReadOnlyRoots(isolate).undefined_value());
- int new_flags = flags() & ~kHasCoverageInfo;
- set_flags(new_flags);
+ int new_flags = flags(kRelaxedLoad) & ~kHasCoverageInfo;
+ set_flags(new_flags, kRelaxedStore);
}
}
diff --git a/chromium/v8/src/objects/debug-objects.h b/chromium/v8/src/objects/debug-objects.h
index e92a3026f88..77c9b80e659 100644
--- a/chromium/v8/src/objects/debug-objects.h
+++ b/chromium/v8/src/objects/debug-objects.h
@@ -37,7 +37,10 @@ class DebugInfo : public TorqueGeneratedDebugInfo<DebugInfo, Struct> {
// --- Debug execution ---
// -----------------------
- enum ExecutionMode { kBreakpoints = 0, kSideEffects = kDebugExecutionMode };
+ enum ExecutionMode : uint8_t {
+ kBreakpoints = 0,
+ kSideEffects = kDebugExecutionMode
+ };
// Returns current debug execution mode. Debug execution mode defines by
// applied to bytecode patching. False for breakpoints, true for side effect
@@ -45,9 +48,6 @@ class DebugInfo : public TorqueGeneratedDebugInfo<DebugInfo, Struct> {
ExecutionMode DebugExecutionMode() const;
void SetDebugExecutionMode(ExecutionMode value);
- DECL_RELEASE_ACQUIRE_ACCESSORS(debug_bytecode_array, HeapObject)
- DECL_RELEASE_ACQUIRE_ACCESSORS(original_bytecode_array, HeapObject)
-
// Specifies whether the associated function has an instrumented bytecode
// array. If so, OriginalBytecodeArray returns the non-instrumented bytecode,
// and DebugBytecodeArray returns the instrumented bytecode.
diff --git a/chromium/v8/src/objects/debug-objects.tq b/chromium/v8/src/objects/debug-objects.tq
index b8ca86781fe..1df2e7a484d 100644
--- a/chromium/v8/src/objects/debug-objects.tq
+++ b/chromium/v8/src/objects/debug-objects.tq
@@ -44,14 +44,18 @@ extern class DebugInfo extends Struct {
script: Undefined|Script;
// The original uninstrumented bytecode array for functions with break
// points - the instrumented bytecode is held in the shared function info.
+ @cppAcquireLoad
+ @cppReleaseStore
original_bytecode_array: Undefined|BytecodeArray;
// The debug instrumented bytecode array for functions with break points
// - also pointed to by the shared function info.
+ @cppAcquireLoad
+ @cppReleaseStore
debug_bytecode_array: Undefined|BytecodeArray;
// Fixed array holding status information for each active break point.
break_points: FixedArray;
// A bitfield that lists uses of the current instance.
- flags: SmiTagged<DebugInfoFlags>;
+ @cppRelaxedLoad @cppRelaxedStore flags: SmiTagged<DebugInfoFlags>;
coverage_info: CoverageInfo|Undefined;
}
diff --git a/chromium/v8/src/objects/descriptor-array.h b/chromium/v8/src/objects/descriptor-array.h
index 327931a421e..1a9eb7fae1f 100644
--- a/chromium/v8/src/objects/descriptor-array.h
+++ b/chromium/v8/src/objects/descriptor-array.h
@@ -133,9 +133,9 @@ class DescriptorArray
// Allocates a DescriptorArray, but returns the singleton
// empty descriptor array object if number_of_descriptors is 0.
- template <typename LocalIsolate>
+ template <typename IsolateT>
V8_EXPORT_PRIVATE static Handle<DescriptorArray> Allocate(
- LocalIsolate* isolate, int nof_descriptors, int slack,
+ IsolateT* isolate, int nof_descriptors, int slack,
AllocationType allocation = AllocationType::kYoung);
void Initialize(EnumCache enum_cache, HeapObject undefined_value,
diff --git a/chromium/v8/src/objects/dictionary.h b/chromium/v8/src/objects/dictionary.h
index be255f8162d..a2db1b6289b 100644
--- a/chromium/v8/src/objects/dictionary.h
+++ b/chromium/v8/src/objects/dictionary.h
@@ -76,9 +76,9 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) Dictionary
// Garbage collection support.
inline ObjectSlot RawFieldOfValueAt(InternalIndex entry);
- template <typename LocalIsolate>
+ template <typename IsolateT>
V8_WARN_UNUSED_RESULT static Handle<Derived> Add(
- LocalIsolate* isolate, Handle<Derived> dictionary, Key key,
+ IsolateT* isolate, Handle<Derived> dictionary, Key key,
Handle<Object> value, PropertyDetails details,
InternalIndex* entry_out = nullptr);
@@ -142,9 +142,9 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) BaseNameDictionary
inline int Hash() const;
// Creates a new dictionary.
- template <typename LocalIsolate>
+ template <typename IsolateT>
V8_WARN_UNUSED_RESULT static Handle<Derived> New(
- LocalIsolate* isolate, int at_least_space_for,
+ IsolateT* isolate, int at_least_space_for,
AllocationType allocation = AllocationType::kYoung,
MinimumCapacity capacity_option = USE_DEFAULT_MINIMUM_CAPACITY);
@@ -159,9 +159,9 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) BaseNameDictionary
static Handle<FixedArray> IterationIndices(Isolate* isolate,
Handle<Derived> dictionary);
- template <typename LocalIsolate>
+ template <typename IsolateT>
V8_WARN_UNUSED_RESULT static Handle<Derived> AddNoUpdateNextEnumerationIndex(
- LocalIsolate* isolate, Handle<Derived> dictionary, Key key,
+ IsolateT* isolate, Handle<Derived> dictionary, Key key,
Handle<Object> value, PropertyDetails details,
InternalIndex* entry_out = nullptr);
diff --git a/chromium/v8/src/objects/elements-kind.cc b/chromium/v8/src/objects/elements-kind.cc
index 78670f25bd0..6ffa34b868f 100644
--- a/chromium/v8/src/objects/elements-kind.cc
+++ b/chromium/v8/src/objects/elements-kind.cc
@@ -17,19 +17,30 @@ int ElementsKindToShiftSize(ElementsKind elements_kind) {
case UINT8_ELEMENTS:
case INT8_ELEMENTS:
case UINT8_CLAMPED_ELEMENTS:
+ case RAB_GSAB_UINT8_ELEMENTS:
+ case RAB_GSAB_INT8_ELEMENTS:
+ case RAB_GSAB_UINT8_CLAMPED_ELEMENTS:
return 0;
case UINT16_ELEMENTS:
case INT16_ELEMENTS:
+ case RAB_GSAB_UINT16_ELEMENTS:
+ case RAB_GSAB_INT16_ELEMENTS:
return 1;
case UINT32_ELEMENTS:
case INT32_ELEMENTS:
case FLOAT32_ELEMENTS:
+ case RAB_GSAB_UINT32_ELEMENTS:
+ case RAB_GSAB_INT32_ELEMENTS:
+ case RAB_GSAB_FLOAT32_ELEMENTS:
return 2;
case PACKED_DOUBLE_ELEMENTS:
case HOLEY_DOUBLE_ELEMENTS:
case FLOAT64_ELEMENTS:
case BIGINT64_ELEMENTS:
case BIGUINT64_ELEMENTS:
+ case RAB_GSAB_FLOAT64_ELEMENTS:
+ case RAB_GSAB_BIGINT64_ELEMENTS:
+ case RAB_GSAB_BIGUINT64_ELEMENTS:
return 3;
case PACKED_SMI_ELEMENTS:
case PACKED_ELEMENTS:
@@ -109,6 +120,7 @@ const char* ElementsKindToString(ElementsKind kind) {
return #TYPE "ELEMENTS";
TYPED_ARRAYS(PRINT_NAME);
+ RAB_GSAB_TYPED_ARRAYS(PRINT_NAME);
#undef PRINT_NAME
case NO_ELEMENTS:
return "NO_ELEMENTS";
diff --git a/chromium/v8/src/objects/elements-kind.h b/chromium/v8/src/objects/elements-kind.h
index ec3ca3402d1..b19e21136f3 100644
--- a/chromium/v8/src/objects/elements-kind.h
+++ b/chromium/v8/src/objects/elements-kind.h
@@ -28,6 +28,36 @@ namespace internal {
V(BigUint64, biguint64, BIGUINT64, uint64_t) \
V(BigInt64, bigint64, BIGINT64, int64_t)
+#define RAB_GSAB_TYPED_ARRAYS(V) \
+ V(RabGsabUint8, rab_gsab_uint8, RAB_GSAB_UINT8, uint8_t) \
+ V(RabGsabInt8, rab_gsab_int8, RAB_GSAB_INT8, int8_t) \
+ V(RabGsabUint16, rab_gsab_uint16, RAB_GSAB_UINT16, uint16_t) \
+ V(RabGsabInt16, rab_gsab_int16, RAB_GSAB_INT16, int16_t) \
+ V(RabGsabUint32, rab_gsab_uint32, RAB_GSAB_UINT32, uint32_t) \
+ V(RabGsabInt32, rab_gsab_int32, RAB_GSAB_INT32, int32_t) \
+ V(RabGsabFloat32, rab_gsab_float32, RAB_GSAB_FLOAT32, float) \
+ V(RabGsabFloat64, rab_gsab_float64, RAB_GSAB_FLOAT64, double) \
+ V(RabGsabUint8Clamped, rab_gsab_uint8_clamped, RAB_GSAB_UINT8_CLAMPED, \
+ uint8_t) \
+ V(RabGsabBigUint64, rab_gsab_biguint64, RAB_GSAB_BIGUINT64, uint64_t) \
+ V(RabGsabBigInt64, rab_gsab_bigint64, RAB_GSAB_BIGINT64, int64_t)
+
+// The TypedArrays backed by RAB / GSAB are called Uint8Array, Uint16Array etc,
+// and not RabGsabUint8Array, RabGsabUint16Array etc. This macro is used for
+// generating code which refers to the TypedArray type.
+#define RAB_GSAB_TYPED_ARRAYS_WITH_TYPED_ARRAY_TYPE(V) \
+ V(Uint8, rab_gsab_uint8, RAB_GSAB_UINT8, uint8_t) \
+ V(Int8, rab_gsab_int8, RAB_GSAB_INT8, int8_t) \
+ V(Uint16, rab_gsab_uint16, RAB_GSAB_UINT16, uint16_t) \
+ V(Int16, rab_gsab_int16, RAB_GSAB_INT16, int16_t) \
+ V(Uint32, rab_gsab_uint32, RAB_GSAB_UINT32, uint32_t) \
+ V(Int32, rab_gsab_int32, RAB_GSAB_INT32, int32_t) \
+ V(Float32, rab_gsab_float32, RAB_GSAB_FLOAT32, float) \
+ V(Float64, rab_gsab_float64, RAB_GSAB_FLOAT64, double) \
+ V(Uint8Clamped, rab_gsab_uint8_clamped, RAB_GSAB_UINT8_CLAMPED, uint8_t) \
+ V(BigUint64, rab_gsab_biguint64, RAB_GSAB_BIGUINT64, uint64_t) \
+ V(BigInt64, rab_gsab_bigint64, RAB_GSAB_BIGINT64, int64_t)
+
enum ElementsKind : uint8_t {
// The "fast" kind for elements that only contain SMI values. Must be first
// to make it possible to efficiently check maps for this kind.
@@ -71,6 +101,7 @@ enum ElementsKind : uint8_t {
// Fixed typed arrays.
#define TYPED_ARRAY_ELEMENTS_KIND(Type, type, TYPE, ctype) TYPE##_ELEMENTS,
TYPED_ARRAYS(TYPED_ARRAY_ELEMENTS_KIND)
+ RAB_GSAB_TYPED_ARRAYS(TYPED_ARRAY_ELEMENTS_KIND)
#undef TYPED_ARRAY_ELEMENTS_KIND
// Sentinel ElementsKind for objects with no elements.
@@ -78,11 +109,13 @@ enum ElementsKind : uint8_t {
// Derived constants from ElementsKind.
FIRST_ELEMENTS_KIND = PACKED_SMI_ELEMENTS,
- LAST_ELEMENTS_KIND = BIGINT64_ELEMENTS,
+ LAST_ELEMENTS_KIND = RAB_GSAB_BIGINT64_ELEMENTS,
FIRST_FAST_ELEMENTS_KIND = PACKED_SMI_ELEMENTS,
LAST_FAST_ELEMENTS_KIND = HOLEY_DOUBLE_ELEMENTS,
FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND = UINT8_ELEMENTS,
LAST_FIXED_TYPED_ARRAY_ELEMENTS_KIND = BIGINT64_ELEMENTS,
+ FIRST_RAB_GSAB_FIXED_TYPED_ARRAY_ELEMENTS_KIND = RAB_GSAB_UINT8_ELEMENTS,
+ LAST_RAB_GSAB_FIXED_TYPED_ARRAY_ELEMENTS_KIND = RAB_GSAB_BIGINT64_ELEMENTS,
TERMINAL_FAST_ELEMENTS_KIND = HOLEY_ELEMENTS,
FIRST_ANY_NONEXTENSIBLE_ELEMENTS_KIND = PACKED_NONEXTENSIBLE_ELEMENTS,
LAST_ANY_NONEXTENSIBLE_ELEMENTS_KIND = HOLEY_FROZEN_ELEMENTS,
@@ -103,7 +136,7 @@ constexpr int kFastElementsKindCount =
constexpr int kFastElementsKindPackedToHoley =
HOLEY_SMI_ELEMENTS - PACKED_SMI_ELEMENTS;
-constexpr int kElementsKindBits = 5;
+constexpr int kElementsKindBits = 6;
STATIC_ASSERT((1 << kElementsKindBits) > LAST_ELEMENTS_KIND);
STATIC_ASSERT((1 << (kElementsKindBits - 1)) <= LAST_ELEMENTS_KIND);
@@ -150,8 +183,20 @@ inline bool IsTypedArrayElementsKind(ElementsKind kind) {
LAST_FIXED_TYPED_ARRAY_ELEMENTS_KIND);
}
+inline bool IsRabGsabTypedArrayElementsKind(ElementsKind kind) {
+ return base::IsInRange(kind, FIRST_RAB_GSAB_FIXED_TYPED_ARRAY_ELEMENTS_KIND,
+ LAST_RAB_GSAB_FIXED_TYPED_ARRAY_ELEMENTS_KIND);
+}
+
+inline bool IsTypedArrayOrRabGsabTypedArrayElementsKind(ElementsKind kind) {
+ return base::IsInRange(kind, FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND,
+ LAST_RAB_GSAB_FIXED_TYPED_ARRAY_ELEMENTS_KIND);
+}
+
inline bool IsTerminalElementsKind(ElementsKind kind) {
- return kind == TERMINAL_FAST_ELEMENTS_KIND || IsTypedArrayElementsKind(kind);
+ return kind == TERMINAL_FAST_ELEMENTS_KIND ||
+ IsTypedArrayElementsKind(kind) ||
+ IsRabGsabTypedArrayElementsKind(kind);
}
inline bool IsFastElementsKind(ElementsKind kind) {
@@ -281,6 +326,13 @@ inline ElementsKind GetHoleyElementsKind(ElementsKind packed_kind) {
return packed_kind;
}
+inline ElementsKind GetCorrespondingRabGsabElementsKind(
+ ElementsKind typed_array_kind) {
+ DCHECK(IsTypedArrayElementsKind(typed_array_kind));
+ return ElementsKind(typed_array_kind - FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND +
+ FIRST_RAB_GSAB_FIXED_TYPED_ARRAY_ELEMENTS_KIND);
+}
+
inline bool UnionElementsKindUptoPackedness(ElementsKind* a_out,
ElementsKind b) {
// Assert that the union of two ElementKinds can be computed via std::max.
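
The new RAB/GSAB kinds form a contiguous block that mirrors the fixed-length typed array kinds entry for entry; that parallel layout is what makes the offset arithmetic in GetCorrespondingRabGsabElementsKind valid, and the larger kind count is why kElementsKindBits grows from 5 to 6. A simplified sketch of the idea, using a hypothetical enum rather than V8's real numbering:

    #include <cstdint>

    // Hypothetical, simplified enum: two parallel, contiguous blocks of kinds.
    // The real V8 enum has many more entries; only the layout idea matters here.
    enum Kind : uint8_t {
      UINT8,
      INT8,
      FLOAT64,           // ...remaining fixed-length typed array kinds elided
      RAB_GSAB_UINT8,
      RAB_GSAB_INT8,
      RAB_GSAB_FLOAT64,  // ...remaining RAB/GSAB kinds elided
    };

    constexpr Kind ToRabGsab(Kind fixed_kind) {
      // Valid only because the RAB/GSAB block mirrors the fixed-length block
      // entry for entry, in the same order.
      return static_cast<Kind>(fixed_kind - UINT8 + RAB_GSAB_UINT8);
    }

    static_assert(ToRabGsab(FLOAT64) == RAB_GSAB_FLOAT64, "parallel layout");
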
diff --git a/chromium/v8/src/objects/elements.cc b/chromium/v8/src/objects/elements.cc
index 9b1c7936bb6..e70ae489008 100644
--- a/chromium/v8/src/objects/elements.cc
+++ b/chromium/v8/src/objects/elements.cc
@@ -4,6 +4,7 @@
#include "src/objects/elements.h"
+#include "src/base/atomicops.h"
#include "src/common/message-template.h"
#include "src/execution/arguments.h"
#include "src/execution/frames.h"
@@ -59,6 +60,17 @@
// - Uint8ClampedElementsAccessor
// - BigUint64ElementsAccessor
// - BigInt64ElementsAccessor
+// - RabGsabUint8ElementsAccessor
+// - RabGsabInt8ElementsAccessor
+// - RabGsabUint16ElementsAccessor
+// - RabGsabInt16ElementsAccessor
+// - RabGsabUint32ElementsAccessor
+// - RabGsabInt32ElementsAccessor
+// - RabGsabFloat32ElementsAccessor
+// - RabGsabFloat64ElementsAccessor
+// - RabGsabUint8ClampedElementsAccessor
+// - RabGsabBigUint64ElementsAccessor
+// - RabGsabBigInt64ElementsAccessor
// - DictionaryElementsAccessor
// - SloppyArgumentsElementsAccessor
// - FastSloppyArgumentsElementsAccessor
@@ -129,7 +141,19 @@ enum Where { AT_START, AT_END };
V(Float64ElementsAccessor, FLOAT64_ELEMENTS, ByteArray) \
V(Uint8ClampedElementsAccessor, UINT8_CLAMPED_ELEMENTS, ByteArray) \
V(BigUint64ElementsAccessor, BIGUINT64_ELEMENTS, ByteArray) \
- V(BigInt64ElementsAccessor, BIGINT64_ELEMENTS, ByteArray)
+ V(BigInt64ElementsAccessor, BIGINT64_ELEMENTS, ByteArray) \
+ V(RabGsabUint8ElementsAccessor, RAB_GSAB_UINT8_ELEMENTS, ByteArray) \
+ V(RabGsabInt8ElementsAccessor, RAB_GSAB_INT8_ELEMENTS, ByteArray) \
+ V(RabGsabUint16ElementsAccessor, RAB_GSAB_UINT16_ELEMENTS, ByteArray) \
+ V(RabGsabInt16ElementsAccessor, RAB_GSAB_INT16_ELEMENTS, ByteArray) \
+ V(RabGsabUint32ElementsAccessor, RAB_GSAB_UINT32_ELEMENTS, ByteArray) \
+ V(RabGsabInt32ElementsAccessor, RAB_GSAB_INT32_ELEMENTS, ByteArray) \
+ V(RabGsabFloat32ElementsAccessor, RAB_GSAB_FLOAT32_ELEMENTS, ByteArray) \
+ V(RabGsabFloat64ElementsAccessor, RAB_GSAB_FLOAT64_ELEMENTS, ByteArray) \
+ V(RabGsabUint8ClampedElementsAccessor, RAB_GSAB_UINT8_CLAMPED_ELEMENTS, \
+ ByteArray) \
+ V(RabGsabBigUint64ElementsAccessor, RAB_GSAB_BIGUINT64_ELEMENTS, ByteArray) \
+ V(RabGsabBigInt64ElementsAccessor, RAB_GSAB_BIGINT64_ELEMENTS, ByteArray)
template <ElementsKind Kind>
class ElementsKindTraits {
@@ -639,57 +663,64 @@ class ElementsAccessorBase : public InternalElementsAccessor {
UNREACHABLE();
}
- void Add(Handle<JSObject> object, uint32_t index, Handle<Object> value,
- PropertyAttributes attributes, uint32_t new_capacity) final {
- Subclass::AddImpl(object, index, value, attributes, new_capacity);
+ Maybe<bool> Add(Handle<JSObject> object, uint32_t index, Handle<Object> value,
+ PropertyAttributes attributes, uint32_t new_capacity) final {
+ return Subclass::AddImpl(object, index, value, attributes, new_capacity);
}
- static void AddImpl(Handle<JSObject> object, uint32_t index,
- Handle<Object> value, PropertyAttributes attributes,
- uint32_t new_capacity) {
+ static Maybe<bool> AddImpl(Handle<JSObject> object, uint32_t index,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ uint32_t new_capacity) {
UNREACHABLE();
}
- uint32_t Push(Handle<JSArray> receiver, BuiltinArguments* args,
- uint32_t push_size) final {
+ Maybe<uint32_t> Push(Handle<JSArray> receiver, BuiltinArguments* args,
+ uint32_t push_size) final {
return Subclass::PushImpl(receiver, args, push_size);
}
- static uint32_t PushImpl(Handle<JSArray> receiver, BuiltinArguments* args,
- uint32_t push_sized) {
+ static Maybe<uint32_t> PushImpl(Handle<JSArray> receiver,
+ BuiltinArguments* args, uint32_t push_sized) {
UNREACHABLE();
}
- uint32_t Unshift(Handle<JSArray> receiver, BuiltinArguments* args,
- uint32_t unshift_size) final {
+ Maybe<uint32_t> Unshift(Handle<JSArray> receiver, BuiltinArguments* args,
+ uint32_t unshift_size) final {
return Subclass::UnshiftImpl(receiver, args, unshift_size);
}
- static uint32_t UnshiftImpl(Handle<JSArray> receiver, BuiltinArguments* args,
- uint32_t unshift_size) {
+ static Maybe<uint32_t> UnshiftImpl(Handle<JSArray> receiver,
+ BuiltinArguments* args,
+ uint32_t unshift_size) {
UNREACHABLE();
}
- Handle<Object> Pop(Handle<JSArray> receiver) final {
+ MaybeHandle<Object> Pop(Handle<JSArray> receiver) final {
return Subclass::PopImpl(receiver);
}
- static Handle<Object> PopImpl(Handle<JSArray> receiver) { UNREACHABLE(); }
+ static MaybeHandle<Object> PopImpl(Handle<JSArray> receiver) {
+ UNREACHABLE();
+ }
- Handle<Object> Shift(Handle<JSArray> receiver) final {
+ MaybeHandle<Object> Shift(Handle<JSArray> receiver) final {
return Subclass::ShiftImpl(receiver);
}
- static Handle<Object> ShiftImpl(Handle<JSArray> receiver) { UNREACHABLE(); }
+ static MaybeHandle<Object> ShiftImpl(Handle<JSArray> receiver) {
+ UNREACHABLE();
+ }
- void SetLength(Handle<JSArray> array, uint32_t length) final {
- Subclass::SetLengthImpl(array->GetIsolate(), array, length,
- handle(array->elements(), array->GetIsolate()));
+ Maybe<bool> SetLength(Handle<JSArray> array, uint32_t length) final {
+ return Subclass::SetLengthImpl(
+ array->GetIsolate(), array, length,
+ handle(array->elements(), array->GetIsolate()));
}
- static void SetLengthImpl(Isolate* isolate, Handle<JSArray> array,
- uint32_t length,
- Handle<FixedArrayBase> backing_store) {
+ static Maybe<bool> SetLengthImpl(Isolate* isolate, Handle<JSArray> array,
+ uint32_t length,
+ Handle<FixedArrayBase> backing_store) {
DCHECK(!array->SetLengthWouldNormalize(length));
DCHECK(IsFastElementsKind(array->GetElementsKind()));
uint32_t old_length = 0;
@@ -735,11 +766,13 @@ class ElementsAccessorBase : public InternalElementsAccessor {
} else {
// Check whether the backing store should be expanded.
capacity = std::max(length, JSObject::NewElementsCapacity(capacity));
- Subclass::GrowCapacityAndConvertImpl(array, capacity);
+ MAYBE_RETURN(Subclass::GrowCapacityAndConvertImpl(array, capacity),
+ Nothing<bool>());
}
array->set_length(Smi::FromInt(length));
JSObject::ValidateElements(*array);
+ return Just(true);
}
size_t NumberOfElements(JSObject receiver) final {
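
The recurring change in elements.cc is that mutating operations such as Add, Push, Pop, SetLength and GrowCapacityAndConvert now report failure (for example an allocation that would exceed the maximum array length) through Maybe<T> or MaybeHandle<T> instead of returning plain values, so every caller checks and propagates. A rough sketch of that calling pattern with stand-in types; GrowCapacity, AddElement and kMaxLength are illustrative names, and std::optional stands in for V8's Maybe:

    #include <cstdint>
    #include <optional>

    // Stand-ins for V8's Maybe<T>/Just/Nothing, only to show the calling
    // pattern: every fallible step is checked and failure is propagated.
    template <typename T>
    using Maybe = std::optional<T>;

    Maybe<uint32_t> GrowCapacity(uint32_t requested) {
      constexpr uint32_t kMaxLength = 1u << 30;         // illustrative cap only
      if (requested > kMaxLength) return std::nullopt;  // ~ throwing a RangeError
      return requested;
    }

    Maybe<bool> AddElement(uint32_t index, uint32_t capacity) {
      if (index >= capacity) {
        Maybe<uint32_t> grown = GrowCapacity(index + 1);
        if (!grown) return std::nullopt;  // ~ MAYBE_RETURN(..., Nothing<bool>())
        capacity = *grown;
      }
      // ... store the element at `index` ...
      return true;  // ~ Just(true)
    }
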
@@ -765,23 +798,35 @@ class ElementsAccessorBase : public InternalElementsAccessor {
return Subclass::GetMaxIndex(receiver, elements);
}
- static Handle<FixedArrayBase> ConvertElementsWithCapacity(
+ static MaybeHandle<FixedArrayBase> ConvertElementsWithCapacity(
Handle<JSObject> object, Handle<FixedArrayBase> old_elements,
ElementsKind from_kind, uint32_t capacity) {
return ConvertElementsWithCapacity(object, old_elements, from_kind,
capacity, 0, 0);
}
- static Handle<FixedArrayBase> ConvertElementsWithCapacity(
+ static MaybeHandle<FixedArrayBase> ConvertElementsWithCapacity(
Handle<JSObject> object, Handle<FixedArrayBase> old_elements,
ElementsKind from_kind, uint32_t capacity, uint32_t src_index,
uint32_t dst_index) {
Isolate* isolate = object->GetIsolate();
Handle<FixedArrayBase> new_elements;
+ // TODO(victorgomes): Retrieve native context in optimized code
+ // and remove the check isolate->context().is_null().
if (IsDoubleElementsKind(kind())) {
+ if (!isolate->context().is_null() &&
+ !base::IsInRange(capacity, 0, FixedDoubleArray::kMaxLength)) {
+ return isolate->Throw<FixedArrayBase>(isolate->factory()->NewRangeError(
+ MessageTemplate::kInvalidArrayLength));
+ }
new_elements = isolate->factory()->NewFixedDoubleArray(capacity);
} else {
- new_elements = isolate->factory()->NewUninitializedFixedArray(capacity);
+ if (!isolate->context().is_null() &&
+ !base::IsInRange(capacity, 0, FixedArray::kMaxLength)) {
+ return isolate->Throw<FixedArrayBase>(isolate->factory()->NewRangeError(
+ MessageTemplate::kInvalidArrayLength));
+ }
+ new_elements = isolate->factory()->NewFixedArray(capacity);
}
int packed_size = kPackedSizeNotKnown;
@@ -793,11 +838,11 @@ class ElementsAccessorBase : public InternalElementsAccessor {
from_kind, dst_index, packed_size,
kCopyToEndAndInitializeToHole);
- return new_elements;
+ return MaybeHandle<FixedArrayBase>(new_elements);
}
- static void TransitionElementsKindImpl(Handle<JSObject> object,
- Handle<Map> to_map) {
+ static Maybe<bool> TransitionElementsKindImpl(Handle<JSObject> object,
+ Handle<Map> to_map) {
Isolate* isolate = object->GetIsolate();
Handle<Map> from_map = handle(object->map(), isolate);
ElementsKind from_kind = from_map->elements_kind();
@@ -822,8 +867,12 @@ class ElementsAccessorBase : public InternalElementsAccessor {
(IsSmiElementsKind(from_kind) && IsDoubleElementsKind(to_kind)) ||
(IsDoubleElementsKind(from_kind) && IsObjectElementsKind(to_kind)));
uint32_t capacity = static_cast<uint32_t>(object->elements().length());
- Handle<FixedArrayBase> elements = ConvertElementsWithCapacity(
- object, from_elements, from_kind, capacity);
+ Handle<FixedArrayBase> elements;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ object->GetIsolate(), elements,
+ ConvertElementsWithCapacity(object, from_elements, from_kind,
+ capacity),
+ Nothing<bool>());
JSObject::SetMapAndElements(object, to_map, elements);
}
if (FLAG_trace_elements_transitions) {
@@ -832,10 +881,11 @@ class ElementsAccessorBase : public InternalElementsAccessor {
handle(object->elements(), isolate));
}
}
+ return Just(true);
}
- static void GrowCapacityAndConvertImpl(Handle<JSObject> object,
- uint32_t capacity) {
+ static Maybe<bool> GrowCapacityAndConvertImpl(Handle<JSObject> object,
+ uint32_t capacity) {
ElementsKind from_kind = object->GetElementsKind();
if (IsSmiOrObjectElementsKind(from_kind)) {
// Array optimizations rely on the prototype lookups of Array objects
@@ -850,15 +900,18 @@ class ElementsAccessorBase : public InternalElementsAccessor {
DCHECK(IsDoubleElementsKind(from_kind) != IsDoubleElementsKind(kind()) ||
IsDictionaryElementsKind(from_kind) ||
static_cast<uint32_t>(old_elements->length()) < capacity);
- Subclass::BasicGrowCapacityAndConvertImpl(object, old_elements, from_kind,
- kind(), capacity);
+ return Subclass::BasicGrowCapacityAndConvertImpl(
+ object, old_elements, from_kind, kind(), capacity);
}
- static void BasicGrowCapacityAndConvertImpl(
+ static Maybe<bool> BasicGrowCapacityAndConvertImpl(
Handle<JSObject> object, Handle<FixedArrayBase> old_elements,
ElementsKind from_kind, ElementsKind to_kind, uint32_t capacity) {
- Handle<FixedArrayBase> elements =
- ConvertElementsWithCapacity(object, old_elements, from_kind, capacity);
+ Handle<FixedArrayBase> elements;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ object->GetIsolate(), elements,
+ ConvertElementsWithCapacity(object, old_elements, from_kind, capacity),
+ Nothing<bool>());
if (IsHoleyElementsKind(from_kind)) {
to_kind = GetHoleyElementsKind(to_kind);
@@ -873,40 +926,45 @@ class ElementsAccessorBase : public InternalElementsAccessor {
JSObject::PrintElementsTransition(stdout, object, from_kind, old_elements,
to_kind, elements);
}
+ return Just(true);
}
- void TransitionElementsKind(Handle<JSObject> object, Handle<Map> map) final {
- Subclass::TransitionElementsKindImpl(object, map);
+ Maybe<bool> TransitionElementsKind(Handle<JSObject> object,
+ Handle<Map> map) final {
+ return Subclass::TransitionElementsKindImpl(object, map);
}
- void GrowCapacityAndConvert(Handle<JSObject> object,
- uint32_t capacity) final {
- Subclass::GrowCapacityAndConvertImpl(object, capacity);
+ Maybe<bool> GrowCapacityAndConvert(Handle<JSObject> object,
+ uint32_t capacity) final {
+ return Subclass::GrowCapacityAndConvertImpl(object, capacity);
}
- bool GrowCapacity(Handle<JSObject> object, uint32_t index) final {
+ Maybe<bool> GrowCapacity(Handle<JSObject> object, uint32_t index) final {
// This function is intended to be called from optimized code. We don't
// want to trigger lazy deopts there, so refuse to handle cases that would.
if (object->map().is_prototype_map() ||
object->WouldConvertToSlowElements(index)) {
- return false;
+ return Just(false);
}
Handle<FixedArrayBase> old_elements(object->elements(),
object->GetIsolate());
uint32_t new_capacity = JSObject::NewElementsCapacity(index + 1);
DCHECK(static_cast<uint32_t>(old_elements->length()) < new_capacity);
- Handle<FixedArrayBase> elements =
- ConvertElementsWithCapacity(object, old_elements, kind(), new_capacity);
+ Handle<FixedArrayBase> elements;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ object->GetIsolate(), elements,
+ ConvertElementsWithCapacity(object, old_elements, kind(), new_capacity),
+ Nothing<bool>());
DCHECK_EQ(object->GetElementsKind(), kind());
// Transition through the allocation site as well if present.
if (JSObject::UpdateAllocationSite<AllocationSiteUpdateMode::kCheckOnly>(
object, kind())) {
- return false;
+ return Just(false);
}
object->set_elements(*elements);
- return true;
+ return Just(true);
}
void Delete(Handle<JSObject> obj, InternalIndex entry) final {
@@ -1220,13 +1278,14 @@ class ElementsAccessorBase : public InternalElementsAccessor {
return Subclass::GetCapacityImpl(holder, backing_store);
}
- static Object FillImpl(Handle<JSObject> receiver, Handle<Object> obj_value,
- size_t start, size_t end) {
+ static MaybeHandle<Object> FillImpl(Handle<JSObject> receiver,
+ Handle<Object> obj_value, size_t start,
+ size_t end) {
UNREACHABLE();
}
- Object Fill(Handle<JSObject> receiver, Handle<Object> obj_value, size_t start,
- size_t end) override {
+ MaybeHandle<Object> Fill(Handle<JSObject> receiver, Handle<Object> obj_value,
+ size_t start, size_t end) override {
return Subclass::FillImpl(receiver, obj_value, start, end);
}
@@ -1348,9 +1407,9 @@ class DictionaryElementsAccessor
return dict.NumberOfElements();
}
- static void SetLengthImpl(Isolate* isolate, Handle<JSArray> array,
- uint32_t length,
- Handle<FixedArrayBase> backing_store) {
+ static Maybe<bool> SetLengthImpl(Isolate* isolate, Handle<JSArray> array,
+ uint32_t length,
+ Handle<FixedArrayBase> backing_store) {
Handle<NumberDictionary> dict =
Handle<NumberDictionary>::cast(backing_store);
uint32_t old_length = 0;
@@ -1401,6 +1460,7 @@ class DictionaryElementsAccessor
Handle<Object> length_obj = isolate->factory()->NewNumberFromUint(length);
array->set_length(*length_obj);
+ return Just(true);
}
static void CopyElementsImpl(Isolate* isolate, FixedArrayBase from,
@@ -1466,9 +1526,10 @@ class DictionaryElementsAccessor
dictionary.DetailsAtPut(entry, details);
}
- static void AddImpl(Handle<JSObject> object, uint32_t index,
- Handle<Object> value, PropertyAttributes attributes,
- uint32_t new_capacity) {
+ static Maybe<bool> AddImpl(Handle<JSObject> object, uint32_t index,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ uint32_t new_capacity) {
PropertyDetails details(kData, attributes, PropertyCellType::kNoCell);
Handle<NumberDictionary> dictionary =
object->HasFastElements() || object->HasFastStringWrapperElements()
@@ -1479,8 +1540,9 @@ class DictionaryElementsAccessor
object->GetIsolate(), dictionary, index, value, details);
new_dictionary->UpdateMaxNumberKey(index, object);
if (attributes != NONE) object->RequireSlowElements(*new_dictionary);
- if (dictionary.is_identical_to(new_dictionary)) return;
+ if (dictionary.is_identical_to(new_dictionary)) return Just(true);
object->set_elements(*new_dictionary);
+ return Just(true);
}
static bool HasEntryImpl(Isolate* isolate, FixedArrayBase store,
@@ -1904,12 +1966,10 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
backing_store->set_the_hole(isolate, entry);
// TODO(verwaest): Move this out of elements.cc.
- // If an old space backing store is larger than a certain size and
+ // If the backing store is larger than a certain size and
// has too few used values, normalize it.
const int kMinLengthForSparsenessCheck = 64;
if (backing_store->length() < kMinLengthForSparsenessCheck) return;
- // TODO(ulan): Check if it works with young large objects.
- if (ObjectInYoungGeneration(*backing_store)) return;
uint32_t length = 0;
if (obj->IsJSArray()) {
JSArray::cast(*obj).length().ToArrayLength(&length);
@@ -1971,9 +2031,10 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
value, attributes);
}
- static void AddImpl(Handle<JSObject> object, uint32_t index,
- Handle<Object> value, PropertyAttributes attributes,
- uint32_t new_capacity) {
+ static Maybe<bool> AddImpl(Handle<JSObject> object, uint32_t index,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ uint32_t new_capacity) {
DCHECK_EQ(NONE, attributes);
ElementsKind from_kind = object->GetElementsKind();
ElementsKind to_kind = Subclass::kind();
@@ -1981,7 +2042,8 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
IsDoubleElementsKind(from_kind) != IsDoubleElementsKind(to_kind) ||
Subclass::GetCapacityImpl(*object, object->elements()) !=
new_capacity) {
- Subclass::GrowCapacityAndConvertImpl(object, new_capacity);
+ MAYBE_RETURN(Subclass::GrowCapacityAndConvertImpl(object, new_capacity),
+ Nothing<bool>());
} else {
if (IsFastElementsKind(from_kind) && from_kind != to_kind) {
JSObject::TransitionElementsKind(object, to_kind);
@@ -1992,6 +2054,7 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
}
}
Subclass::SetImpl(object, InternalIndex(index), *value);
+ return Just(true);
}
static void DeleteImpl(Handle<JSObject> obj, InternalIndex entry) {
@@ -2086,24 +2149,25 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
#endif
}
- static Handle<Object> PopImpl(Handle<JSArray> receiver) {
+ static MaybeHandle<Object> PopImpl(Handle<JSArray> receiver) {
return Subclass::RemoveElement(receiver, AT_END);
}
- static Handle<Object> ShiftImpl(Handle<JSArray> receiver) {
+ static MaybeHandle<Object> ShiftImpl(Handle<JSArray> receiver) {
return Subclass::RemoveElement(receiver, AT_START);
}
- static uint32_t PushImpl(Handle<JSArray> receiver, BuiltinArguments* args,
- uint32_t push_size) {
+ static Maybe<uint32_t> PushImpl(Handle<JSArray> receiver,
+ BuiltinArguments* args, uint32_t push_size) {
Handle<FixedArrayBase> backing_store(receiver->elements(),
receiver->GetIsolate());
return Subclass::AddArguments(receiver, backing_store, args, push_size,
AT_END);
}
- static uint32_t UnshiftImpl(Handle<JSArray> receiver, BuiltinArguments* args,
- uint32_t unshift_size) {
+ static Maybe<uint32_t> UnshiftImpl(Handle<JSArray> receiver,
+ BuiltinArguments* args,
+ uint32_t unshift_size) {
Handle<FixedArrayBase> backing_store(receiver->elements(),
receiver->GetIsolate());
return Subclass::AddArguments(receiver, backing_store, args, unshift_size,
@@ -2137,8 +2201,9 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
}
}
- static Object FillImpl(Handle<JSObject> receiver, Handle<Object> obj_value,
- size_t start, size_t end) {
+ static MaybeHandle<Object> FillImpl(Handle<JSObject> receiver,
+ Handle<Object> obj_value, size_t start,
+ size_t end) {
// Ensure indexes are within array bounds
DCHECK_LE(0, start);
DCHECK_LE(start, end);
@@ -2151,8 +2216,8 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
// Make sure we have enough space.
DCHECK_LE(end, std::numeric_limits<uint32_t>::max());
if (end > Subclass::GetCapacityImpl(*receiver, receiver->elements())) {
- Subclass::GrowCapacityAndConvertImpl(receiver,
- static_cast<uint32_t>(end));
+ MAYBE_RETURN_NULL(Subclass::GrowCapacityAndConvertImpl(
+ receiver, static_cast<uint32_t>(end)));
CHECK_EQ(Subclass::kind(), receiver->GetElementsKind());
}
DCHECK_LE(end, Subclass::GetCapacityImpl(*receiver, receiver->elements()));
@@ -2160,7 +2225,7 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
for (size_t index = start; index < end; ++index) {
Subclass::SetImpl(receiver, InternalIndex(index), *obj_value);
}
- return *receiver;
+ return MaybeHandle<Object>(receiver);
}
static Maybe<bool> IncludesValueImpl(Isolate* isolate,
@@ -2322,8 +2387,8 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
return result;
}
- static Handle<Object> RemoveElement(Handle<JSArray> receiver,
- Where remove_position) {
+ static MaybeHandle<Object> RemoveElement(Handle<JSArray> receiver,
+ Where remove_position) {
Isolate* isolate = receiver->GetIsolate();
ElementsKind kind = KindTraits::Kind;
if (IsSmiOrObjectElementsKind(kind)) {
@@ -2341,24 +2406,26 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
Subclass::MoveElements(isolate, receiver, backing_store, 0, 1, new_length,
0, 0);
}
- Subclass::SetLengthImpl(isolate, receiver, new_length, backing_store);
+ MAYBE_RETURN_NULL(
+ Subclass::SetLengthImpl(isolate, receiver, new_length, backing_store));
if (IsHoleyElementsKind(kind) && result->IsTheHole(isolate)) {
return isolate->factory()->undefined_value();
}
- return result;
+ return MaybeHandle<Object>(result);
}
- static uint32_t AddArguments(Handle<JSArray> receiver,
- Handle<FixedArrayBase> backing_store,
- BuiltinArguments* args, uint32_t add_size,
- Where add_position) {
+ static Maybe<uint32_t> AddArguments(Handle<JSArray> receiver,
+ Handle<FixedArrayBase> backing_store,
+ BuiltinArguments* args, uint32_t add_size,
+ Where add_position) {
uint32_t length = Smi::ToInt(receiver->length());
DCHECK_LT(0, add_size);
uint32_t elms_len = backing_store->length();
// Check we do not overflow the new_length.
DCHECK(add_size <= static_cast<uint32_t>(Smi::kMaxValue - length));
uint32_t new_length = length + add_size;
+ Isolate* isolate = receiver->GetIsolate();
if (new_length > elms_len) {
// New backing storage is needed.
@@ -2366,14 +2433,16 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
// If we add arguments to the start we have to shift the existing objects.
int copy_dst_index = add_position == AT_START ? add_size : 0;
// Copy over all objects to a new backing_store.
- backing_store = Subclass::ConvertElementsWithCapacity(
- receiver, backing_store, KindTraits::Kind, capacity, 0,
- copy_dst_index);
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, backing_store,
+ Subclass::ConvertElementsWithCapacity(receiver, backing_store,
+ KindTraits::Kind, capacity, 0,
+ copy_dst_index),
+ Nothing<uint32_t>());
receiver->set_elements(*backing_store);
} else if (add_position == AT_START) {
// If the backing store has enough capacity and we add elements to the
// start we have to shift the existing objects.
- Isolate* isolate = receiver->GetIsolate();
Subclass::MoveElements(isolate, receiver, backing_store, add_size, 0,
length, 0, 0);
}
@@ -2383,7 +2452,7 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
Subclass::CopyArguments(args, backing_store, add_size, 1, insertion_index);
// Set the length.
receiver->set_length(Smi::FromInt(new_length));
- return new_length;
+ return Just(new_length);
}
static void CopyArguments(BuiltinArguments* args,
@@ -2469,6 +2538,7 @@ class FastSmiOrObjectElementsAccessor
case SLOW_STRING_WRAPPER_ELEMENTS:
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) case TYPE##_ELEMENTS:
TYPED_ARRAYS(TYPED_ARRAY_CASE)
+ RAB_GSAB_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
// This function is currently only used for JSArrays with non-zero
// length.
@@ -2569,28 +2639,29 @@ class FastNonextensibleObjectElementsAccessor
public:
using BackingStore = typename KindTraits::BackingStore;
- static uint32_t PushImpl(Handle<JSArray> receiver, BuiltinArguments* args,
- uint32_t push_size) {
+ static Maybe<uint32_t> PushImpl(Handle<JSArray> receiver,
+ BuiltinArguments* args, uint32_t push_size) {
UNREACHABLE();
}
- static void AddImpl(Handle<JSObject> object, uint32_t index,
- Handle<Object> value, PropertyAttributes attributes,
- uint32_t new_capacity) {
+ static Maybe<bool> AddImpl(Handle<JSObject> object, uint32_t index,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ uint32_t new_capacity) {
UNREACHABLE();
}
// TODO(duongn): refactor this due to code duplication of sealed version.
// Consider using JSObject::NormalizeElements(). Also consider follow the fast
// element logic instead of changing to dictionary mode.
- static void SetLengthImpl(Isolate* isolate, Handle<JSArray> array,
- uint32_t length,
- Handle<FixedArrayBase> backing_store) {
+ static Maybe<bool> SetLengthImpl(Isolate* isolate, Handle<JSArray> array,
+ uint32_t length,
+ Handle<FixedArrayBase> backing_store) {
uint32_t old_length = 0;
CHECK(array->length().ToArrayIndex(&old_length));
if (length == old_length) {
// Do nothing.
- return;
+ return Just(true);
}
// Transition to DICTIONARY_ELEMENTS.
@@ -2622,8 +2693,8 @@ class FastNonextensibleObjectElementsAccessor
// Set length.
Handle<FixedArrayBase> new_backing_store(array->elements(), isolate);
- DictionaryElementsAccessor::SetLengthImpl(isolate, array, length,
- new_backing_store);
+ return DictionaryElementsAccessor::SetLengthImpl(isolate, array, length,
+ new_backing_store);
}
};
@@ -2662,30 +2733,33 @@ class FastSealedObjectElementsAccessor
UNREACHABLE();
}
- static Handle<Object> PopImpl(Handle<JSArray> receiver) { UNREACHABLE(); }
+ static MaybeHandle<Object> PopImpl(Handle<JSArray> receiver) {
+ UNREACHABLE();
+ }
- static uint32_t PushImpl(Handle<JSArray> receiver, BuiltinArguments* args,
- uint32_t push_size) {
+ static Maybe<uint32_t> PushImpl(Handle<JSArray> receiver,
+ BuiltinArguments* args, uint32_t push_size) {
UNREACHABLE();
}
- static void AddImpl(Handle<JSObject> object, uint32_t index,
- Handle<Object> value, PropertyAttributes attributes,
- uint32_t new_capacity) {
+ static Maybe<bool> AddImpl(Handle<JSObject> object, uint32_t index,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ uint32_t new_capacity) {
UNREACHABLE();
}
// TODO(duongn): refactor this due to code duplication of nonextensible
// version. Consider using JSObject::NormalizeElements(). Also consider follow
// the fast element logic instead of changing to dictionary mode.
- static void SetLengthImpl(Isolate* isolate, Handle<JSArray> array,
- uint32_t length,
- Handle<FixedArrayBase> backing_store) {
+ static Maybe<bool> SetLengthImpl(Isolate* isolate, Handle<JSArray> array,
+ uint32_t length,
+ Handle<FixedArrayBase> backing_store) {
uint32_t old_length = 0;
CHECK(array->length().ToArrayIndex(&old_length));
if (length == old_length) {
// Do nothing.
- return;
+ return Just(true);
}
// Transition to DICTIONARY_ELEMENTS.
@@ -2717,8 +2791,8 @@ class FastSealedObjectElementsAccessor
// Set length
Handle<FixedArrayBase> new_backing_store(array->elements(), isolate);
- DictionaryElementsAccessor::SetLengthImpl(isolate, array, length,
- new_backing_store);
+ return DictionaryElementsAccessor::SetLengthImpl(isolate, array, length,
+ new_backing_store);
}
};
@@ -2772,22 +2846,25 @@ class FastFrozenObjectElementsAccessor
UNREACHABLE();
}
- static Handle<Object> PopImpl(Handle<JSArray> receiver) { UNREACHABLE(); }
+ static MaybeHandle<Object> PopImpl(Handle<JSArray> receiver) {
+ UNREACHABLE();
+ }
- static uint32_t PushImpl(Handle<JSArray> receiver, BuiltinArguments* args,
- uint32_t push_size) {
+ static Maybe<uint32_t> PushImpl(Handle<JSArray> receiver,
+ BuiltinArguments* args, uint32_t push_size) {
UNREACHABLE();
}
- static void AddImpl(Handle<JSObject> object, uint32_t index,
- Handle<Object> value, PropertyAttributes attributes,
- uint32_t new_capacity) {
+ static Maybe<bool> AddImpl(Handle<JSObject> object, uint32_t index,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ uint32_t new_capacity) {
UNREACHABLE();
}
- static void SetLengthImpl(Isolate* isolate, Handle<JSArray> array,
- uint32_t length,
- Handle<FixedArrayBase> backing_store) {
+ static Maybe<bool> SetLengthImpl(Isolate* isolate, Handle<JSArray> array,
+ uint32_t length,
+ Handle<FixedArrayBase> backing_store) {
UNREACHABLE();
}
@@ -2877,6 +2954,7 @@ class FastDoubleElementsAccessor
case NO_ELEMENTS:
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) case TYPE##_ELEMENTS:
TYPED_ARRAYS(TYPED_ARRAY_CASE)
+ RAB_GSAB_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
// This function is currently only used for JSArrays with non-zero
// length.
@@ -2952,6 +3030,8 @@ class FastHoleyDoubleElementsAccessor
FastHoleyDoubleElementsAccessor,
ElementsKindTraits<HOLEY_DOUBLE_ELEMENTS>> {};
+enum IsSharedBuffer : bool { kShared = true, kUnshared = false };
+
// Super class for all external element arrays.
template <ElementsKind Kind, typename ElementType>
class TypedElementsAccessor
@@ -2998,42 +3078,70 @@ class TypedElementsAccessor
static void SetImpl(Handle<JSObject> holder, InternalIndex entry,
Object value) {
Handle<JSTypedArray> typed_array = Handle<JSTypedArray>::cast(holder);
- DCHECK_LE(entry.raw_value(), typed_array->length());
- SetImpl(static_cast<ElementType*>(typed_array->DataPtr()),
- entry.raw_value(), FromObject(value));
- }
+ DCHECK_LE(entry.raw_value(), typed_array->GetLength());
+ auto* entry_ptr =
+ static_cast<ElementType*>(typed_array->DataPtr()) + entry.raw_value();
+ auto is_shared = typed_array->buffer().is_shared() ? kShared : kUnshared;
+ SetImpl(entry_ptr, FromObject(value), is_shared);
+ }
+
+ static void SetImpl(ElementType* data_ptr, ElementType value,
+ IsSharedBuffer is_shared) {
+ // TODO(ishell, v8:8875): Independent of pointer compression, 8-byte size
+ // fields (external pointers, doubles and BigInt data) are not always 8-byte
+ // aligned. This is relying on undefined behaviour in C++, since {data_ptr}
+ // is not aligned to {alignof(ElementType)}.
+ if (!is_shared) {
+ base::WriteUnalignedValue(reinterpret_cast<Address>(data_ptr), value);
+ return;
+ }
- static void SetImpl(ElementType* data_ptr, size_t entry, ElementType value) {
// The JavaScript memory model allows for racy reads and writes to a
- // SharedArrayBuffer's backing store. ThreadSanitizer will catch these
- // racy accesses and warn about them, so we disable TSAN for these reads
- // and writes using annotations.
- //
- // We don't use relaxed atomics here, as it is not a requirement of the
- // JavaScript memory model to have tear-free reads of overlapping accesses,
- // and using relaxed atomics may introduce overhead.
- TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
- if (COMPRESS_POINTERS_BOOL && alignof(ElementType) > kTaggedSize) {
- // TODO(ishell, v8:8875): When pointer compression is enabled 8-byte size
- // fields (external pointers, doubles and BigInt data) are only
- // kTaggedSize aligned so we have to use unaligned pointer friendly way of
- // accessing them in order to avoid undefined behavior in C++ code.
- base::WriteUnalignedValue<ElementType>(
- reinterpret_cast<Address>(data_ptr + entry), value);
- } else {
- data_ptr[entry] = value;
+ // SharedArrayBuffer's backing store. Using relaxed atomics is not strictly
+ // required for JavaScript, but will avoid undefined behaviour in C++ and is
+ // unlikely to introduce noticeable overhead.
+ if (IsAligned(reinterpret_cast<uintptr_t>(data_ptr),
+ alignof(std::atomic<ElementType>))) {
+ // Use a single relaxed atomic store.
+ STATIC_ASSERT(sizeof(std::atomic<ElementType>) == sizeof(ElementType));
+ reinterpret_cast<std::atomic<ElementType>*>(data_ptr)->store(
+ value, std::memory_order_relaxed);
+ return;
+ }
+
+ // Some static CHECKs (are optimized out if succeeding) to ensure that
+ // {data_ptr} is at least four byte aligned, and {std::atomic<uint32_t>}
+ // has size and alignment of four bytes, such that we can cast the
+ // {data_ptr} to it.
+ CHECK_LE(kInt32Size, alignof(ElementType));
+ CHECK_EQ(kInt32Size, alignof(std::atomic<uint32_t>));
+ CHECK_EQ(kInt32Size, sizeof(std::atomic<uint32_t>));
+ // And dynamically check that we indeed have at least four byte alignment.
+ DCHECK(IsAligned(reinterpret_cast<uintptr_t>(data_ptr), kInt32Size));
+ // Store as multiple 32-bit words. Make {kNumWords} >= 1 to avoid compiler
+ // warnings for the empty array or memcpy to an empty object.
+ constexpr size_t kNumWords =
+ std::max(size_t{1}, sizeof(ElementType) / kInt32Size);
+ uint32_t words[kNumWords];
+ CHECK_EQ(sizeof(words), sizeof(value));
+ memcpy(words, &value, sizeof(value));
+ for (size_t word = 0; word < kNumWords; ++word) {
+ STATIC_ASSERT(sizeof(std::atomic<uint32_t>) == sizeof(uint32_t));
+ reinterpret_cast<std::atomic<uint32_t>*>(data_ptr)[word].store(
+ words[word], std::memory_order_relaxed);
}
- TSAN_ANNOTATE_IGNORE_WRITES_END;
}
static Handle<Object> GetInternalImpl(Handle<JSObject> holder,
InternalIndex entry) {
Handle<JSTypedArray> typed_array = Handle<JSTypedArray>::cast(holder);
Isolate* isolate = typed_array->GetIsolate();
- DCHECK_LE(entry.raw_value(), typed_array->length());
+ DCHECK_LT(entry.raw_value(), typed_array->GetLength());
DCHECK(!typed_array->WasDetached());
- ElementType elem = GetImpl(
- static_cast<ElementType*>(typed_array->DataPtr()), entry.raw_value());
+ auto* element_ptr =
+ static_cast<ElementType*>(typed_array->DataPtr()) + entry.raw_value();
+ auto is_shared = typed_array->buffer().is_shared() ? kShared : kUnshared;
+ ElementType elem = GetImpl(element_ptr, is_shared);
return ToHandle(isolate, elem);
}
@@ -3042,28 +3150,53 @@ class TypedElementsAccessor
UNREACHABLE();
}
- static ElementType GetImpl(ElementType* data_ptr, size_t entry) {
+ static ElementType GetImpl(ElementType* data_ptr, IsSharedBuffer is_shared) {
+ // TODO(ishell, v8:8875): Independent of pointer compression, 8-byte size
+ // fields (external pointers, doubles and BigInt data) are not always
+ // 8-byte aligned.
+ if (!is_shared) {
+ return base::ReadUnalignedValue<ElementType>(
+ reinterpret_cast<Address>(data_ptr));
+ }
+
// The JavaScript memory model allows for racy reads and writes to a
- // SharedArrayBuffer's backing store. ThreadSanitizer will catch these
- // racy accesses and warn about them, so we disable TSAN for these reads
- // and writes using annotations.
- //
- // We don't use relaxed atomics here, as it is not a requirement of the
- // JavaScript memory model to have tear-free reads of overlapping accesses,
- // and using relaxed atomics may introduce overhead.
- TSAN_ANNOTATE_IGNORE_READS_BEGIN;
- ElementType result;
- if (COMPRESS_POINTERS_BOOL && alignof(ElementType) > kTaggedSize) {
- // TODO(ishell, v8:8875): When pointer compression is enabled 8-byte size
- // fields (external pointers, doubles and BigInt data) are only
- // kTaggedSize aligned so we have to use unaligned pointer friendly way of
- // accessing them in order to avoid undefined behavior in C++ code.
- result = base::ReadUnalignedValue<ElementType>(
- reinterpret_cast<Address>(data_ptr + entry));
- } else {
- result = data_ptr[entry];
+ // SharedArrayBuffer's backing store. Using relaxed atomics is not strictly
+ // required for JavaScript, but will avoid undefined behaviour in C++ and is
+ // unlikely to introduce noticeable overhead.
+ if (IsAligned(reinterpret_cast<uintptr_t>(data_ptr),
+ alignof(std::atomic<ElementType>))) {
+ // Use a single relaxed atomic load.
+ STATIC_ASSERT(sizeof(std::atomic<ElementType>) == sizeof(ElementType));
+ // Note: acquire semantics are not needed here, but clang seems to merge
+ // this atomic load with the non-atomic load above if we use relaxed
+ // semantics. This will result in TSan failures.
+ return reinterpret_cast<std::atomic<ElementType>*>(data_ptr)->load(
+ std::memory_order_acquire);
+ }
+
+ // Some static CHECKs (are optimized out if succeeding) to ensure that
+ // {data_ptr} is at least four byte aligned, and {std::atomic<uint32_t>}
+ // has size and alignment of four bytes, such that we can cast the
+ // {data_ptr} to it.
+ CHECK_LE(kInt32Size, alignof(ElementType));
+ CHECK_EQ(kInt32Size, alignof(std::atomic<uint32_t>));
+ CHECK_EQ(kInt32Size, sizeof(std::atomic<uint32_t>));
+ // And dynamically check that we indeed have at least four byte alignment.
+ DCHECK(IsAligned(reinterpret_cast<uintptr_t>(data_ptr), kInt32Size));
+ // Load in multiple 32-bit words. Make {kNumWords} >= 1 to avoid compiler
+ // warnings for the empty array or memcpy to an empty object.
+ constexpr size_t kNumWords =
+ std::max(size_t{1}, sizeof(ElementType) / kInt32Size);
+ uint32_t words[kNumWords];
+ for (size_t word = 0; word < kNumWords; ++word) {
+ STATIC_ASSERT(sizeof(std::atomic<uint32_t>) == sizeof(uint32_t));
+ words[word] =
+ reinterpret_cast<std::atomic<uint32_t>*>(data_ptr)[word].load(
+ std::memory_order_relaxed);
}
- TSAN_ANNOTATE_IGNORE_READS_END;
+ ElementType result;
+ CHECK_EQ(sizeof(words), sizeof(result));
+ memcpy(&result, words, sizeof(result));
return result;
}
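
The rewritten SetImpl/GetImpl above drop the TSAN annotations and instead perform genuinely atomic (relaxed) accesses whenever the backing store is shared, falling back to per-32-bit-word atomics when an 8-byte element is only 4-byte aligned. A stripped-down sketch of the store side under the same alignment assumptions; RelaxedStore is an illustrative name, not a V8 helper:

    #include <atomic>
    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    // Illustrative sketch: relaxed store of a value into a buffer that other
    // threads may be reading or writing concurrently (a shared backing store).
    // Assumes data_ptr is at least 4-byte aligned, as in the code above.
    template <typename T>
    void RelaxedStore(T* data_ptr, T value) {
      static_assert(sizeof(std::atomic<T>) == sizeof(T), "no extra atomic state");
      static_assert(sizeof(T) % sizeof(uint32_t) == 0,
                    "sketch only covers 4- and 8-byte element types");
      if (reinterpret_cast<uintptr_t>(data_ptr) % alignof(std::atomic<T>) == 0) {
        // Fully aligned: one relaxed atomic store suffices.
        reinterpret_cast<std::atomic<T>*>(data_ptr)->store(
            value, std::memory_order_relaxed);
        return;
      }
      // Under-aligned (e.g. a double at a 4-byte boundary): split the value
      // into 32-bit words and store each word with a relaxed atomic.
      constexpr size_t kNumWords = sizeof(T) / sizeof(uint32_t);
      uint32_t words[kNumWords];
      std::memcpy(words, &value, sizeof(value));
      auto* word_ptr = reinterpret_cast<std::atomic<uint32_t>*>(data_ptr);
      for (size_t i = 0; i < kNumWords; ++i) {
        word_ptr[i].store(words[i], std::memory_order_relaxed);
      }
    }

The load side in GetImpl mirrors this word-by-word pattern, reassembling the value with memcpy after loading each word.
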
@@ -3086,9 +3219,9 @@ class TypedElementsAccessor
return false;
}
- static void SetLengthImpl(Isolate* isolate, Handle<JSArray> array,
- uint32_t length,
- Handle<FixedArrayBase> backing_store) {
+ static Maybe<bool> SetLengthImpl(Isolate* isolate, Handle<JSArray> array,
+ uint32_t length,
+ Handle<FixedArrayBase> backing_store) {
// External arrays do not support changing their length.
UNREACHABLE();
}
@@ -3111,8 +3244,7 @@ class TypedElementsAccessor
static size_t GetCapacityImpl(JSObject holder, FixedArrayBase backing_store) {
JSTypedArray typed_array = JSTypedArray::cast(holder);
- if (typed_array.WasDetached()) return 0;
- return typed_array.length();
+ return typed_array.GetLength();
}
static size_t NumberOfElementsImpl(JSObject receiver,
@@ -3155,8 +3287,9 @@ class TypedElementsAccessor
return Just(true);
}
- static Object FillImpl(Handle<JSObject> receiver, Handle<Object> value,
- size_t start, size_t end) {
+ static MaybeHandle<Object> FillImpl(Handle<JSObject> receiver,
+ Handle<Object> value, size_t start,
+ size_t end) {
Handle<JSTypedArray> typed_array = Handle<JSTypedArray>::cast(receiver);
DCHECK(!typed_array->WasDetached());
DCHECK_LE(start, end);
@@ -3171,7 +3304,7 @@ class TypedElementsAccessor
} else {
std::fill(data + start, data + end, scalar);
}
- return *typed_array;
+ return MaybeHandle<Object>(typed_array);
}
static Maybe<bool> IncludesValueImpl(Isolate* isolate,
@@ -3200,6 +3333,7 @@ class TypedElementsAccessor
ElementType typed_search_value;
ElementType* data_ptr =
reinterpret_cast<ElementType*>(typed_array.DataPtr());
+ auto is_shared = typed_array.buffer().is_shared() ? kShared : kUnshared;
if (Kind == BIGINT64_ELEMENTS || Kind == BIGUINT64_ELEMENTS) {
if (!value->IsBigInt()) return Just(false);
bool lossless;
@@ -3215,8 +3349,8 @@ class TypedElementsAccessor
}
if (std::isnan(search_value)) {
for (size_t k = start_from; k < length; ++k) {
- double elem_k =
- static_cast<double>(AccessorClass::GetImpl(data_ptr, k));
+ double elem_k = static_cast<double>(
+ AccessorClass::GetImpl(data_ptr + k, is_shared));
if (std::isnan(elem_k)) return Just(true);
}
return Just(false);
@@ -3233,7 +3367,7 @@ class TypedElementsAccessor
}
for (size_t k = start_from; k < length; ++k) {
- ElementType elem_k = AccessorClass::GetImpl(data_ptr, k);
+ ElementType elem_k = AccessorClass::GetImpl(data_ptr + k, is_shared);
if (elem_k == typed_search_value) return Just(true);
}
return Just(false);
@@ -3285,8 +3419,9 @@ class TypedElementsAccessor
length = typed_array.length();
}
+ auto is_shared = typed_array.buffer().is_shared() ? kShared : kUnshared;
for (size_t k = start_from; k < length; ++k) {
- ElementType elem_k = AccessorClass::GetImpl(data_ptr, k);
+ ElementType elem_k = AccessorClass::GetImpl(data_ptr + k, is_shared);
if (elem_k == typed_search_value) return Just<int64_t>(k);
}
return Just<int64_t>(-1);
@@ -3333,8 +3468,9 @@ class TypedElementsAccessor
DCHECK_LT(start_from, typed_array.length());
size_t k = start_from;
+ auto is_shared = typed_array.buffer().is_shared() ? kShared : kUnshared;
do {
- ElementType elem_k = AccessorClass::GetImpl(data_ptr, k);
+ ElementType elem_k = AccessorClass::GetImpl(data_ptr + k, is_shared);
if (elem_k == typed_search_value) return Just<int64_t>(k);
} while (k-- != 0);
return Just<int64_t>(-1);
@@ -3384,12 +3520,16 @@ class TypedElementsAccessor
size_t count = end - start;
DCHECK_LE(count, destination.length());
ElementType* dest_data = static_cast<ElementType*>(destination.DataPtr());
+ auto is_shared =
+ source.buffer().is_shared() || destination.buffer().is_shared()
+ ? kShared
+ : kUnshared;
switch (source.GetElementsKind()) {
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \
case TYPE##_ELEMENTS: { \
ctype* source_data = reinterpret_cast<ctype*>(source.DataPtr()) + start; \
CopyBetweenBackingStores<TYPE##_ELEMENTS, ctype>(source_data, dest_data, \
- count); \
+ count, is_shared); \
break; \
}
TYPED_ARRAYS(TYPED_ARRAY_CASE)
@@ -3408,16 +3548,16 @@ class TypedElementsAccessor
template <ElementsKind SourceKind, typename SourceElementType>
static void CopyBetweenBackingStores(SourceElementType* source_data_ptr,
ElementType* dest_data_ptr,
- size_t length) {
- DisallowGarbageCollection no_gc;
- for (size_t i = 0; i < length; i++) {
+ size_t length,
+ IsSharedBuffer is_shared) {
+ for (; length > 0; --length, ++source_data_ptr, ++dest_data_ptr) {
// We use scalar accessors to avoid boxing/unboxing, so there are no
// allocations.
SourceElementType source_elem =
TypedElementsAccessor<SourceKind, SourceElementType>::GetImpl(
- source_data_ptr, i);
+ source_data_ptr, is_shared);
ElementType dest_elem = FromScalar(source_elem);
- SetImpl(dest_data_ptr, i, dest_elem);
+ SetImpl(dest_data_ptr, dest_elem, is_shared);
}
}
@@ -3448,14 +3588,24 @@ class TypedElementsAccessor
size_t source_byte_length = source.byte_length();
size_t dest_byte_length = destination.byte_length();
+ bool source_shared = source.buffer().is_shared();
+ bool destination_shared = destination.buffer().is_shared();
+
// We can simply copy the backing store if the types are the same, or if
// we are converting e.g. Uint8 <-> Int8, as the binary representation
// will be the same. This is not the case for floats or clamped Uint8,
// which have special conversion operations.
if (same_type || (same_size && both_are_simple)) {
size_t element_size = source.element_size();
- std::memmove(dest_data + offset * element_size, source_data,
- length * element_size);
+ if (source_shared || destination_shared) {
+ base::Relaxed_Memcpy(
+ reinterpret_cast<base::Atomic8*>(dest_data + offset * element_size),
+ reinterpret_cast<base::Atomic8*>(source_data),
+ length * element_size);
+ } else {
+ std::memmove(dest_data + offset * element_size, source_data,
+ length * element_size);
+ }
} else {
std::unique_ptr<uint8_t[]> cloned_source_elements;
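
Here the bulk copy switches to a relaxed, per-byte tear-free copy whenever either backing store is shared, and keeps plain memmove otherwise. A small sketch of that selection; RelaxedCopy below is a stand-in for base::Relaxed_Memcpy and assumes the ranges do not overlap:

    #include <atomic>
    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    // Stand-in for base::Relaxed_Memcpy: byte-wise relaxed atomic copy, so
    // racy accesses to a shared backing store are not C++ undefined behaviour.
    void RelaxedCopy(uint8_t* dst, const uint8_t* src, size_t n) {
      for (size_t i = 0; i < n; ++i) {
        uint8_t byte =
            reinterpret_cast<const std::atomic<uint8_t>*>(src)[i].load(
                std::memory_order_relaxed);
        reinterpret_cast<std::atomic<uint8_t>*>(dst)[i].store(
            byte, std::memory_order_relaxed);
      }
    }

    void CopyElements(uint8_t* dst, const uint8_t* src, size_t n, bool any_shared) {
      if (any_shared) {
        RelaxedCopy(dst, src, n);   // tear-free per byte for shared buffers
      } else {
        std::memmove(dst, src, n);  // fastest path for unshared buffers
      }
    }
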
@@ -3463,17 +3613,25 @@ class TypedElementsAccessor
if (dest_data + dest_byte_length > source_data &&
source_data + source_byte_length > dest_data) {
cloned_source_elements.reset(new uint8_t[source_byte_length]);
- std::memcpy(cloned_source_elements.get(), source_data,
- source_byte_length);
+ if (source_shared) {
+ base::Relaxed_Memcpy(
+ reinterpret_cast<base::Atomic8*>(cloned_source_elements.get()),
+ reinterpret_cast<base::Atomic8*>(source_data),
+ source_byte_length);
+ } else {
+ std::memcpy(cloned_source_elements.get(), source_data,
+ source_byte_length);
+ }
source_data = cloned_source_elements.get();
}
switch (source.GetElementsKind()) {
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \
- case TYPE##_ELEMENTS: \
- CopyBetweenBackingStores<TYPE##_ELEMENTS, ctype>( \
- reinterpret_cast<ctype*>(source_data), \
- reinterpret_cast<ElementType*>(dest_data) + offset, length); \
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \
+ case TYPE##_ELEMENTS: \
+ CopyBetweenBackingStores<TYPE##_ELEMENTS, ctype>( \
+ reinterpret_cast<ctype*>(source_data), \
+ reinterpret_cast<ElementType*>(dest_data) + offset, length, \
+ source_shared || destination_shared ? kShared : kUnshared); \
break;
TYPED_ARRAYS(TYPED_ARRAY_CASE)
default:
@@ -3529,6 +3687,9 @@ class TypedElementsAccessor
ElementsKind kind = source.GetElementsKind();
+ auto destination_shared =
+ destination.buffer().is_shared() ? kShared : kUnshared;
+
// When we find the hole, we normally have to look up the element on the
// prototype chain, which is not handled here and we return false instead.
// When the array has the original array prototype, and that prototype has
@@ -3546,17 +3707,19 @@ class TypedElementsAccessor
for (size_t i = 0; i < length; i++) {
Object elem = source_store.get(static_cast<int>(i));
- SetImpl(dest_data, i, FromScalar(Smi::ToInt(elem)));
+ SetImpl(dest_data + i, FromScalar(Smi::ToInt(elem)),
+ destination_shared);
}
return true;
} else if (kind == HOLEY_SMI_ELEMENTS) {
FixedArray source_store = FixedArray::cast(source.elements());
for (size_t i = 0; i < length; i++) {
if (source_store.is_the_hole(isolate, static_cast<int>(i))) {
- SetImpl(dest_data, i, FromObject(undefined));
+ SetImpl(dest_data + i, FromObject(undefined), destination_shared);
} else {
Object elem = source_store.get(static_cast<int>(i));
- SetImpl(dest_data, i, FromScalar(Smi::ToInt(elem)));
+ SetImpl(dest_data + i, FromScalar(Smi::ToInt(elem)),
+ destination_shared);
}
}
return true;
@@ -3569,17 +3732,17 @@ class TypedElementsAccessor
// Use the from_double conversion for this specific TypedArray type,
// rather than relying on C++ to convert elem.
double elem = source_store.get_scalar(static_cast<int>(i));
- SetImpl(dest_data, i, FromScalar(elem));
+ SetImpl(dest_data + i, FromScalar(elem), destination_shared);
}
return true;
} else if (kind == HOLEY_DOUBLE_ELEMENTS) {
FixedDoubleArray source_store = FixedDoubleArray::cast(source.elements());
for (size_t i = 0; i < length; i++) {
if (source_store.is_the_hole(static_cast<int>(i))) {
- SetImpl(dest_data, i, FromObject(undefined));
+ SetImpl(dest_data + i, FromObject(undefined), destination_shared);
} else {
double elem = source_store.get_scalar(static_cast<int>(i));
- SetImpl(dest_data, i, FromScalar(elem));
+ SetImpl(dest_data + i, FromScalar(elem), destination_shared);
}
}
return true;
@@ -3872,9 +4035,225 @@ Handle<Object> TypedElementsAccessor<BIGUINT64_ELEMENTS, uint64_t>::ToHandle(
return BigInt::FromUint64(isolate, value);
}
+// static
+template <>
+Handle<Object> TypedElementsAccessor<RAB_GSAB_INT8_ELEMENTS, int8_t>::ToHandle(
+ Isolate* isolate, int8_t value) {
+ return handle(Smi::FromInt(value), isolate);
+}
+
+// static
+template <>
+Handle<Object> TypedElementsAccessor<RAB_GSAB_UINT8_ELEMENTS,
+ uint8_t>::ToHandle(Isolate* isolate,
+ uint8_t value) {
+ return handle(Smi::FromInt(value), isolate);
+}
+
+// static
+template <>
+Handle<Object> TypedElementsAccessor<RAB_GSAB_INT16_ELEMENTS,
+ int16_t>::ToHandle(Isolate* isolate,
+ int16_t value) {
+ return handle(Smi::FromInt(value), isolate);
+}
+
+// static
+template <>
+Handle<Object> TypedElementsAccessor<RAB_GSAB_UINT16_ELEMENTS,
+ uint16_t>::ToHandle(Isolate* isolate,
+ uint16_t value) {
+ return handle(Smi::FromInt(value), isolate);
+}
+
+// static
+template <>
+Handle<Object> TypedElementsAccessor<RAB_GSAB_INT32_ELEMENTS,
+ int32_t>::ToHandle(Isolate* isolate,
+ int32_t value) {
+ return isolate->factory()->NewNumberFromInt(value);
+}
+
+// static
+template <>
+Handle<Object> TypedElementsAccessor<RAB_GSAB_UINT32_ELEMENTS,
+ uint32_t>::ToHandle(Isolate* isolate,
+ uint32_t value) {
+ return isolate->factory()->NewNumberFromUint(value);
+}
+
+// static
+template <>
+float TypedElementsAccessor<RAB_GSAB_FLOAT32_ELEMENTS, float>::FromScalar(
+ double value) {
+ return DoubleToFloat32(value);
+}
+
+// static
+template <>
+Handle<Object> TypedElementsAccessor<RAB_GSAB_FLOAT32_ELEMENTS,
+ float>::ToHandle(Isolate* isolate,
+ float value) {
+ return isolate->factory()->NewNumber(value);
+}
+
+// static
+template <>
+double TypedElementsAccessor<RAB_GSAB_FLOAT64_ELEMENTS, double>::FromScalar(
+ double value) {
+ return value;
+}
+
+// static
+template <>
+Handle<Object> TypedElementsAccessor<RAB_GSAB_FLOAT64_ELEMENTS,
+ double>::ToHandle(Isolate* isolate,
+ double value) {
+ return isolate->factory()->NewNumber(value);
+}
+
+// static
+template <>
+uint8_t TypedElementsAccessor<RAB_GSAB_UINT8_CLAMPED_ELEMENTS,
+ uint8_t>::FromScalar(int value) {
+ if (value < 0x00) return 0x00;
+ if (value > 0xFF) return 0xFF;
+ return static_cast<uint8_t>(value);
+}
+
+// static
+template <>
+uint8_t TypedElementsAccessor<RAB_GSAB_UINT8_CLAMPED_ELEMENTS,
+ uint8_t>::FromScalar(uint32_t value) {
+ // We need this special case for Uint32 -> Uint8Clamped, because the highest
+ // Uint32 values will be negative as an int, clamping to 0, rather than 255.
+ if (value > 0xFF) return 0xFF;
+ return static_cast<uint8_t>(value);
+}
+
+// static
+template <>
+uint8_t TypedElementsAccessor<RAB_GSAB_UINT8_CLAMPED_ELEMENTS,
+ uint8_t>::FromScalar(double value) {
+ // Handle NaNs and less than zero values which clamp to zero.
+ if (!(value > 0)) return 0;
+ if (value > 0xFF) return 0xFF;
+ return static_cast<uint8_t>(lrint(value));
+}
+
+// static
+template <>
+Handle<Object> TypedElementsAccessor<RAB_GSAB_UINT8_CLAMPED_ELEMENTS,
+ uint8_t>::ToHandle(Isolate* isolate,
+ uint8_t value) {
+ return handle(Smi::FromInt(value), isolate);
+}
+
+// static
+template <>
+int64_t TypedElementsAccessor<RAB_GSAB_BIGINT64_ELEMENTS, int64_t>::FromScalar(
+ int value) {
+ UNREACHABLE();
+}
+
+// static
+template <>
+int64_t TypedElementsAccessor<RAB_GSAB_BIGINT64_ELEMENTS, int64_t>::FromScalar(
+ uint32_t value) {
+ UNREACHABLE();
+}
+
+// static
+template <>
+int64_t TypedElementsAccessor<RAB_GSAB_BIGINT64_ELEMENTS, int64_t>::FromScalar(
+ double value) {
+ UNREACHABLE();
+}
+
+// static
+template <>
+int64_t TypedElementsAccessor<RAB_GSAB_BIGINT64_ELEMENTS, int64_t>::FromScalar(
+ int64_t value) {
+ return value;
+}
+
+// static
+template <>
+int64_t TypedElementsAccessor<RAB_GSAB_BIGINT64_ELEMENTS, int64_t>::FromScalar(
+ uint64_t value) {
+ return static_cast<int64_t>(value);
+}
+
+// static
+template <>
+int64_t TypedElementsAccessor<RAB_GSAB_BIGINT64_ELEMENTS, int64_t>::FromObject(
+ Object value, bool* lossless) {
+ return BigInt::cast(value).AsInt64(lossless);
+}
+
+// static
+template <>
+Handle<Object> TypedElementsAccessor<RAB_GSAB_BIGINT64_ELEMENTS,
+ int64_t>::ToHandle(Isolate* isolate,
+ int64_t value) {
+ return BigInt::FromInt64(isolate, value);
+}
+
+// static
+template <>
+uint64_t TypedElementsAccessor<RAB_GSAB_BIGUINT64_ELEMENTS,
+ uint64_t>::FromScalar(int value) {
+ UNREACHABLE();
+}
+
+// static
+template <>
+uint64_t TypedElementsAccessor<RAB_GSAB_BIGUINT64_ELEMENTS,
+ uint64_t>::FromScalar(uint32_t value) {
+ UNREACHABLE();
+}
+
+// static
+template <>
+uint64_t TypedElementsAccessor<RAB_GSAB_BIGUINT64_ELEMENTS,
+ uint64_t>::FromScalar(double value) {
+ UNREACHABLE();
+}
+
+// static
+template <>
+uint64_t TypedElementsAccessor<RAB_GSAB_BIGUINT64_ELEMENTS,
+ uint64_t>::FromScalar(int64_t value) {
+ return static_cast<uint64_t>(value);
+}
+
+// static
+template <>
+uint64_t TypedElementsAccessor<RAB_GSAB_BIGUINT64_ELEMENTS,
+ uint64_t>::FromScalar(uint64_t value) {
+ return value;
+}
+
+// static
+template <>
+uint64_t TypedElementsAccessor<RAB_GSAB_BIGUINT64_ELEMENTS,
+ uint64_t>::FromObject(Object value,
+ bool* lossless) {
+ return BigInt::cast(value).AsUint64(lossless);
+}
+
+// static
+template <>
+Handle<Object> TypedElementsAccessor<RAB_GSAB_BIGUINT64_ELEMENTS,
+ uint64_t>::ToHandle(Isolate* isolate,
+ uint64_t value) {
+ return BigInt::FromUint64(isolate, value);
+}
+
#define FIXED_ELEMENTS_ACCESSOR(Type, type, TYPE, ctype) \
using Type##ElementsAccessor = TypedElementsAccessor<TYPE##_ELEMENTS, ctype>;
TYPED_ARRAYS(FIXED_ELEMENTS_ACCESSOR)
+RAB_GSAB_TYPED_ARRAYS(FIXED_ELEMENTS_ACCESSOR)
#undef FIXED_ELEMENTS_ACCESSOR
template <typename Subclass, typename ArgumentsAccessor, typename KindTraits>
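
Among the specializations above, Uint8Clamped gets a dedicated uint32_t FromScalar overload because pushing a large uint32_t through the int overload would make it negative and clamp it to 0 instead of 255. A compact illustration with hypothetical helper names:

    #include <cstdint>

    // Illustrative re-statement of the two clamping paths shown above.
    constexpr uint8_t ClampFromInt(int v) {
      return v < 0 ? 0 : v > 0xFF ? 0xFF : static_cast<uint8_t>(v);
    }
    constexpr uint8_t ClampFromUint32(uint32_t v) {
      return v > 0xFF ? 0xFF : static_cast<uint8_t>(v);
    }

    // 0xFFFFFFFF reinterpreted as int is negative, so the int path would clamp
    // it to 0; the dedicated uint32_t path clamps it to 255 as required.
    static_assert(ClampFromInt(-1) == 0, "negative clamps to 0");
    static_assert(ClampFromUint32(0xFFFFFFFFu) == 255, "large uint32 clamps to 255");
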
@@ -3894,7 +4273,7 @@ class SloppyArgumentsElementsAccessor
if (entry.as_uint32() < length) {
// Read context mapped entry.
DisallowGarbageCollection no_gc;
- Object probe = elements->mapped_entries(entry.as_uint32());
+ Object probe = elements->mapped_entries(entry.as_uint32(), kRelaxedLoad);
DCHECK(!probe.IsTheHole(isolate));
Context context = elements->context();
int context_entry = Smi::ToInt(probe);
@@ -3908,13 +4287,13 @@ class SloppyArgumentsElementsAccessor
}
}
- static void TransitionElementsKindImpl(Handle<JSObject> object,
- Handle<Map> map) {
+ static Maybe<bool> TransitionElementsKindImpl(Handle<JSObject> object,
+ Handle<Map> map) {
UNREACHABLE();
}
- static void GrowCapacityAndConvertImpl(Handle<JSObject> object,
- uint32_t capacity) {
+ static Maybe<bool> GrowCapacityAndConvertImpl(Handle<JSObject> object,
+ uint32_t capacity) {
UNREACHABLE();
}
@@ -3930,7 +4309,7 @@ class SloppyArgumentsElementsAccessor
if (entry.as_uint32() < length) {
// Store context mapped entry.
DisallowGarbageCollection no_gc;
- Object probe = elements.mapped_entries(entry.as_uint32());
+ Object probe = elements.mapped_entries(entry.as_uint32(), kRelaxedLoad);
DCHECK(!probe.IsTheHole());
Context context = Context::cast(elements.context());
int context_entry = Smi::ToInt(probe);
@@ -3953,9 +4332,9 @@ class SloppyArgumentsElementsAccessor
}
}
- static void SetLengthImpl(Isolate* isolate, Handle<JSArray> array,
- uint32_t length,
- Handle<FixedArrayBase> parameter_map) {
+ static Maybe<bool> SetLengthImpl(Isolate* isolate, Handle<JSArray> array,
+ uint32_t length,
+ Handle<FixedArrayBase> parameter_map) {
// Sloppy arguments objects are not arrays.
UNREACHABLE();
}
@@ -4063,7 +4442,7 @@ class SloppyArgumentsElementsAccessor
size_t index) {
uint32_t length = elements.length();
if (index >= length) return false;
- return !elements.mapped_entries(static_cast<uint32_t>(index))
+ return !elements.mapped_entries(static_cast<uint32_t>(index), kRelaxedLoad)
.IsTheHole(isolate);
}
@@ -4118,7 +4497,8 @@ class SloppyArgumentsElementsAccessor
uint32_t length = elements->length();
for (uint32_t i = 0; i < length; ++i) {
- if (elements->mapped_entries(i).IsTheHole(isolate)) continue;
+ if (elements->mapped_entries(i, kRelaxedLoad).IsTheHole(isolate))
+ continue;
if (convert == GetKeysConversion::kConvertToString) {
Handle<String> index_string = isolate->factory()->Uint32ToString(i);
list->set(insertion_index, *index_string);
@@ -4251,9 +4631,10 @@ class SlowSloppyArgumentsElementsAccessor
NumberDictionary::DeleteEntry(isolate, dict, entry.adjust_down(length));
elements->set_arguments(*dict);
}
- static void AddImpl(Handle<JSObject> object, uint32_t index,
- Handle<Object> value, PropertyAttributes attributes,
- uint32_t new_capacity) {
+ static Maybe<bool> AddImpl(Handle<JSObject> object, uint32_t index,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ uint32_t new_capacity) {
Isolate* isolate = object->GetIsolate();
Handle<SloppyArgumentsElements> elements(
SloppyArgumentsElements::cast(object->elements()), isolate);
@@ -4270,6 +4651,7 @@ class SlowSloppyArgumentsElementsAccessor
if (*dictionary != *new_dictionary) {
elements->set_arguments(*new_dictionary);
}
+ return Just(true);
}
static void ReconfigureImpl(Handle<JSObject> object,
@@ -4281,7 +4663,7 @@ class SlowSloppyArgumentsElementsAccessor
Handle<SloppyArgumentsElements>::cast(store);
uint32_t length = elements->length();
if (entry.as_uint32() < length) {
- Object probe = elements->mapped_entries(entry.as_uint32());
+ Object probe = elements->mapped_entries(entry.as_uint32(), kRelaxedLoad);
DCHECK(!probe.IsTheHole(isolate));
Context context = elements->context();
int context_entry = Smi::ToInt(probe);
@@ -4365,9 +4747,10 @@ class FastSloppyArgumentsElementsAccessor
SlowSloppyArgumentsElementsAccessor::SloppyDeleteImpl(obj, elements, entry);
}
- static void AddImpl(Handle<JSObject> object, uint32_t index,
- Handle<Object> value, PropertyAttributes attributes,
- uint32_t new_capacity) {
+ static Maybe<bool> AddImpl(Handle<JSObject> object, uint32_t index,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ uint32_t new_capacity) {
DCHECK_EQ(NONE, attributes);
Isolate* isolate = object->GetIsolate();
Handle<SloppyArgumentsElements> elements(
@@ -4375,7 +4758,8 @@ class FastSloppyArgumentsElementsAccessor
Handle<FixedArray> old_arguments(elements->arguments(), isolate);
if (old_arguments->IsNumberDictionary() ||
static_cast<uint32_t>(old_arguments->length()) < new_capacity) {
- GrowCapacityAndConvertImpl(object, new_capacity);
+ MAYBE_RETURN(GrowCapacityAndConvertImpl(object, new_capacity),
+ Nothing<bool>());
}
FixedArray arguments = elements->arguments();
// For fast holey objects, the entry equals the index. The code above made
@@ -4385,6 +4769,7 @@ class FastSloppyArgumentsElementsAccessor
// kMaxUInt32.
FastHoleyObjectElementsAccessor::SetImpl(arguments, InternalIndex(index),
*value);
+ return Just(true);
}
static void ReconfigureImpl(Handle<JSObject> object,
@@ -4414,8 +4799,8 @@ class FastSloppyArgumentsElementsAccessor
}
}
- static void GrowCapacityAndConvertImpl(Handle<JSObject> object,
- uint32_t capacity) {
+ static Maybe<bool> GrowCapacityAndConvertImpl(Handle<JSObject> object,
+ uint32_t capacity) {
Isolate* isolate = object->GetIsolate();
Handle<SloppyArgumentsElements> elements(
SloppyArgumentsElements::cast(object->elements()), isolate);
@@ -4426,13 +4811,17 @@ class FastSloppyArgumentsElementsAccessor
// elements.
DCHECK(from_kind == SLOW_SLOPPY_ARGUMENTS_ELEMENTS ||
static_cast<uint32_t>(old_arguments->length()) < capacity);
- Handle<FixedArrayBase> arguments =
- ConvertElementsWithCapacity(object, old_arguments, from_kind, capacity);
+ Handle<FixedArrayBase> arguments;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, arguments,
+ ConvertElementsWithCapacity(object, old_arguments, from_kind, capacity),
+ Nothing<bool>());
Handle<Map> new_map = JSObject::GetElementsTransitionMap(
object, FAST_SLOPPY_ARGUMENTS_ELEMENTS);
JSObject::MigrateToMap(isolate, object, new_map);
elements->set_arguments(FixedArray::cast(*arguments));
JSObject::ValidateElements(*object);
+ return Just(true);
}
};
@@ -4504,9 +4893,10 @@ class StringWrapperElementsAccessor
value);
}
- static void AddImpl(Handle<JSObject> object, uint32_t index,
- Handle<Object> value, PropertyAttributes attributes,
- uint32_t new_capacity) {
+ static Maybe<bool> AddImpl(Handle<JSObject> object, uint32_t index,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ uint32_t new_capacity) {
DCHECK(index >= static_cast<uint32_t>(GetString(*object).length()));
// Explicitly grow fast backing stores if needed. Dictionaries know how to
// extend their capacity themselves.
@@ -4514,10 +4904,12 @@ class StringWrapperElementsAccessor
(object->GetElementsKind() == SLOW_STRING_WRAPPER_ELEMENTS ||
BackingStoreAccessor::GetCapacityImpl(*object, object->elements()) !=
new_capacity)) {
- GrowCapacityAndConvertImpl(object, new_capacity);
+ MAYBE_RETURN(GrowCapacityAndConvertImpl(object, new_capacity),
+ Nothing<bool>());
}
BackingStoreAccessor::AddImpl(object, index, value, attributes,
new_capacity);
+ return Just(true);
}
static void ReconfigureImpl(Handle<JSObject> object,
@@ -4562,8 +4954,8 @@ class StringWrapperElementsAccessor
backing_store, keys);
}
- static void GrowCapacityAndConvertImpl(Handle<JSObject> object,
- uint32_t capacity) {
+ static Maybe<bool> GrowCapacityAndConvertImpl(Handle<JSObject> object,
+ uint32_t capacity) {
Handle<FixedArrayBase> old_elements(object->elements(),
object->GetIsolate());
ElementsKind from_kind = object->GetElementsKind();
@@ -4578,9 +4970,9 @@ class StringWrapperElementsAccessor
// elements.
DCHECK(from_kind == SLOW_STRING_WRAPPER_ELEMENTS ||
static_cast<uint32_t>(old_elements->length()) < capacity);
- Subclass::BasicGrowCapacityAndConvertImpl(object, old_elements, from_kind,
- FAST_STRING_WRAPPER_ELEMENTS,
- capacity);
+ return Subclass::BasicGrowCapacityAndConvertImpl(
+ object, old_elements, from_kind, FAST_STRING_WRAPPER_ELEMENTS,
+ capacity);
}
static void CopyElementsImpl(Isolate* isolate, FixedArrayBase from,
@@ -4665,7 +5057,7 @@ MaybeHandle<Object> ArrayConstructInitializeElements(
} else {
// Take the argument as the length.
JSArray::Initialize(array, 0);
- JSArray::SetLength(array, length);
+ MAYBE_RETURN_NULL(JSArray::SetLength(array, length));
}
return array;
}
diff --git a/chromium/v8/src/objects/elements.h b/chromium/v8/src/objects/elements.h
index 4a34e866f2f..23dbe1d34f1 100644
--- a/chromium/v8/src/objects/elements.h
+++ b/chromium/v8/src/objects/elements.h
@@ -66,7 +66,8 @@ class ElementsAccessor {
// changing array sizes as defined in EcmaScript 5.1 15.4.5.2, i.e. array that
// have non-deletable elements can only be shrunk to the size of highest
// element that is non-deletable.
- virtual void SetLength(Handle<JSArray> holder, uint32_t new_length) = 0;
+ V8_WARN_UNUSED_RESULT virtual Maybe<bool> SetLength(Handle<JSArray> holder,
+ uint32_t new_length) = 0;
// Copy all indices that have elements from |object| into the given
// KeyAccumulator. For Dictionary-based element-kinds we filter out elements
@@ -96,13 +97,14 @@ class ElementsAccessor {
Handle<JSObject> receiver, KeyAccumulator* accumulator,
AddKeyConversion convert) = 0;
- virtual void TransitionElementsKind(Handle<JSObject> object,
- Handle<Map> map) = 0;
- virtual void GrowCapacityAndConvert(Handle<JSObject> object,
- uint32_t capacity) = 0;
+ V8_WARN_UNUSED_RESULT virtual Maybe<bool> TransitionElementsKind(
+ Handle<JSObject> object, Handle<Map> map) = 0;
+ V8_WARN_UNUSED_RESULT virtual Maybe<bool> GrowCapacityAndConvert(
+ Handle<JSObject> object, uint32_t capacity) = 0;
// Unlike GrowCapacityAndConvert do not attempt to convert the backing store
// and simply return false in this case.
- virtual bool GrowCapacity(Handle<JSObject> object, uint32_t index) = 0;
+ V8_WARN_UNUSED_RESULT virtual Maybe<bool> GrowCapacity(
+ Handle<JSObject> object, uint32_t index) = 0;
static void InitializeOncePerProcess();
static void TearDown();
@@ -110,29 +112,36 @@ class ElementsAccessor {
virtual void Set(Handle<JSObject> holder, InternalIndex entry,
Object value) = 0;
- virtual void Add(Handle<JSObject> object, uint32_t index,
- Handle<Object> value, PropertyAttributes attributes,
- uint32_t new_capacity) = 0;
+ V8_WARN_UNUSED_RESULT virtual Maybe<bool> Add(Handle<JSObject> object,
+ uint32_t index,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ uint32_t new_capacity) = 0;
static Handle<JSArray> Concat(Isolate* isolate, BuiltinArguments* args,
uint32_t concat_size, uint32_t result_length);
- virtual uint32_t Push(Handle<JSArray> receiver, BuiltinArguments* args,
- uint32_t push_size) = 0;
+ V8_WARN_UNUSED_RESULT virtual Maybe<uint32_t> Push(Handle<JSArray> receiver,
+ BuiltinArguments* args,
+ uint32_t push_size) = 0;
- virtual uint32_t Unshift(Handle<JSArray> receiver, BuiltinArguments* args,
- uint32_t unshift_size) = 0;
+ V8_WARN_UNUSED_RESULT virtual Maybe<uint32_t> Unshift(
+ Handle<JSArray> receiver, BuiltinArguments* args,
+ uint32_t unshift_size) = 0;
- virtual Handle<Object> Pop(Handle<JSArray> receiver) = 0;
+ V8_WARN_UNUSED_RESULT virtual MaybeHandle<Object> Pop(
+ Handle<JSArray> receiver) = 0;
- virtual Handle<Object> Shift(Handle<JSArray> receiver) = 0;
+ V8_WARN_UNUSED_RESULT virtual MaybeHandle<Object> Shift(
+ Handle<JSArray> receiver) = 0;
virtual Handle<NumberDictionary> Normalize(Handle<JSObject> object) = 0;
virtual size_t GetCapacity(JSObject holder, FixedArrayBase backing_store) = 0;
- virtual Object Fill(Handle<JSObject> receiver, Handle<Object> obj_value,
- size_t start, size_t end) = 0;
+ V8_WARN_UNUSED_RESULT virtual MaybeHandle<Object> Fill(
+ Handle<JSObject> receiver, Handle<Object> obj_value, size_t start,
+ size_t end) = 0;
// Check an Object's own elements for an element (using SameValueZero
// semantics)
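The elements.h hunk above changes the mutating entry points (SetLength, TransitionElementsKind, GrowCapacityAndConvert, Add, Push, Unshift, Pop, Shift, Fill) from plain returns to V8_WARN_UNUSED_RESULT Maybe/MaybeHandle results, so a failure while growing a backing store propagates to the caller instead of being swallowed. A much-simplified stand-in for that propagation shape, using std::optional rather than V8's Maybe:

#include <cstdint>
#include <optional>

// Empty optional plays the role of Nothing<bool>() / "exception pending".
using MaybeBool = std::optional<bool>;

MaybeBool GrowCapacityAndConvert(uint32_t capacity) {
  if (capacity > (1u << 30)) return std::nullopt;  // simulated allocation failure
  // ... grow and convert the backing store ...
  return true;
}

MaybeBool Add(uint32_t index, uint32_t new_capacity) {
  // Same shape as MAYBE_RETURN(GrowCapacityAndConvertImpl(...), Nothing<bool>())
  // in the patch: bail out as soon as a callee reports failure.
  MaybeBool grown = GrowCapacityAndConvert(new_capacity);
  if (!grown.has_value()) return std::nullopt;
  // ... store the element at `index` ...
  return true;
}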
diff --git a/chromium/v8/src/objects/embedder-data-slot-inl.h b/chromium/v8/src/objects/embedder-data-slot-inl.h
index 3f8deb39f09..983a0b0ad41 100644
--- a/chromium/v8/src/objects/embedder-data-slot-inl.h
+++ b/chromium/v8/src/objects/embedder-data-slot-inl.h
@@ -81,7 +81,7 @@ void EmbedderDataSlot::store_tagged(JSObject object, int embedder_field_index,
#endif
}
-bool EmbedderDataSlot::ToAlignedPointer(PtrComprCageBase isolate_root,
+bool EmbedderDataSlot::ToAlignedPointer(Isolate* isolate,
void** out_pointer) const {
// We don't care about atomicity of access here because embedder slots
// are accessed this way only from the main thread via API during "mutator"
@@ -89,16 +89,9 @@ bool EmbedderDataSlot::ToAlignedPointer(PtrComprCageBase isolate_root,
// at the tagged part of the embedder slot but read-only access is ok).
Address raw_value;
#ifdef V8_HEAP_SANDBOX
-
- // TODO(syg): V8_HEAP_SANDBOX doesn't work with pointer cage
-#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
-#error "V8_HEAP_SANDBOX requires per-Isolate pointer compression cage"
-#endif
-
uint32_t index = base::Memory<uint32_t>(address() + kRawPayloadOffset);
- const Isolate* isolate = Isolate::FromRootAddress(isolate_root.address());
- raw_value = isolate->external_pointer_table().get(index) ^
- kEmbedderDataSlotPayloadTag;
+ raw_value = isolate->external_pointer_table().get(index) &
+ ~kEmbedderDataSlotPayloadTag;
#else
if (COMPRESS_POINTERS_BOOL) {
// TODO(ishell, v8:8875): When pointer compression is enabled 8-byte size
@@ -114,27 +107,20 @@ bool EmbedderDataSlot::ToAlignedPointer(PtrComprCageBase isolate_root,
return HAS_SMI_TAG(raw_value);
}
-bool EmbedderDataSlot::ToAlignedPointerSafe(PtrComprCageBase isolate_root,
+bool EmbedderDataSlot::ToAlignedPointerSafe(Isolate* isolate,
void** out_pointer) const {
#ifdef V8_HEAP_SANDBOX
-
- // TODO(syg): V8_HEAP_SANDBOX doesn't work with pointer cage
-#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
-#error "V8_HEAP_SANDBOX requires per-Isolate pointer compression cage"
-#endif
-
uint32_t index = base::Memory<uint32_t>(address() + kRawPayloadOffset);
Address raw_value;
- const Isolate* isolate = Isolate::FromRootAddress(isolate_root.address());
if (isolate->external_pointer_table().is_valid_index(index)) {
- raw_value = isolate->external_pointer_table().get(index) ^
- kEmbedderDataSlotPayloadTag;
+ raw_value = isolate->external_pointer_table().get(index) &
+ ~kEmbedderDataSlotPayloadTag;
*out_pointer = reinterpret_cast<void*>(raw_value);
return true;
}
return false;
#else
- return ToAlignedPointer(isolate_root, out_pointer);
+ return ToAlignedPointer(isolate, out_pointer);
#endif // V8_HEAP_SANDBOX
}
@@ -150,7 +136,7 @@ bool EmbedderDataSlot::store_aligned_pointer(Isolate* isolate, void* ptr) {
ObjectSlot(address() + kRawPayloadOffset).Relaxed_Load();
uint32_t index = static_cast<uint32_t>(index_as_object.ptr());
isolate->external_pointer_table().set(index,
- value ^ kEmbedderDataSlotPayloadTag);
+ value | kEmbedderDataSlotPayloadTag);
return true;
}
#endif
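In the two embedder-data-slot hunks above, external pointer table entries switch from XOR-ing kEmbedderDataSlotPayloadTag on both store and load to OR-ing the tag on store and masking it off on load. The round trip is unchanged as long as the stored value has no bits inside the tag mask; a small sketch with an illustrative tag constant (not V8's real value):

#include <cstdint>

constexpr uint64_t kTag = 0xFF00000000000000ULL;  // illustrative only

uint64_t StoreTagged(uint64_t value) { return value | kTag; }    // store path
uint64_t LoadUntagged(uint64_t entry) { return entry & ~kTag; }  // load path

// For any value with no bits inside kTag, LoadUntagged(StoreTagged(v)) == v,
// which matches what the previous symmetric XOR scheme provided.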
diff --git a/chromium/v8/src/objects/embedder-data-slot.h b/chromium/v8/src/objects/embedder-data-slot.h
index 65fe78403a2..6213b7b333c 100644
--- a/chromium/v8/src/objects/embedder-data-slot.h
+++ b/chromium/v8/src/objects/embedder-data-slot.h
@@ -75,8 +75,7 @@ class EmbedderDataSlot
// When V8 heap sandbox is enabled, calling this method when the raw part of
// the slot does not contain valid external pointer table index is undefined
// behaviour and most likely result in crashes.
- V8_INLINE bool ToAlignedPointer(PtrComprCageBase isolate_root,
- void** out_result) const;
+ V8_INLINE bool ToAlignedPointer(Isolate* isolate, void** out_result) const;
// Same as ToAlignedPointer() but with a workaround for V8 heap sandbox.
// When V8 heap sandbox is enabled, this method doesn't crash when the raw
@@ -87,7 +86,7 @@ class EmbedderDataSlot
//
// Call this function if you are not sure whether the slot contains valid
// external pointer or not.
- V8_INLINE bool ToAlignedPointerSafe(PtrComprCageBase isolate_root,
+ V8_INLINE bool ToAlignedPointerSafe(Isolate* isolate,
void** out_result) const;
 // Returns true if the pointer was successfully stored or false if the pointer
diff --git a/chromium/v8/src/objects/feedback-vector-inl.h b/chromium/v8/src/objects/feedback-vector-inl.h
index 8853dabdbdb..aea13c5fd6d 100644
--- a/chromium/v8/src/objects/feedback-vector-inl.h
+++ b/chromium/v8/src/objects/feedback-vector-inl.h
@@ -182,14 +182,15 @@ bool FeedbackVector::IsOfLegacyType(MaybeObject value) {
#endif // DEBUG
MaybeObject FeedbackVector::Get(FeedbackSlot slot) const {
- MaybeObject value = raw_feedback_slots(GetIndex(slot));
+ MaybeObject value = raw_feedback_slots(GetIndex(slot), kRelaxedLoad);
DCHECK(!IsOfLegacyType(value));
return value;
}
MaybeObject FeedbackVector::Get(PtrComprCageBase cage_base,
FeedbackSlot slot) const {
- MaybeObject value = raw_feedback_slots(cage_base, GetIndex(slot));
+ MaybeObject value =
+ raw_feedback_slots(cage_base, GetIndex(slot), kRelaxedLoad);
DCHECK(!IsOfLegacyType(value));
return value;
}
@@ -335,6 +336,10 @@ Handle<Symbol> FeedbackVector::MegamorphicSentinel(Isolate* isolate) {
return isolate->factory()->megamorphic_symbol();
}
+Handle<Symbol> FeedbackVector::MegaDOMSentinel(Isolate* isolate) {
+ return isolate->factory()->mega_dom_symbol();
+}
+
Symbol FeedbackVector::RawUninitializedSentinel(Isolate* isolate) {
return ReadOnlyRoots(isolate).uninitialized_symbol();
}
@@ -377,6 +382,11 @@ MaybeObject FeedbackNexus::MegamorphicSentinel() const {
*FeedbackVector::MegamorphicSentinel(GetIsolate()));
}
+MaybeObject FeedbackNexus::MegaDOMSentinel() const {
+ return MaybeObject::FromObject(
+ *FeedbackVector::MegaDOMSentinel(GetIsolate()));
+}
+
MaybeObject FeedbackNexus::FromHandle(MaybeObjectHandle slot) const {
return slot.is_null() ? HeapObjectReference::ClearedValue(config()->isolate())
: *slot;
diff --git a/chromium/v8/src/objects/feedback-vector.cc b/chromium/v8/src/objects/feedback-vector.cc
index a77ea5d265f..5f4cf592b08 100644
--- a/chromium/v8/src/objects/feedback-vector.cc
+++ b/chromium/v8/src/objects/feedback-vector.cc
@@ -54,6 +54,7 @@ static bool IsPropertyNameFeedback(MaybeObject feedback) {
Symbol symbol = Symbol::cast(heap_object);
ReadOnlyRoots roots = symbol.GetReadOnlyRoots();
return symbol != roots.uninitialized_symbol() &&
+ symbol != roots.mega_dom_symbol() &&
symbol != roots.megamorphic_symbol();
}
@@ -75,8 +76,8 @@ void FeedbackMetadata::SetKind(FeedbackSlot slot, FeedbackSlotKind kind) {
}
// static
-template <typename LocalIsolate>
-Handle<FeedbackMetadata> FeedbackMetadata::New(LocalIsolate* isolate,
+template <typename IsolateT>
+Handle<FeedbackMetadata> FeedbackMetadata::New(IsolateT* isolate,
const FeedbackVectorSpec* spec) {
auto* factory = isolate->factory();
@@ -253,8 +254,6 @@ Handle<FeedbackVector> FeedbackVector::New(
DCHECK_EQ(vector->optimization_marker(),
FLAG_log_function_events ? OptimizationMarker::kLogFirstExecution
: OptimizationMarker::kNone);
- // TODO(mythria): This might change if NCI code is installed on feedback
- // vector. Update this accordingly.
DCHECK_EQ(vector->optimization_tier(), OptimizationTier::kNone);
DCHECK_EQ(vector->invocation_count(), 0);
DCHECK_EQ(vector->profiler_ticks(), 0);
@@ -676,6 +675,13 @@ bool FeedbackNexus::ConfigureMegamorphic() {
return false;
}
+void FeedbackNexus::ConfigureMegaDOM(const MaybeObjectHandle& handler) {
+ DisallowGarbageCollection no_gc;
+ MaybeObject sentinel = MegaDOMSentinel();
+
+ SetFeedback(sentinel, SKIP_WRITE_BARRIER, *handler, UPDATE_WRITE_BARRIER);
+}
+
bool FeedbackNexus::ConfigureMegamorphic(IcCheckType property_type) {
DisallowGarbageCollection no_gc;
MaybeObject sentinel = MegamorphicSentinel();
@@ -737,6 +743,10 @@ InlineCacheState FeedbackNexus::ic_state() const {
if (feedback == MegamorphicSentinel()) {
return MEGAMORPHIC;
}
+ if (feedback == MegaDOMSentinel()) {
+ DCHECK(IsLoadICKind(kind()));
+ return MEGADOM;
+ }
if (feedback->IsWeakOrCleared()) {
// Don't check if the map is cleared.
return MONOMORPHIC;
@@ -999,6 +1009,15 @@ SpeculationMode FeedbackNexus::GetSpeculationMode() {
return SpeculationModeField::decode(value);
}
+CallFeedbackContent FeedbackNexus::GetCallFeedbackContent() {
+ DCHECK(IsCallICKind(kind()));
+
+ Object call_count = GetFeedbackExtra()->cast<Object>();
+ CHECK(call_count.IsSmi());
+ uint32_t value = static_cast<uint32_t>(Smi::ToInt(call_count));
+ return CallFeedbackContentField::decode(value);
+}
+
float FeedbackNexus::ComputeCallFrequency() {
DCHECK(IsCallICKind(kind()));
@@ -1407,52 +1426,6 @@ std::vector<Handle<String>> FeedbackNexus::GetTypesForSourcePositions(
return types_for_position;
}
-namespace {
-
-Handle<JSObject> ConvertToJSObject(Isolate* isolate,
- Handle<SimpleNumberDictionary> feedback) {
- Handle<JSObject> type_profile =
- isolate->factory()->NewJSObject(isolate->object_function());
-
- for (int index = SimpleNumberDictionary::kElementsStartIndex;
- index < feedback->length();
- index += SimpleNumberDictionary::kEntrySize) {
- int key_index = index + SimpleNumberDictionary::kEntryKeyIndex;
- Object key = feedback->get(key_index);
- if (key.IsSmi()) {
- int value_index = index + SimpleNumberDictionary::kEntryValueIndex;
-
- Handle<ArrayList> position_specific_types(
- ArrayList::cast(feedback->get(value_index)), isolate);
-
- int position = Smi::ToInt(key);
- JSObject::AddDataElement(
- type_profile, position,
- isolate->factory()->NewJSArrayWithElements(
- ArrayList::Elements(isolate, position_specific_types)),
- PropertyAttributes::NONE);
- }
- }
- return type_profile;
-}
-} // namespace
-
-JSObject FeedbackNexus::GetTypeProfile() const {
- DCHECK(IsTypeProfileKind(kind()));
- Isolate* isolate = GetIsolate();
-
- MaybeObject const feedback = GetFeedback();
-
- if (feedback == UninitializedSentinel()) {
- return *isolate->factory()->NewJSObject(isolate->object_function());
- }
-
- return *ConvertToJSObject(isolate,
- handle(SimpleNumberDictionary::cast(
- feedback->GetHeapObjectAssumeStrong()),
- isolate));
-}
-
void FeedbackNexus::ResetTypeProfile() {
DCHECK(IsTypeProfileKind(kind()));
SetFeedback(UninitializedSentinel());
diff --git a/chromium/v8/src/objects/feedback-vector.h b/chromium/v8/src/objects/feedback-vector.h
index cc5e867f720..07aad24430e 100644
--- a/chromium/v8/src/objects/feedback-vector.h
+++ b/chromium/v8/src/objects/feedback-vector.h
@@ -130,8 +130,8 @@ inline bool IsCloneObjectKind(FeedbackSlotKind kind) {
inline TypeofMode GetTypeofModeFromSlotKind(FeedbackSlotKind kind) {
DCHECK(IsLoadGlobalICKind(kind));
return (kind == FeedbackSlotKind::kLoadGlobalInsideTypeof)
- ? INSIDE_TYPEOF
- : NOT_INSIDE_TYPEOF;
+ ? TypeofMode::kInside
+ : TypeofMode::kNotInside;
}
inline LanguageMode GetLanguageModeFromSlotKind(FeedbackSlotKind kind) {
@@ -311,7 +311,7 @@ class FeedbackVector
DECL_PRINTER(FeedbackVector)
- void FeedbackSlotPrint(std::ostream& os, FeedbackSlot slot); // NOLINT
+ void FeedbackSlotPrint(std::ostream& os, FeedbackSlot slot);
// Clears the vector slots. Return true if feedback has changed.
bool ClearSlots(Isolate* isolate);
@@ -322,6 +322,9 @@ class FeedbackVector
// The object that indicates a megamorphic state.
static inline Handle<Symbol> MegamorphicSentinel(Isolate* isolate);
+ // The object that indicates a MegaDOM state.
+ static inline Handle<Symbol> MegaDOMSentinel(Isolate* isolate);
+
// A raw version of the uninitialized sentinel that's safe to read during
// garbage collection (e.g., for patching the cache).
static inline Symbol RawUninitializedSentinel(Isolate* isolate);
@@ -389,7 +392,7 @@ class V8_EXPORT_PRIVATE FeedbackVectorSpec {
}
FeedbackSlot AddLoadGlobalICSlot(TypeofMode typeof_mode) {
- return AddSlot(typeof_mode == INSIDE_TYPEOF
+ return AddSlot(typeof_mode == TypeofMode::kInside
? FeedbackSlotKind::kLoadGlobalInsideTypeof
: FeedbackSlotKind::kLoadGlobalNotInsideTypeof);
}
@@ -532,9 +535,9 @@ class FeedbackMetadata : public HeapObject {
V8_EXPORT_PRIVATE FeedbackSlotKind GetKind(FeedbackSlot slot) const;
// If {spec} is null, then it is considered empty.
- template <typename LocalIsolate>
+ template <typename IsolateT>
V8_EXPORT_PRIVATE static Handle<FeedbackMetadata> New(
- LocalIsolate* isolate, const FeedbackVectorSpec* spec = nullptr);
+ IsolateT* isolate, const FeedbackVectorSpec* spec = nullptr);
DECL_PRINTER(FeedbackMetadata)
DECL_VERIFIER(FeedbackMetadata)
@@ -588,7 +591,6 @@ class FeedbackMetadata : public HeapObject {
// Verify that an empty hash field looks like a tagged object, but can't
// possibly be confused with a pointer.
-// NOLINTNEXTLINE(runtime/references) (false positive)
STATIC_ASSERT((Name::kEmptyHashField & kHeapObjectTag) == kHeapObjectTag);
STATIC_ASSERT(Name::kEmptyHashField == 0x3);
// Verify that a set hash field will not look like a tagged object.
@@ -728,7 +730,7 @@ class V8_EXPORT_PRIVATE FeedbackNexus final {
bool IsMegamorphic() const { return ic_state() == MEGAMORPHIC; }
bool IsGeneric() const { return ic_state() == GENERIC; }
- void Print(std::ostream& os); // NOLINT
+ void Print(std::ostream& os);
// For map-based ICs (load, keyed-load, store, keyed-store).
Map GetFirstMap() const;
@@ -773,6 +775,8 @@ class V8_EXPORT_PRIVATE FeedbackNexus final {
void ConfigurePolymorphic(
Handle<Name> name, std::vector<MapAndHandler> const& maps_and_handlers);
+ void ConfigureMegaDOM(const MaybeObjectHandle& handler);
+
BinaryOperationHint GetBinaryOperationFeedback() const;
CompareOperationHint GetCompareOperationFeedback() const;
ForInHint GetForInFeedback() const;
@@ -791,13 +795,15 @@ class V8_EXPORT_PRIVATE FeedbackNexus final {
int GetCallCount();
void SetSpeculationMode(SpeculationMode mode);
SpeculationMode GetSpeculationMode();
+ CallFeedbackContent GetCallFeedbackContent();
// Compute the call frequency based on the call count and the invocation
// count (taken from the type feedback vector).
float ComputeCallFrequency();
using SpeculationModeField = base::BitField<SpeculationMode, 0, 1>;
- using CallCountField = base::BitField<uint32_t, 1, 31>;
+ using CallFeedbackContentField = base::BitField<CallFeedbackContent, 1, 1>;
+ using CallCountField = base::BitField<uint32_t, 2, 30>;
// For InstanceOf ICs.
MaybeHandle<JSObject> GetConstructorFeedback() const;
@@ -831,7 +837,6 @@ class V8_EXPORT_PRIVATE FeedbackNexus final {
// Add a type to the list of types for source position <position>.
void Collect(Handle<String> type, int position);
- JSObject GetTypeProfile() const;
std::vector<int> GetSourcePositions() const;
std::vector<Handle<String>> GetTypesForSourcePositions(uint32_t pos) const;
@@ -847,6 +852,7 @@ class V8_EXPORT_PRIVATE FeedbackNexus final {
inline MaybeObject UninitializedSentinel() const;
inline MaybeObject MegamorphicSentinel() const;
+ inline MaybeObject MegaDOMSentinel() const;
// Create an array. The caller must install it in a feedback vector slot.
Handle<WeakFixedArray> CreateArrayOfSize(int length);
diff --git a/chromium/v8/src/objects/feedback-vector.tq b/chromium/v8/src/objects/feedback-vector.tq
index a84533db676..768254f1870 100644
--- a/chromium/v8/src/objects/feedback-vector.tq
+++ b/chromium/v8/src/objects/feedback-vector.tq
@@ -24,7 +24,7 @@ extern class FeedbackVector extends HeapObject {
shared_function_info: SharedFunctionInfo;
maybe_optimized_code: Weak<Code>;
closure_feedback_cell_array: ClosureFeedbackCellArray;
- @relaxedRead raw_feedback_slots[length]: MaybeObject;
+ @cppRelaxedLoad raw_feedback_slots[length]: MaybeObject;
}
extern class FeedbackMetadata extends HeapObject;
diff --git a/chromium/v8/src/objects/fixed-array-inl.h b/chromium/v8/src/objects/fixed-array-inl.h
index cca6d400705..43b77539e14 100644
--- a/chromium/v8/src/objects/fixed-array-inl.h
+++ b/chromium/v8/src/objects/fixed-array-inl.h
@@ -19,7 +19,6 @@
#include "src/objects/oddball.h"
#include "src/objects/slots.h"
#include "src/roots/roots-inl.h"
-#include "src/sanitizer/tsan.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -45,13 +44,11 @@ TQ_OBJECT_CONSTRUCTORS_IMPL(WeakArrayList)
NEVER_READ_ONLY_SPACE_IMPL(WeakArrayList)
-SYNCHRONIZED_SMI_ACCESSORS(FixedArrayBase, length, kLengthOffset)
+RELEASE_ACQUIRE_SMI_ACCESSORS(FixedArrayBase, length, kLengthOffset)
-SYNCHRONIZED_SMI_ACCESSORS(WeakFixedArray, length, kLengthOffset)
+RELEASE_ACQUIRE_SMI_ACCESSORS(WeakFixedArray, length, kLengthOffset)
-SYNCHRONIZED_SMI_ACCESSORS(WeakArrayList, capacity, kCapacityOffset)
-
-Object FixedArrayBase::unchecked_synchronized_length() const {
+Object FixedArrayBase::unchecked_length(AcquireLoadTag) const {
return ACQUIRE_READ_FIELD(*this, kLengthOffset);
}
@@ -243,15 +240,11 @@ void FixedArray::CopyElements(Isolate* isolate, int dst_index, FixedArray src,
// Due to left- and right-trimming, concurrent visitors need to read the length
// with acquire semantics.
// TODO(ulan): Acquire should not be needed anymore.
-inline int FixedArray::AllocatedSize() {
- return SizeFor(synchronized_length());
-}
+inline int FixedArray::AllocatedSize() { return SizeFor(length(kAcquireLoad)); }
inline int WeakFixedArray::AllocatedSize() {
- return SizeFor(synchronized_length());
-}
-inline int WeakArrayList::AllocatedSize() {
- return SizeFor(synchronized_capacity());
+ return SizeFor(length(kAcquireLoad));
}
+inline int WeakArrayList::AllocatedSize() { return SizeFor(capacity()); }
// Perform a binary search in a fixed array.
template <SearchMode search_mode, typename T>
@@ -445,7 +438,7 @@ MaybeObject WeakFixedArray::Get(int index) const {
MaybeObject WeakFixedArray::Get(PtrComprCageBase cage_base, int index) const {
DCHECK_LT(static_cast<unsigned>(index), static_cast<unsigned>(length()));
- return objects(cage_base, index);
+ return objects(cage_base, index, kRelaxedLoad);
}
void WeakFixedArray::Set(int index, MaybeObject value, WriteBarrierMode mode) {
@@ -480,7 +473,7 @@ MaybeObject WeakArrayList::Get(int index) const {
MaybeObject WeakArrayList::Get(PtrComprCageBase cage_base, int index) const {
DCHECK_LT(static_cast<unsigned>(index), static_cast<unsigned>(capacity()));
- return objects(cage_base, index);
+ return objects(cage_base, index, kRelaxedLoad);
}
void WeakArrayList::Set(int index, MaybeObject value, WriteBarrierMode mode) {
diff --git a/chromium/v8/src/objects/fixed-array.h b/chromium/v8/src/objects/fixed-array.h
index 98c5d8d5b5c..d6c5077aa0f 100644
--- a/chromium/v8/src/objects/fixed-array.h
+++ b/chromium/v8/src/objects/fixed-array.h
@@ -71,10 +71,12 @@ enum FixedArraySubInstanceType {
class FixedArrayBase
: public TorqueGeneratedFixedArrayBase<FixedArrayBase, HeapObject> {
public:
- // Get and set the length using acquire loads and release stores.
- DECL_SYNCHRONIZED_INT_ACCESSORS(length)
+ // Forward declare the non-atomic (set_)length defined in torque.
+ using TorqueGeneratedFixedArrayBase::length;
+ using TorqueGeneratedFixedArrayBase::set_length;
+ DECL_RELEASE_ACQUIRE_INT_ACCESSORS(length)
- inline Object unchecked_synchronized_length() const;
+ inline Object unchecked_length(AcquireLoadTag) const;
static int GetMaxLengthForNewSpaceAllocation(ElementsKind kind);
@@ -283,8 +285,10 @@ class WeakFixedArray
int index, MaybeObject value,
WriteBarrierMode mode = WriteBarrierMode::UPDATE_WRITE_BARRIER);
- // Get and set the length using acquire loads and release stores.
- DECL_SYNCHRONIZED_INT_ACCESSORS(length)
+ // Forward declare the non-atomic (set_)length defined in torque.
+ using TorqueGeneratedWeakFixedArray::length;
+ using TorqueGeneratedWeakFixedArray::set_length;
+ DECL_RELEASE_ACQUIRE_INT_ACCESSORS(length)
// Gives access to raw memory which stores the array's data.
inline MaybeObjectSlot data_start();
@@ -376,9 +380,6 @@ class WeakArrayList
V8_EXPORT_PRIVATE bool IsFull();
- // Get and set the capacity using acquire loads and release stores.
- DECL_SYNCHRONIZED_INT_ACCESSORS(capacity)
-
int AllocatedSize();
class BodyDescriptor;
diff --git a/chromium/v8/src/objects/fixed-array.tq b/chromium/v8/src/objects/fixed-array.tq
index 6aed0c26b27..31198d70d4a 100644
--- a/chromium/v8/src/objects/fixed-array.tq
+++ b/chromium/v8/src/objects/fixed-array.tq
@@ -26,7 +26,7 @@ extern class FixedDoubleArray extends FixedArrayBase {
@generateCppClass
extern class WeakFixedArray extends HeapObject {
const length: Smi;
- @relaxedRead objects[length]: MaybeObject;
+ @cppRelaxedLoad objects[length]: MaybeObject;
}
@generateCppClass
@@ -51,7 +51,7 @@ extern class TemplateList extends FixedArray {
extern class WeakArrayList extends HeapObject {
const capacity: Smi;
length: Smi;
- @relaxedRead objects[capacity]: MaybeObject;
+ @cppRelaxedLoad objects[capacity]: MaybeObject;
}
extern operator '.length_intptr' macro LoadAndUntagFixedArrayBaseLength(
diff --git a/chromium/v8/src/objects/foreign-inl.h b/chromium/v8/src/objects/foreign-inl.h
index 150857f49a0..a278c844b92 100644
--- a/chromium/v8/src/objects/foreign-inl.h
+++ b/chromium/v8/src/objects/foreign-inl.h
@@ -29,7 +29,8 @@ bool Foreign::IsNormalized(Object value) {
}
DEF_GETTER(Foreign, foreign_address, Address) {
- return ReadExternalPointerField(kForeignAddressOffset, cage_base,
+ Isolate* isolate = GetIsolateForHeapSandbox(*this);
+ return ReadExternalPointerField(kForeignAddressOffset, isolate,
kForeignForeignAddressTag);
}
diff --git a/chromium/v8/src/objects/free-space-inl.h b/chromium/v8/src/objects/free-space-inl.h
index e8ce1d63507..443ff25caa6 100644
--- a/chromium/v8/src/objects/free-space-inl.h
+++ b/chromium/v8/src/objects/free-space-inl.h
@@ -23,7 +23,7 @@ TQ_OBJECT_CONSTRUCTORS_IMPL(FreeSpace)
RELAXED_SMI_ACCESSORS(FreeSpace, size, kSizeOffset)
-int FreeSpace::Size() { return size(); }
+int FreeSpace::Size() { return size(kRelaxedLoad); }
FreeSpace FreeSpace::next() {
DCHECK(IsValid());
@@ -50,10 +50,10 @@ bool FreeSpace::IsValid() {
Heap* heap = GetHeapFromWritableObject(*this);
Object free_space_map =
Isolate::FromHeap(heap)->root(RootIndex::kFreeSpaceMap);
- CHECK_IMPLIES(!map_slot().contains_value(free_space_map.ptr()),
+ CHECK_IMPLIES(!map_slot().contains_map_value(free_space_map.ptr()),
!heap->deserialization_complete() &&
- map_slot().contains_value(kNullAddress));
- CHECK_LE(kNextOffset + kTaggedSize, relaxed_read_size());
+ map_slot().contains_map_value(kNullAddress));
+ CHECK_LE(kNextOffset + kTaggedSize, size(kRelaxedLoad));
return true;
}
diff --git a/chromium/v8/src/objects/free-space.h b/chromium/v8/src/objects/free-space.h
index 76d618cbfdb..d2f569bb33a 100644
--- a/chromium/v8/src/objects/free-space.h
+++ b/chromium/v8/src/objects/free-space.h
@@ -23,8 +23,7 @@ namespace internal {
class FreeSpace : public TorqueGeneratedFreeSpace<FreeSpace, HeapObject> {
public:
// [size]: size of the free space including the header.
- inline int relaxed_read_size() const;
- inline void relaxed_write_size(int value);
+ DECL_RELAXED_SMI_ACCESSORS(size)
inline int Size();
diff --git a/chromium/v8/src/objects/function-kind.h b/chromium/v8/src/objects/function-kind.h
index b863f9c72fc..02b5b7636a1 100644
--- a/chromium/v8/src/objects/function-kind.h
+++ b/chromium/v8/src/objects/function-kind.h
@@ -181,6 +181,14 @@ inline bool BindsSuper(FunctionKind kind) {
IsClassConstructor(kind);
}
+inline bool IsAwaitAsIdentifierDisallowed(FunctionKind kind) {
+ // 'await' is always disallowed as an identifier in module contexts. Callers
+ // should short-circuit the module case instead of calling this.
+ DCHECK(!IsModule(kind));
+ return IsAsyncFunction(kind) ||
+ kind == FunctionKind::kClassStaticInitializerFunction;
+}
+
inline const char* FunctionKind2String(FunctionKind kind) {
switch (kind) {
case FunctionKind::kNormalFunction:
diff --git a/chromium/v8/src/objects/hash-table-inl.h b/chromium/v8/src/objects/hash-table-inl.h
index 27645058b30..19382cc7ec1 100644
--- a/chromium/v8/src/objects/hash-table-inl.h
+++ b/chromium/v8/src/objects/hash-table-inl.h
@@ -130,9 +130,8 @@ Handle<Map> EphemeronHashTable::GetMap(ReadOnlyRoots roots) {
}
template <typename Derived, typename Shape>
-template <typename LocalIsolate>
-InternalIndex HashTable<Derived, Shape>::FindEntry(LocalIsolate* isolate,
- Key key) {
+template <typename IsolateT>
+InternalIndex HashTable<Derived, Shape>::FindEntry(IsolateT* isolate, Key key) {
ReadOnlyRoots roots(isolate);
return FindEntry(isolate, roots, key, Shape::Hash(roots, key));
}
diff --git a/chromium/v8/src/objects/hash-table.h b/chromium/v8/src/objects/hash-table.h
index 12ac020fb76..c17a15c95f3 100644
--- a/chromium/v8/src/objects/hash-table.h
+++ b/chromium/v8/src/objects/hash-table.h
@@ -125,9 +125,9 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) HashTable
using Key = typename Shape::Key;
// Returns a new HashTable object.
- template <typename LocalIsolate>
+ template <typename IsolateT>
V8_WARN_UNUSED_RESULT static Handle<Derived> New(
- LocalIsolate* isolate, int at_least_space_for,
+ IsolateT* isolate, int at_least_space_for,
AllocationType allocation = AllocationType::kYoung,
MinimumCapacity capacity_option = USE_DEFAULT_MINIMUM_CAPACITY);
@@ -140,8 +140,8 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) HashTable
// Find entry for key otherwise return kNotFound.
inline InternalIndex FindEntry(PtrComprCageBase cage_base,
ReadOnlyRoots roots, Key key, int32_t hash);
- template <typename LocalIsolate>
- inline InternalIndex FindEntry(LocalIsolate* isolate, Key key);
+ template <typename IsolateT>
+ inline InternalIndex FindEntry(IsolateT* isolate, Key key);
// Rehashes the table in-place.
void Rehash(PtrComprCageBase cage_base);
@@ -194,9 +194,9 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) HashTable
}
// Ensure enough space for n additional elements.
- template <typename LocalIsolate>
+ template <typename IsolateT>
V8_WARN_UNUSED_RESULT static Handle<Derived> EnsureCapacity(
- LocalIsolate* isolate, Handle<Derived> table, int n = 1,
+ IsolateT* isolate, Handle<Derived> table, int n = 1,
AllocationType allocation = AllocationType::kYoung);
// Returns true if this table has sufficient capacity for adding n elements.
@@ -212,9 +212,9 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) HashTable
protected:
friend class ObjectHashTable;
- template <typename LocalIsolate>
+ template <typename IsolateT>
V8_WARN_UNUSED_RESULT static Handle<Derived> NewInternal(
- LocalIsolate* isolate, int capacity, AllocationType allocation);
+ IsolateT* isolate, int capacity, AllocationType allocation);
// Find the entry at which to insert element with the given key that
// has the given hash value.
diff --git a/chromium/v8/src/objects/heap-object.h b/chromium/v8/src/objects/heap-object.h
index e0aea975371..128e17fab1b 100644
--- a/chromium/v8/src/objects/heap-object.h
+++ b/chromium/v8/src/objects/heap-object.h
@@ -43,8 +43,8 @@ class HeapObject : public Object {
inline void set_map_no_write_barrier(Map value);
// Access the map using acquire load and release store.
- DECL_GETTER(synchronized_map, Map)
- inline void synchronized_set_map(Map value);
+ DECL_ACQUIRE_GETTER(map, Map)
+ inline void set_map(Map value, ReleaseStoreTag);
// Compare-and-swaps map word using release store, returns true if the map
// word was actually swapped.
@@ -58,12 +58,12 @@ class HeapObject : public Object {
// During garbage collection, the map word of a heap object does not
// necessarily contain a map pointer.
- DECL_GETTER(map_word, MapWord)
- inline void set_map_word(MapWord map_word);
+ DECL_RELAXED_GETTER(map_word, MapWord)
+ inline void set_map_word(MapWord map_word, RelaxedStoreTag);
// Access the map word using acquire load and release store.
- DECL_GETTER(synchronized_map_word, MapWord)
- inline void synchronized_set_map_word(MapWord map_word);
+ DECL_ACQUIRE_GETTER(map_word, MapWord)
+ inline void set_map_word(MapWord map_word, ReleaseStoreTag);
// This method exists to help remove GetIsolate/GetHeap from HeapObject, in a
// way that doesn't require passing Isolate/Heap down huge call chains or to
@@ -163,9 +163,9 @@ class HeapObject : public Object {
const DisallowGarbageCollection& promise);
// Dispatched behavior.
- void HeapObjectShortPrint(std::ostream& os); // NOLINT
+ void HeapObjectShortPrint(std::ostream& os);
#ifdef OBJECT_PRINT
- void PrintHeader(std::ostream& os, const char* id); // NOLINT
+ void PrintHeader(std::ostream& os, const char* id);
#endif
DECL_PRINTER(HeapObject)
EXPORT_DECL_VERIFIER(HeapObject)
diff --git a/chromium/v8/src/objects/instance-type-inl.h b/chromium/v8/src/objects/instance-type-inl.h
index 8edb73d666a..32ee7a24f53 100644
--- a/chromium/v8/src/objects/instance-type-inl.h
+++ b/chromium/v8/src/objects/instance-type-inl.h
@@ -31,14 +31,14 @@ struct InstanceRangeChecker {
template <InstanceType upper_limit>
struct InstanceRangeChecker<FIRST_TYPE, upper_limit> {
static constexpr bool Check(InstanceType value) {
- CONSTEXPR_DCHECK(FIRST_TYPE <= value);
+ DCHECK_LE(FIRST_TYPE, value);
return value <= upper_limit;
}
};
template <InstanceType lower_limit>
struct InstanceRangeChecker<lower_limit, LAST_TYPE> {
static constexpr bool Check(InstanceType value) {
- CONSTEXPR_DCHECK(LAST_TYPE >= value);
+ DCHECK_GE(LAST_TYPE, value);
return value >= lower_limit;
}
};
diff --git a/chromium/v8/src/objects/instance-type.h b/chromium/v8/src/objects/instance-type.h
index 9c8c8d5d63d..7a11f9cf688 100644
--- a/chromium/v8/src/objects/instance-type.h
+++ b/chromium/v8/src/objects/instance-type.h
@@ -33,16 +33,11 @@ enum StringRepresentationTag {
};
const uint32_t kIsIndirectStringMask = 1 << 0;
const uint32_t kIsIndirectStringTag = 1 << 0;
-// NOLINTNEXTLINE(runtime/references) (false positive)
STATIC_ASSERT((kSeqStringTag & kIsIndirectStringMask) == 0);
-// NOLINTNEXTLINE(runtime/references) (false positive)
STATIC_ASSERT((kExternalStringTag & kIsIndirectStringMask) == 0);
-// NOLINTNEXTLINE(runtime/references) (false positive)
STATIC_ASSERT((kConsStringTag & kIsIndirectStringMask) == kIsIndirectStringTag);
-// NOLINTNEXTLINE(runtime/references) (false positive)
STATIC_ASSERT((kSlicedStringTag & kIsIndirectStringMask) ==
kIsIndirectStringTag);
-// NOLINTNEXTLINE(runtime/references) (false positive)
STATIC_ASSERT((kThinStringTag & kIsIndirectStringMask) == kIsIndirectStringTag);
// For strings, bit 3 indicates whether the string consists of two-byte
@@ -152,7 +147,6 @@ enum InstanceType : uint16_t {
constexpr InstanceType LAST_STRING_TYPE =
static_cast<InstanceType>(FIRST_NONSTRING_TYPE - 1);
-// NOLINTNEXTLINE(runtime/references) (false positive)
STATIC_ASSERT((FIRST_NONSTRING_TYPE & kIsNotStringMask) != kStringTag);
STATIC_ASSERT(JS_OBJECT_TYPE == Internals::kJSObjectType);
STATIC_ASSERT(JS_API_OBJECT_TYPE == Internals::kJSApiObjectType);
@@ -258,6 +252,7 @@ TYPED_ARRAYS(TYPED_ARRAY_IS_TYPE_FUNCTION_DECL)
V(_, FunctionTemplateInfoMap, function_template_info_map, \
FunctionTemplateInfo) \
V(_, HeapNumberMap, heap_number_map, HeapNumber) \
+ V(_, MegaDomHandlerMap, mega_dom_handler_map, MegaDomHandler) \
V(_, MetaMap, meta_map, Map) \
V(_, PreparseDataMap, preparse_data_map, PreparseData) \
V(_, PrototypeInfoMap, prototype_info_map, PrototypeInfo) \
diff --git a/chromium/v8/src/objects/internal-index.h b/chromium/v8/src/objects/internal-index.h
index a241f3c686f..dec83cefe2a 100644
--- a/chromium/v8/src/objects/internal-index.h
+++ b/chromium/v8/src/objects/internal-index.h
@@ -40,8 +40,7 @@ class InternalIndex {
return static_cast<uint32_t>(entry_);
}
constexpr int as_int() const {
- CONSTEXPR_DCHECK(entry_ <=
- static_cast<size_t>(std::numeric_limits<int>::max()));
+ DCHECK_GE(std::numeric_limits<int>::max(), entry_);
return static_cast<int>(entry_);
}
diff --git a/chromium/v8/src/objects/intl-objects.cc b/chromium/v8/src/objects/intl-objects.cc
index d9fca11e735..f5be56df451 100644
--- a/chromium/v8/src/objects/intl-objects.cc
+++ b/chromium/v8/src/objects/intl-objects.cc
@@ -90,7 +90,7 @@ inline constexpr uint16_t ToLatin1Lower(uint16_t ch) {
// Does not work for U+00DF (sharp-s), U+00B5 (micron), U+00FF.
inline constexpr uint16_t ToLatin1Upper(uint16_t ch) {
- CONSTEXPR_DCHECK(ch != 0xDF && ch != 0xB5 && ch != 0xFF);
+ DCHECK(ch != 0xDF && ch != 0xB5 && ch != 0xFF);
return ch &
~((IsAsciiLower(ch) || (((ch & 0xE0) == 0xE0) && ch != 0xF7)) << 5);
}
@@ -479,7 +479,12 @@ Handle<JSObject> InnerAddElement(Isolate* isolate, Handle<JSArray> array,
field_type_string, NONE);
JSObject::AddProperty(isolate, element, factory->value_string(), value, NONE);
- JSObject::AddDataElement(array, index, element, NONE);
+ // TODO(victorgomes): Temporarily forcing a fatal error here in case of
+ // overflow, until Intl::AddElement can handle exceptions.
+ if (JSObject::AddDataElement(array, index, element, NONE).IsNothing()) {
+ FATAL("Fatal JavaScript invalid array size when adding element");
+ UNREACHABLE();
+ }
return element;
}
@@ -568,7 +573,14 @@ std::set<std::string> Intl::BuildLocaleSet(
for (const std::string& locale : icu_available_locales) {
if (path != nullptr || validate_key != nullptr) {
if (!ValidateResource(icu::Locale(locale.c_str()), path, validate_key)) {
- continue;
+      // FIXME(chromium:1215606) Find a better fix for nb->no fallback
+ if (locale != "nb") {
+ continue;
+ }
+ // Try no for nb
+ if (!ValidateResource(icu::Locale("no"), path, validate_key)) {
+ continue;
+ }
}
}
locales.insert(locale);
@@ -1486,7 +1498,6 @@ icu::LocaleMatcher BuildLocaleMatcher(
builder.addSupportedLocale(l);
}
}
-
return builder.build(*status);
}
@@ -1533,23 +1544,12 @@ std::string BestFitMatcher(Isolate* isolate,
const std::set<std::string>& available_locales,
const std::vector<std::string>& requested_locales) {
UErrorCode status = U_ZERO_ERROR;
- icu::LocaleMatcher matcher =
- BuildLocaleMatcher(isolate, available_locales, &status);
- DCHECK(U_SUCCESS(status));
-
Iterator iter(requested_locales.cbegin(), requested_locales.cend());
- std::string bestfit =
- matcher.getBestMatch(iter, status)->toLanguageTag<std::string>(status);
- if (U_FAILURE(status)) {
- return DefaultLocale(isolate);
- }
- // We need to return the extensions with it.
- for (auto it = requested_locales.begin(); it != requested_locales.end();
- ++it) {
- if (it->find(bestfit) == 0) {
- return *it;
- }
- }
+ std::string bestfit = BuildLocaleMatcher(isolate, available_locales, &status)
+ .getBestMatchResult(iter, status)
+ .makeResolvedLocale(status)
+ .toLanguageTag<std::string>(status);
+ DCHECK(U_SUCCESS(status));
return bestfit;
}
@@ -1561,32 +1561,29 @@ std::vector<std::string> BestFitSupportedLocales(
UErrorCode status = U_ZERO_ERROR;
icu::LocaleMatcher matcher =
BuildLocaleMatcher(isolate, available_locales, &status);
- DCHECK(U_SUCCESS(status));
-
- std::string default_locale = DefaultLocale(isolate);
std::vector<std::string> result;
- for (auto it = requested_locales.cbegin(); it != requested_locales.cend();
- it++) {
- if (*it == default_locale) {
- result.push_back(*it);
- } else {
+ if (U_SUCCESS(status)) {
+ for (auto it = requested_locales.cbegin(); it != requested_locales.cend();
+ it++) {
status = U_ZERO_ERROR;
icu::Locale desired = icu::Locale::forLanguageTag(it->c_str(), status);
- std::string bestfit = matcher.getBestMatch(desired, status)
- ->toLanguageTag<std::string>(status);
- // We need to return the extensions with it.
- if (U_SUCCESS(status) && it->find(bestfit) == 0) {
- result.push_back(*it);
- }
+ icu::LocaleMatcher::Result matched =
+ matcher.getBestMatchResult(desired, status);
+ if (U_FAILURE(status)) continue;
+ if (matched.getSupportedIndex() < 0) continue;
+ std::string bestfit =
+ matched.makeResolvedLocale(status).toLanguageTag<std::string>(status);
+ if (U_FAILURE(status)) continue;
+ result.push_back(bestfit);
}
}
return result;
}
// ecma262 #sec-createarrayfromlist
-Handle<JSArray> CreateArrayFromList(Isolate* isolate,
- std::vector<std::string> elements,
- PropertyAttributes attr) {
+MaybeHandle<JSArray> CreateArrayFromList(Isolate* isolate,
+ std::vector<std::string> elements,
+ PropertyAttributes attr) {
Factory* factory = isolate->factory();
// Let array be ! ArrayCreate(0).
Handle<JSArray> array = factory->NewJSArray(0);
@@ -1599,10 +1596,11 @@ Handle<JSArray> CreateArrayFromList(Isolate* isolate,
const std::string& part = elements[i];
Handle<String> value =
factory->NewStringFromUtf8(CStrVector(part.c_str())).ToHandleChecked();
- JSObject::AddDataElement(array, i, value, attr);
+ MAYBE_RETURN(JSObject::AddDataElement(array, i, value, attr),
+ MaybeHandle<JSArray>());
}
// 5. Return array.
- return array;
+ return MaybeHandle<JSArray>(array);
}
// ECMA 402 9.2.9 SupportedLocales(availableLocales, requestedLocales, options)
@@ -2042,23 +2040,15 @@ icu::TimeZone* ICUTimezoneCache::GetTimeZone() {
bool ICUTimezoneCache::GetOffsets(double time_ms, bool is_utc,
int32_t* raw_offset, int32_t* dst_offset) {
UErrorCode status = U_ZERO_ERROR;
- // TODO(jshin): ICU TimeZone class handles skipped time differently from
- // Ecma 262 (https://github.com/tc39/ecma262/pull/778) and icu::TimeZone
- // class does not expose the necessary API. Fixing
- // http://bugs.icu-project.org/trac/ticket/13268 would make it easy to
- // implement the proposed spec change. A proposed fix for ICU is
- // https://chromium-review.googlesource.com/851265 .
- // In the meantime, use an internal (still public) API of icu::BasicTimeZone.
- // Once it's accepted by the upstream, get rid of cast. Note that casting
- // TimeZone to BasicTimeZone is safe because we know that icu::TimeZone used
- // here is a BasicTimeZone.
if (is_utc) {
GetTimeZone()->getOffset(time_ms, false, *raw_offset, *dst_offset, status);
} else {
+ // Note that casting TimeZone to BasicTimeZone is safe because we know that
+ // icu::TimeZone used here is a BasicTimeZone.
static_cast<const icu::BasicTimeZone*>(GetTimeZone())
- ->getOffsetFromLocal(time_ms, icu::BasicTimeZone::kFormer,
- icu::BasicTimeZone::kFormer, *raw_offset,
- *dst_offset, status);
+ ->getOffsetFromLocal(time_ms, UCAL_TZ_LOCAL_FORMER,
+ UCAL_TZ_LOCAL_FORMER, *raw_offset, *dst_offset,
+ status);
}
return U_SUCCESS(status);
diff --git a/chromium/v8/src/objects/intl-objects.h b/chromium/v8/src/objects/intl-objects.h
index d4b4feed686..ec0eb93873b 100644
--- a/chromium/v8/src/objects/intl-objects.h
+++ b/chromium/v8/src/objects/intl-objects.h
@@ -21,7 +21,7 @@
#include "unicode/locid.h"
#include "unicode/uversion.h"
-#define V8_MINIMUM_ICU_VERSION 68
+#define V8_MINIMUM_ICU_VERSION 69
namespace U_ICU_NAMESPACE {
class BreakIterator;
diff --git a/chromium/v8/src/objects/js-array-buffer-inl.h b/chromium/v8/src/objects/js-array-buffer-inl.h
index b4aa5e33b98..0fa5737ec71 100644
--- a/chromium/v8/src/objects/js-array-buffer-inl.h
+++ b/chromium/v8/src/objects/js-array-buffer-inl.h
@@ -43,7 +43,8 @@ void JSArrayBuffer::set_byte_length(size_t value) {
}
DEF_GETTER(JSArrayBuffer, backing_store, void*) {
- Address value = ReadExternalPointerField(kBackingStoreOffset, cage_base,
+ Isolate* isolate = GetIsolateForHeapSandbox(*this);
+ Address value = ReadExternalPointerField(kBackingStoreOffset, isolate,
kArrayBufferBackingStoreTag);
return reinterpret_cast<void*>(value);
}
@@ -167,6 +168,8 @@ BIT_FIELD_ACCESSORS(JSArrayBuffer, bit_field, is_asmjs_memory,
JSArrayBuffer::IsAsmJsMemoryBit)
BIT_FIELD_ACCESSORS(JSArrayBuffer, bit_field, is_shared,
JSArrayBuffer::IsSharedBit)
+BIT_FIELD_ACCESSORS(JSArrayBuffer, bit_field, is_resizable,
+ JSArrayBuffer::IsResizableBit)
size_t JSArrayBufferView::byte_offset() const {
return ReadField<size_t>(kByteOffsetOffset);
@@ -188,18 +191,57 @@ bool JSArrayBufferView::WasDetached() const {
return JSArrayBuffer::cast(buffer()).was_detached();
}
+BIT_FIELD_ACCESSORS(JSTypedArray, bit_field, is_length_tracking,
+ JSTypedArray::IsLengthTrackingBit)
+BIT_FIELD_ACCESSORS(JSTypedArray, bit_field, is_backed_by_rab,
+ JSTypedArray::IsBackedByRabBit)
+
+bool JSTypedArray::IsVariableLength() const {
+ return is_length_tracking() || is_backed_by_rab();
+}
+
+size_t JSTypedArray::GetLength() const {
+ if (WasDetached()) return 0;
+ if (is_length_tracking()) {
+ if (is_backed_by_rab()) {
+ return buffer().byte_length() / element_size();
+ }
+ return buffer().GetBackingStore()->byte_length(std::memory_order_seq_cst) /
+ element_size();
+ }
+ size_t array_length = LengthUnchecked();
+ if (is_backed_by_rab()) {
+ // The sum can't overflow, since we have managed to allocate the
+ // JSTypedArray.
+ if (byte_offset() + array_length * element_size() >
+ buffer().byte_length()) {
+ return 0;
+ }
+ }
+ return array_length;
+}
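As a worked example of the bounds check above (numbers are hypothetical): a non-length-tracking view backed by a resizable buffer with byte_offset() == 16, LengthUnchecked() == 4 and element_size() == 4 needs 16 + 4 * 4 = 32 bytes, so GetLength() reports 4 while the buffer holds at least 32 bytes and 0 once the buffer has been shrunk below that.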
+
void JSTypedArray::AllocateExternalPointerEntries(Isolate* isolate) {
InitExternalPointerField(kExternalPointerOffset, isolate);
}
-size_t JSTypedArray::length() const { return ReadField<size_t>(kLengthOffset); }
+size_t JSTypedArray::length() const {
+ DCHECK(!is_length_tracking());
+ DCHECK(!is_backed_by_rab());
+ return ReadField<size_t>(kLengthOffset);
+}
+
+size_t JSTypedArray::LengthUnchecked() const {
+ return ReadField<size_t>(kLengthOffset);
+}
void JSTypedArray::set_length(size_t value) {
WriteField<size_t>(kLengthOffset, value);
}
DEF_GETTER(JSTypedArray, external_pointer, Address) {
- return ReadExternalPointerField(kExternalPointerOffset, cage_base,
+ Isolate* isolate = GetIsolateForHeapSandbox(*this);
+ return ReadExternalPointerField(kExternalPointerOffset, isolate,
kTypedArrayExternalPointerTag);
}
@@ -320,8 +362,9 @@ MaybeHandle<JSTypedArray> JSTypedArray::Validate(Isolate* isolate,
}
DEF_GETTER(JSDataView, data_pointer, void*) {
+ Isolate* isolate = GetIsolateForHeapSandbox(*this);
return reinterpret_cast<void*>(ReadExternalPointerField(
- kDataPointerOffset, cage_base, kDataViewDataPointerTag));
+ kDataPointerOffset, isolate, kDataViewDataPointerTag));
}
void JSDataView::AllocateExternalPointerEntries(Isolate* isolate) {
diff --git a/chromium/v8/src/objects/js-array-buffer.cc b/chromium/v8/src/objects/js-array-buffer.cc
index 074a8dc1bf2..91175309f99 100644
--- a/chromium/v8/src/objects/js-array-buffer.cc
+++ b/chromium/v8/src/objects/js-array-buffer.cc
@@ -35,11 +35,12 @@ bool CanonicalNumericIndexString(Isolate* isolate, Handle<Object> s,
}
} // anonymous namespace
-void JSArrayBuffer::Setup(SharedFlag shared,
+void JSArrayBuffer::Setup(SharedFlag shared, ResizableFlag resizable,
std::shared_ptr<BackingStore> backing_store) {
clear_padding();
set_bit_field(0);
set_is_shared(shared == SharedFlag::kShared);
+ set_is_resizable(resizable == ResizableFlag::kResizable);
set_is_detachable(shared != SharedFlag::kShared);
for (int i = 0; i < v8::ArrayBuffer::kEmbedderFieldCount; i++) {
SetEmbedderField(i, Smi::zero());
@@ -61,10 +62,17 @@ void JSArrayBuffer::Setup(SharedFlag shared,
void JSArrayBuffer::Attach(std::shared_ptr<BackingStore> backing_store) {
DCHECK_NOT_NULL(backing_store);
DCHECK_EQ(is_shared(), backing_store->is_shared());
+ DCHECK_EQ(is_resizable(), backing_store->is_resizable());
DCHECK(!was_detached());
Isolate* isolate = GetIsolate();
set_backing_store(isolate, backing_store->buffer_start());
- set_byte_length(backing_store->byte_length());
+ if (is_shared() && is_resizable()) {
+ // GSABs need to read their byte_length from the BackingStore. Maintain the
+ // invariant that their byte_length field is always 0.
+ set_byte_length(0);
+ } else {
+ set_byte_length(backing_store->byte_length());
+ }
if (backing_store->is_wasm_memory()) set_is_detachable(false);
if (!backing_store->free_on_destruct()) set_is_external(true);
Heap* heap = isolate->heap();
@@ -86,9 +94,12 @@ void JSArrayBuffer::Detach(bool force_for_wasm_memory) {
}
Isolate* const isolate = GetIsolate();
- if (backing_store()) {
- std::shared_ptr<BackingStore> backing_store;
- backing_store = RemoveExtension();
+ ArrayBufferExtension* extension = this->extension();
+
+ if (extension) {
+ DisallowGarbageCollection disallow_gc;
+ isolate->heap()->DetachArrayBufferExtension(*this, extension);
+ std::shared_ptr<BackingStore> backing_store = RemoveExtension();
CHECK_IMPLIES(force_for_wasm_memory, backing_store->is_wasm_memory());
}
@@ -151,14 +162,14 @@ void JSArrayBuffer::YoungMarkExtensionPromoted() {
Handle<JSArrayBuffer> JSTypedArray::GetBuffer() {
Isolate* isolate = GetIsolate();
Handle<JSTypedArray> self(*this, isolate);
- DCHECK(IsTypedArrayElementsKind(self->GetElementsKind()));
-
+ DCHECK(IsTypedArrayOrRabGsabTypedArrayElementsKind(self->GetElementsKind()));
Handle<JSArrayBuffer> array_buffer(JSArrayBuffer::cast(self->buffer()),
isolate);
if (!is_on_heap()) {
// Already is off heap, so return the existing buffer.
return array_buffer;
}
+ DCHECK(!array_buffer->is_resizable());
// The existing array buffer should be empty.
DCHECK_NULL(array_buffer->backing_store());
@@ -179,7 +190,8 @@ Handle<JSArrayBuffer> JSTypedArray::GetBuffer() {
}
// Attach the backing store to the array buffer.
- array_buffer->Setup(SharedFlag::kNotShared, std::move(backing_store));
+ array_buffer->Setup(SharedFlag::kNotShared, ResizableFlag::kNotResizable,
+ std::move(backing_store));
// Clear the elements of the typed array.
self->set_elements(ReadOnlyRoots(isolate).empty_byte_array());
@@ -267,6 +279,7 @@ ExternalArrayType JSTypedArray::type() {
return kExternal##Type##Array;
TYPED_ARRAYS(ELEMENTS_KIND_TO_ARRAY_TYPE)
+ RAB_GSAB_TYPED_ARRAYS_WITH_TYPED_ARRAY_TYPE(ELEMENTS_KIND_TO_ARRAY_TYPE)
#undef ELEMENTS_KIND_TO_ARRAY_TYPE
default:
@@ -274,13 +287,14 @@ ExternalArrayType JSTypedArray::type() {
}
}
-size_t JSTypedArray::element_size() {
+size_t JSTypedArray::element_size() const {
switch (map().elements_kind()) {
#define ELEMENTS_KIND_TO_ELEMENT_SIZE(Type, type, TYPE, ctype) \
case TYPE##_ELEMENTS: \
return sizeof(ctype);
TYPED_ARRAYS(ELEMENTS_KIND_TO_ELEMENT_SIZE)
+ RAB_GSAB_TYPED_ARRAYS(ELEMENTS_KIND_TO_ELEMENT_SIZE)
#undef ELEMENTS_KIND_TO_ELEMENT_SIZE
default:
@@ -288,5 +302,24 @@ size_t JSTypedArray::element_size() {
}
}
+size_t JSTypedArray::LengthTrackingGsabBackedTypedArrayLength(
+ Isolate* isolate, Address raw_array) {
+ // TODO(v8:11111): Cache the last seen length in JSArrayBuffer and use it
+ // in bounds checks to minimize the need for calling this function.
+ DCHECK(FLAG_harmony_rab_gsab);
+ DisallowGarbageCollection no_gc;
+ DisallowJavascriptExecution no_js(isolate);
+ JSTypedArray array = JSTypedArray::cast(Object(raw_array));
+ CHECK(array.is_length_tracking());
+ JSArrayBuffer buffer = array.buffer();
+ CHECK(buffer.is_resizable());
+ CHECK(buffer.is_shared());
+ size_t backing_byte_length =
+ buffer.GetBackingStore()->byte_length(std::memory_order_seq_cst);
+ CHECK_GE(backing_byte_length, array.byte_offset());
+ auto element_byte_size = ElementsKindToByteSize(array.GetElementsKind());
+ return (backing_byte_length - array.byte_offset()) / element_byte_size;
+}
+
} // namespace internal
} // namespace v8
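
Note: the new LengthTrackingGsabBackedTypedArrayLength above derives the element count of a length-tracking, GSAB-backed view from the shared backing store instead of a cached length field. A minimal standalone C++ sketch of that arithmetic follows; BackingStore and LengthTrackingView here are illustrative types, not V8's.

#include <atomic>
#include <cassert>
#include <cstddef>
#include <cstdio>

struct BackingStore {
  std::atomic<size_t> byte_length{0};  // grows when the GSAB grows
};

struct LengthTrackingView {
  BackingStore* store;
  size_t byte_offset;
  size_t element_byte_size;

  // Mirrors (backing_byte_length - byte_offset) / element_byte_size.
  size_t length() const {
    size_t backing = store->byte_length.load(std::memory_order_seq_cst);
    assert(backing >= byte_offset);
    return (backing - byte_offset) / element_byte_size;
  }
};

int main() {
  BackingStore store;
  store.byte_length = 32;
  LengthTrackingView view{&store, 8, 4};         // 4-byte elements, offset 8
  std::printf("length = %zu\n", view.length());  // (32 - 8) / 4 == 6
  store.byte_length = 64;                        // simulate a grow() of the buffer
  std::printf("length = %zu\n", view.length());  // (64 - 8) / 4 == 14
  return 0;
}
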
diff --git a/chromium/v8/src/objects/js-array-buffer.h b/chromium/v8/src/objects/js-array-buffer.h
index 3ec5e0d5177..f723380772b 100644
--- a/chromium/v8/src/objects/js-array-buffer.h
+++ b/chromium/v8/src/objects/js-array-buffer.h
@@ -77,13 +77,18 @@ class JSArrayBuffer
// [is_asmjs_memory]: true => this buffer was once used as asm.js memory.
DECL_BOOLEAN_ACCESSORS(is_asmjs_memory)
- // [is_shared]: tells whether this is an ArrayBuffer or a SharedArrayBuffer.
+ // [is_shared]: true if this is a SharedArrayBuffer or a
+ // GrowableSharedArrayBuffer.
DECL_BOOLEAN_ACCESSORS(is_shared)
+ // [is_resizable]: true if this is a ResizableArrayBuffer or a
+ // GrowableSharedArrayBuffer.
+ DECL_BOOLEAN_ACCESSORS(is_resizable)
+
// Initializes the fields of the ArrayBuffer. The provided backing_store can
// be nullptr. If it is not nullptr, then the function registers it with
// src/heap/array-buffer-tracker.h.
- V8_EXPORT_PRIVATE void Setup(SharedFlag shared,
+ V8_EXPORT_PRIVATE void Setup(SharedFlag shared, ResizableFlag resizable,
std::shared_ptr<BackingStore> backing_store);
// Attaches the backing store to an already constructed empty ArrayBuffer.
@@ -167,7 +172,7 @@ class ArrayBufferExtension : public Malloced {
std::atomic<GcState> young_gc_state_;
std::shared_ptr<BackingStore> backing_store_;
ArrayBufferExtension* next_;
- std::size_t accounting_length_;
+ std::atomic<size_t> accounting_length_;
GcState young_gc_state() {
return young_gc_state_.load(std::memory_order_relaxed);
@@ -205,10 +210,16 @@ class ArrayBufferExtension : public Malloced {
std::shared_ptr<BackingStore> backing_store() { return backing_store_; }
BackingStore* backing_store_raw() { return backing_store_.get(); }
- size_t accounting_length() { return accounting_length_; }
+ size_t accounting_length() {
+ return accounting_length_.load(std::memory_order_relaxed);
+ }
void set_accounting_length(size_t accounting_length) {
- accounting_length_ = accounting_length;
+ accounting_length_.store(accounting_length, std::memory_order_relaxed);
+ }
+
+ size_t ClearAccountingLength() {
+ return accounting_length_.exchange(0, std::memory_order_relaxed);
}
std::shared_ptr<BackingStore> RemoveBackingStore() {
@@ -253,6 +264,9 @@ class JSTypedArray
// eventually.
static constexpr size_t kMaxLength = v8::TypedArray::kMaxLength;
+ // Bit positions for [bit_field].
+ DEFINE_TORQUE_GENERATED_JS_TYPED_ARRAY_FLAGS()
+
// [length]: length of typed array in elements.
DECL_PRIMITIVE_GETTER(length, size_t)
@@ -265,7 +279,7 @@ class JSTypedArray
PropertyDescriptor* desc, Maybe<ShouldThrow> should_throw);
ExternalArrayType type();
- V8_EXPORT_PRIVATE size_t element_size();
+ V8_EXPORT_PRIVATE size_t element_size() const;
V8_EXPORT_PRIVATE Handle<JSArrayBuffer> GetBuffer();
@@ -290,6 +304,14 @@ class JSTypedArray
inline bool is_on_heap() const;
inline bool is_on_heap(AcquireLoadTag tag) const;
+ DECL_BOOLEAN_ACCESSORS(is_length_tracking)
+ DECL_BOOLEAN_ACCESSORS(is_backed_by_rab)
+ inline bool IsVariableLength() const;
+ inline size_t GetLength() const;
+
+ static size_t LengthTrackingGsabBackedTypedArrayLength(Isolate* isolate,
+ Address raw_array);
+
// Note: this is a pointer compression specific optimization.
// Normally, on-heap typed arrays contain HeapObject value in |base_pointer|
// field and an offset in |external_pointer|.
@@ -346,6 +368,9 @@ class JSTypedArray
friend class Factory;
DECL_PRIMITIVE_SETTER(length, size_t)
+ // Reads the "length" field, doesn't assert the TypedArray is not RAB / GSAB
+ // backed.
+ inline size_t LengthUnchecked() const;
DECL_GETTER(external_pointer, Address)
DECL_GETTER(external_pointer_raw, ExternalPointer_t)
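
Note: accounting_length_ above becomes a std::atomic<size_t> with relaxed loads and stores, plus ClearAccountingLength, which reads and zeroes the counter in one atomic step. A minimal standalone sketch of that pattern; AccountingLength is an illustrative class, not the V8 one.

#include <atomic>
#include <cstddef>
#include <cstdio>

class AccountingLength {
 public:
  size_t get() const { return value_.load(std::memory_order_relaxed); }
  void set(size_t v) { value_.store(v, std::memory_order_relaxed); }
  // Returns the old value and leaves 0 behind, in a single atomic exchange.
  size_t clear() { return value_.exchange(0, std::memory_order_relaxed); }

 private:
  std::atomic<size_t> value_{0};
};

int main() {
  AccountingLength acc;
  acc.set(4096);
  size_t freed = acc.clear();
  std::printf("freed %zu bytes, remaining %zu\n", freed, acc.get());
  return 0;
}
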
diff --git a/chromium/v8/src/objects/js-array-buffer.tq b/chromium/v8/src/objects/js-array-buffer.tq
index 72e74cc99bd..ddd90d4c815 100644
--- a/chromium/v8/src/objects/js-array-buffer.tq
+++ b/chromium/v8/src/objects/js-array-buffer.tq
@@ -8,11 +8,13 @@ bitfield struct JSArrayBufferFlags extends uint32 {
was_detached: bool: 1 bit;
is_asm_js_memory: bool: 1 bit;
is_shared: bool: 1 bit;
+ is_resizable: bool: 1 bit;
}
@generateCppClass
extern class JSArrayBuffer extends JSObject {
byte_length: uintptr;
+ max_byte_length: uintptr;
backing_store: ExternalPointer;
extension: RawPtr;
bit_field: JSArrayBufferFlags;
@@ -29,10 +31,16 @@ macro IsDetachedBuffer(buffer: JSArrayBuffer): bool {
return buffer.bit_field.was_detached;
}
+@export
macro IsSharedArrayBuffer(buffer: JSArrayBuffer): bool {
return buffer.bit_field.is_shared;
}
+@export
+macro IsResizableArrayBuffer(buffer: JSArrayBuffer): bool {
+ return buffer.bit_field.is_resizable;
+}
+
@abstract
@generateCppClass
extern class JSArrayBufferView extends JSObject {
@@ -41,11 +49,35 @@ extern class JSArrayBufferView extends JSObject {
byte_length: uintptr;
}
+// We have 4 different TypedArrays:
+// 1) Normal (backed by AB / SAB) or non-length tracking backed by GSAB
+//    (can't go oob once constructed)
+// 2) Non-length tracking backed by RAB (can go oob once constructed)
+// 3) Length-tracking backed by RAB (JSArrayBuffer stores the length)
+// 4) Length-tracking backed by GSAB (BackingStore stores the length)
+bitfield struct JSTypedArrayFlags extends uint32 {
+ is_length_tracking: bool: 1 bit;
+ is_backed_by_rab: bool: 1 bit;
+}
+
@generateCppClass
extern class JSTypedArray extends JSArrayBufferView {
length: uintptr;
external_pointer: ExternalPointer;
base_pointer: ByteArray|Smi;
+ bit_field: JSTypedArrayFlags;
+ // Pads header size to be a multiple of kTaggedSize.
+ @if(TAGGED_SIZE_8_BYTES) optional_padding: uint32;
+ @ifnot(TAGGED_SIZE_8_BYTES) optional_padding: void;
+}
+
+@export
+macro IsVariableLengthTypedArray(array: JSTypedArray): bool {
+ return array.bit_field.is_length_tracking || array.bit_field.is_backed_by_rab;
+}
+
+@export
+macro IsLengthTrackingTypedArray(array: JSTypedArray): bool {
+ return array.bit_field.is_length_tracking;
}
@generateCppClass
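
Note: a small standalone sketch of how the two new bit-field flags map onto the four cases listed in the comment above; Classify is a hypothetical helper, not part of V8.

#include <cstdio>

const char* Classify(bool is_length_tracking, bool is_backed_by_rab) {
  if (!is_length_tracking && !is_backed_by_rab)
    return "fixed length, AB/SAB/GSAB backed (never goes out of bounds)";
  if (!is_length_tracking && is_backed_by_rab)
    return "fixed length, RAB backed (can go out of bounds)";
  if (is_length_tracking && is_backed_by_rab)
    return "length-tracking, RAB backed (JSArrayBuffer holds the length)";
  return "length-tracking, GSAB backed (BackingStore holds the length)";
}

int main() {
  std::printf("%s\n", Classify(true, false));
  return 0;
}
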
diff --git a/chromium/v8/src/objects/js-array.h b/chromium/v8/src/objects/js-array.h
index a8b336d2be9..776cb4446b4 100644
--- a/chromium/v8/src/objects/js-array.h
+++ b/chromium/v8/src/objects/js-array.h
@@ -58,8 +58,8 @@ class JSArray : public JSObject {
// Initializes the array to a certain length.
inline bool AllowsSetLength();
- V8_EXPORT_PRIVATE static void SetLength(Handle<JSArray> array,
- uint32_t length);
+ V8_EXPORT_PRIVATE static Maybe<bool> SetLength(Handle<JSArray> array,
+ uint32_t length);
// Set the content of the array to the content of storage.
static inline void SetContent(Handle<JSArray> array,
diff --git a/chromium/v8/src/objects/js-function-inl.h b/chromium/v8/src/objects/js-function-inl.h
index 5c8cb5b644a..b0a53f58759 100644
--- a/chromium/v8/src/objects/js-function-inl.h
+++ b/chromium/v8/src/objects/js-function-inl.h
@@ -91,7 +91,7 @@ void JSFunction::MarkForOptimization(ConcurrencyMode mode) {
mode = ConcurrencyMode::kNotConcurrent;
}
- DCHECK(!is_compiled() || ActiveTierIsIgnition() || ActiveTierIsNCI() ||
+ DCHECK(!is_compiled() || ActiveTierIsIgnition() ||
ActiveTierIsMidtierTurboprop() || ActiveTierIsBaseline());
DCHECK(!ActiveTierIsTurbofan());
DCHECK(shared().IsInterpreted());
@@ -131,8 +131,8 @@ void JSFunction::CompleteInobjectSlackTrackingIfActive() {
}
}
-template <typename LocalIsolate>
-AbstractCode JSFunction::abstract_code(LocalIsolate* isolate) {
+template <typename IsolateT>
+AbstractCode JSFunction::abstract_code(IsolateT* isolate) {
if (ActiveTierIsIgnition()) {
return AbstractCode::cast(shared().GetBytecodeArray(isolate));
} else {
@@ -206,27 +206,28 @@ void JSFunction::set_context(HeapObject value, WriteBarrierMode mode) {
CONDITIONAL_WRITE_BARRIER(*this, kContextOffset, value, mode);
}
-ACCESSORS_CHECKED(JSFunction, prototype_or_initial_map, HeapObject,
- kPrototypeOrInitialMapOffset, map().has_prototype_slot())
+RELEASE_ACQUIRE_ACCESSORS_CHECKED(JSFunction, prototype_or_initial_map,
+ HeapObject, kPrototypeOrInitialMapOffset,
+ map().has_prototype_slot())
DEF_GETTER(JSFunction, has_prototype_slot, bool) {
return map(cage_base).has_prototype_slot();
}
DEF_GETTER(JSFunction, initial_map, Map) {
- return Map::cast(prototype_or_initial_map(cage_base));
+ return Map::cast(prototype_or_initial_map(cage_base, kAcquireLoad));
}
DEF_GETTER(JSFunction, has_initial_map, bool) {
DCHECK(has_prototype_slot(cage_base));
- return prototype_or_initial_map(cage_base).IsMap(cage_base);
+ return prototype_or_initial_map(cage_base, kAcquireLoad).IsMap(cage_base);
}
DEF_GETTER(JSFunction, has_instance_prototype, bool) {
DCHECK(has_prototype_slot(cage_base));
return has_initial_map(cage_base) ||
- !prototype_or_initial_map(cage_base).IsTheHole(
- GetReadOnlyRoots(cage_base));
+ !prototype_or_initial_map(cage_base, kAcquireLoad)
+ .IsTheHole(GetReadOnlyRoots(cage_base));
}
DEF_GETTER(JSFunction, has_prototype, bool) {
@@ -251,7 +252,7 @@ DEF_GETTER(JSFunction, instance_prototype, HeapObject) {
return initial_map(cage_base).prototype(cage_base);
// When there is no initial map and the prototype is a JSReceiver, the
// initial map field is used for the prototype field.
- return HeapObject::cast(prototype_or_initial_map(cage_base));
+ return HeapObject::cast(prototype_or_initial_map(cage_base, kAcquireLoad));
}
DEF_GETTER(JSFunction, prototype, Object) {
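
Note: prototype_or_initial_map above moves from plain accessors to release/acquire accessors, so writes use kReleaseStore and reads use kAcquireLoad. A minimal standalone sketch of the same memory-ordering pairing with std::atomic; FunctionLike and Map here are illustrative, not V8 types.

#include <atomic>
#include <cstdio>

struct Map { int instance_size; };

class FunctionLike {
 public:
  void set_prototype_or_initial_map(Map* m) {
    slot_.store(m, std::memory_order_release);  // corresponds to kReleaseStore
  }
  Map* prototype_or_initial_map() const {
    return slot_.load(std::memory_order_acquire);  // corresponds to kAcquireLoad
  }

 private:
  std::atomic<Map*> slot_{nullptr};
};

int main() {
  static Map initial_map{64};
  FunctionLike fn;
  fn.set_prototype_or_initial_map(&initial_map);
  std::printf("instance_size = %d\n",
              fn.prototype_or_initial_map()->instance_size);
  return 0;
}
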
diff --git a/chromium/v8/src/objects/js-function.cc b/chromium/v8/src/objects/js-function.cc
index 35010be838e..b191746fae2 100644
--- a/chromium/v8/src/objects/js-function.cc
+++ b/chromium/v8/src/objects/js-function.cc
@@ -101,9 +101,6 @@ bool HighestTierOf(CodeKinds kinds, CodeKind* highest_tier) {
} else if ((kinds & CodeKindFlag::BASELINE) != 0) {
*highest_tier = CodeKind::BASELINE;
return true;
- } else if ((kinds & CodeKindFlag::NATIVE_CONTEXT_INDEPENDENT) != 0) {
- *highest_tier = CodeKind::NATIVE_CONTEXT_INDEPENDENT;
- return true;
} else if ((kinds & CodeKindFlag::INTERPRETED_FUNCTION) != 0) {
*highest_tier = CodeKind::INTERPRETED_FUNCTION;
return true;
@@ -135,7 +132,6 @@ CodeKind JSFunction::GetActiveTier() const {
DCHECK(highest_tier == CodeKind::TURBOFAN ||
highest_tier == CodeKind::BASELINE ||
highest_tier == CodeKind::TURBOPROP ||
- highest_tier == CodeKind::NATIVE_CONTEXT_INDEPENDENT ||
highest_tier == CodeKind::INTERPRETED_FUNCTION);
return highest_tier;
}
@@ -145,11 +141,6 @@ bool JSFunction::ActiveTierIsTurbofan() const {
return GetActiveTier() == CodeKind::TURBOFAN;
}
-bool JSFunction::ActiveTierIsNCI() const {
- if (!shared().HasBytecodeArray()) return false;
- return GetActiveTier() == CodeKind::NATIVE_CONTEXT_INDEPENDENT;
-}
-
bool JSFunction::ActiveTierIsBaseline() const {
return GetActiveTier() == CodeKind::BASELINE;
}
@@ -376,7 +367,6 @@ void JSFunction::InitializeFeedbackCell(
const bool needs_feedback_vector =
!FLAG_lazy_feedback_allocation || FLAG_always_opt ||
- function->shared().may_have_cached_code() ||
// We also need a feedback vector for certain log events, collecting type
// profile and more precise code coverage.
FLAG_log_function_events || !isolate->is_best_effort_code_coverage() ||
@@ -410,7 +400,7 @@ void SetInstancePrototype(Isolate* isolate, Handle<JSFunction> function,
// Put the value in the initial map field until an initial map is needed.
// At that point, a new initial map is created and the prototype is put
// into the initial map where it belongs.
- function->set_prototype_or_initial_map(*value);
+ function->set_prototype_or_initial_map(*value, kReleaseStore);
} else {
Handle<Map> new_map =
Map::Copy(isolate, initial_map, "SetInstancePrototype");
@@ -435,7 +425,7 @@ void SetInstancePrototype(Isolate* isolate, Handle<JSFunction> function,
// Put the value in the initial map field until an initial map is
// needed. At that point, a new initial map is created and the
// prototype is put into the initial map where it belongs.
- function->set_prototype_or_initial_map(*value);
+ function->set_prototype_or_initial_map(*value, kReleaseStore);
if (value->IsJSObject()) {
// Optimize as prototype to detach it from its transition tree.
JSObject::OptimizeAsPrototype(Handle<JSObject>::cast(value));
@@ -498,7 +488,7 @@ void JSFunction::SetInitialMap(Isolate* isolate, Handle<JSFunction> function,
Map::SetPrototype(isolate, map, prototype);
}
map->SetConstructor(*constructor);
- function->set_prototype_or_initial_map(*map);
+ function->set_prototype_or_initial_map(*map, kReleaseStore);
if (FLAG_log_maps) {
LOG(isolate, MapEvent("InitialMap", Handle<Map>(), map, "",
SharedFunctionInfo::DebugName(
@@ -806,6 +796,55 @@ MaybeHandle<Map> JSFunction::GetDerivedMap(Isolate* isolate,
return map;
}
+Handle<Map> JSFunction::GetDerivedRabGsabMap(Isolate* isolate,
+ Handle<JSFunction> constructor,
+ Handle<JSReceiver> new_target) {
+ {
+ DisallowHeapAllocation no_alloc;
+ NativeContext context = isolate->context().native_context();
+ if (*new_target == context.uint8_array_fun()) {
+ return handle(context.rab_gsab_uint8_array_map(), isolate);
+ }
+ if (*new_target == context.int8_array_fun()) {
+ return handle(context.rab_gsab_int8_array_map(), isolate);
+ }
+ if (*new_target == context.uint16_array_fun()) {
+ return handle(context.rab_gsab_uint16_array_map(), isolate);
+ }
+ if (*new_target == context.int16_array_fun()) {
+ return handle(context.rab_gsab_int16_array_map(), isolate);
+ }
+ if (*new_target == context.uint32_array_fun()) {
+ return handle(context.rab_gsab_uint32_array_map(), isolate);
+ }
+ if (*new_target == context.int32_array_fun()) {
+ return handle(context.rab_gsab_int32_array_map(), isolate);
+ }
+ if (*new_target == context.float32_array_fun()) {
+ return handle(context.rab_gsab_float32_array_map(), isolate);
+ }
+ if (*new_target == context.float64_array_fun()) {
+ return handle(context.rab_gsab_float64_array_map(), isolate);
+ }
+ if (*new_target == context.biguint64_array_fun()) {
+ return handle(context.rab_gsab_biguint64_array_map(), isolate);
+ }
+ if (*new_target == context.bigint64_array_fun()) {
+ return handle(context.rab_gsab_bigint64_array_map(), isolate);
+ }
+ }
+
+ // This only happens when subclassing TypedArrays. Create a new map with the
+ // corresponding RAB / GSAB ElementsKind. Note: the map is not cached and
+ // reused -> every array gets a unique map, making ICs slow.
+ Handle<Map> map =
+ GetDerivedMap(isolate, constructor, new_target).ToHandleChecked();
+ Handle<Map> rab_gsab_map = Map::Copy(isolate, map, "RAB / GSAB");
+ rab_gsab_map->set_elements_kind(
+ GetCorrespondingRabGsabElementsKind(map->elements_kind()));
+ return rab_gsab_map;
+}
+
int JSFunction::ComputeInstanceSizeWithMinSlack(Isolate* isolate) {
CHECK(has_initial_map());
if (initial_map().IsInobjectSlackTrackingInProgress()) {
diff --git a/chromium/v8/src/objects/js-function.h b/chromium/v8/src/objects/js-function.h
index 76af98efe72..9d0fc533f48 100644
--- a/chromium/v8/src/objects/js-function.h
+++ b/chromium/v8/src/objects/js-function.h
@@ -55,7 +55,7 @@ class JSBoundFunction
class JSFunction : public JSFunctionOrBoundFunction {
public:
// [prototype_or_initial_map]:
- DECL_ACCESSORS(prototype_or_initial_map, HeapObject)
+ DECL_RELEASE_ACQUIRE_ACCESSORS(prototype_or_initial_map, HeapObject)
// [shared]: The information about the function that
// can be shared by instances.
@@ -93,8 +93,8 @@ class JSFunction : public JSFunctionOrBoundFunction {
// Get the abstract code associated with the function, which will either be
// a Code object or a BytecodeArray.
- template <typename LocalIsolate>
- inline AbstractCode abstract_code(LocalIsolate* isolate);
+ template <typename IsolateT>
+ inline AbstractCode abstract_code(IsolateT* isolate);
// The predicates for querying code kinds related to this function have
// specific terminology:
@@ -122,7 +122,6 @@ class JSFunction : public JSFunctionOrBoundFunction {
CodeKind GetActiveTier() const;
V8_EXPORT_PRIVATE bool ActiveTierIsIgnition() const;
bool ActiveTierIsTurbofan() const;
- bool ActiveTierIsNCI() const;
bool ActiveTierIsBaseline() const;
bool ActiveTierIsIgnitionOrBaseline() const;
bool ActiveTierIsMidtierTurboprop() const;
@@ -238,6 +237,11 @@ class JSFunction : public JSFunctionOrBoundFunction {
Isolate* isolate, Handle<JSFunction> constructor,
Handle<JSReceiver> new_target);
+ // Like GetDerivedMap, but returns a map with a RAB / GSAB ElementsKind.
+ static V8_WARN_UNUSED_RESULT Handle<Map> GetDerivedRabGsabMap(
+ Isolate* isolate, Handle<JSFunction> constructor,
+ Handle<JSReceiver> new_target);
+
// Get and set the prototype property on a JSFunction. If the
// function has an initial map the prototype is set on the initial
// map. Otherwise, the prototype is put in the initial map field
diff --git a/chromium/v8/src/objects/js-locale.cc b/chromium/v8/src/objects/js-locale.cc
index a8da4a96128..236c673ca36 100644
--- a/chromium/v8/src/objects/js-locale.cc
+++ b/chromium/v8/src/objects/js-locale.cc
@@ -20,10 +20,15 @@
#include "src/objects/intl-objects.h"
#include "src/objects/js-locale-inl.h"
#include "src/objects/objects-inl.h"
+#include "unicode/calendar.h"
#include "unicode/char16ptr.h"
+#include "unicode/coll.h"
+#include "unicode/dtptngen.h"
#include "unicode/localebuilder.h"
#include "unicode/locid.h"
+#include "unicode/ucal.h"
#include "unicode/uloc.h"
+#include "unicode/ulocdata.h"
#include "unicode/unistr.h"
namespace v8 {
@@ -165,6 +170,11 @@ bool IsUnicodeVariantSubtag(const std::string& value) {
bool IsExtensionSingleton(const std::string& value) {
return IsAlphanum(value, 1, 1);
}
+
+int32_t weekdayFromEDaysOfWeek(icu::Calendar::EDaysOfWeek eDaysOfWeek) {
+ return (eDaysOfWeek == icu::Calendar::SUNDAY) ? 7 : eDaysOfWeek - 1;
+}
+
} // namespace
bool JSLocale::Is38AlphaNumList(const std::string& value) {
@@ -450,6 +460,340 @@ MaybeHandle<JSLocale> JSLocale::Minimize(Isolate* isolate,
return Construct(isolate, result);
}
+template <typename T>
+MaybeHandle<JSArray> GetKeywordValuesFromLocale(
+ Isolate* isolate, const char* key, const char* unicode_key,
+ const icu::Locale& locale,
+ const std::map<std::string, std::string>& substitutions) {
+ Factory* factory = isolate->factory();
+ UErrorCode status = U_ZERO_ERROR;
+ std::string ext =
+ locale.getUnicodeKeywordValue<std::string>(unicode_key, status);
+ if (!ext.empty()) {
+ Handle<FixedArray> fixed_array = factory->NewFixedArray(1);
+ Handle<String> str = factory->NewStringFromAsciiChecked(ext.c_str());
+ fixed_array->set(0, *str);
+ return factory->NewJSArrayWithElements(fixed_array);
+ }
+ status = U_ZERO_ERROR;
+ std::unique_ptr<icu::StringEnumeration> enumeration(
+ T::getKeywordValuesForLocale(key, locale, true, status));
+ if (U_FAILURE(status)) {
+ THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kIcuError),
+ JSArray);
+ }
+ int32_t count = enumeration->count(status);
+ if (U_FAILURE(status)) {
+ THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kIcuError),
+ JSArray);
+ }
+ Handle<FixedArray> fixed_array = factory->NewFixedArray(count);
+
+ int32_t index = 0;
+ for (const char* item = enumeration->next(nullptr, status);
+ U_SUCCESS(status) && item != nullptr;
+ item = enumeration->next(nullptr, status)) {
+ auto mapped = substitutions.find(item);
+ if (mapped != substitutions.end()) {
+ item = mapped->second.c_str();
+ if (*item == '\0') {
+ continue;
+ }
+ }
+ Handle<String> str = factory->NewStringFromAsciiChecked(item);
+ fixed_array->set(index++, *str);
+ }
+ if (U_FAILURE(status)) {
+ THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kIcuError),
+ JSArray);
+ }
+ return factory->NewJSArrayWithElements(fixed_array);
+}
+
+MaybeHandle<JSArray> JSLocale::Calendars(Isolate* isolate,
+ Handle<JSLocale> locale) {
+ icu::Locale icu_locale(*(locale->icu_locale().raw()));
+ const std::map<std::string, std::string> substitutions(
+ {{"gregorian", "gregory"}, {"ethiopic-amete-alem", "ethioaa"}});
+ return GetKeywordValuesFromLocale<icu::Calendar>(isolate, "calendar", "ca",
+ icu_locale, substitutions);
+}
+
+MaybeHandle<JSArray> JSLocale::Collations(Isolate* isolate,
+ Handle<JSLocale> locale) {
+ icu::Locale icu_locale(*(locale->icu_locale().raw()));
+ const std::map<std::string, std::string> substitutions(
+ {{"standard", ""}, {"search", ""}});
+ return GetKeywordValuesFromLocale<icu::Collator>(isolate, "collations", "co",
+ icu_locale, substitutions);
+}
+
+MaybeHandle<JSArray> JSLocale::HourCycles(Isolate* isolate,
+ Handle<JSLocale> locale) {
+ // Let preferred be loc.[[HourCycle]].
+ // Let locale be loc.[[Locale]].
+ icu::Locale icu_locale(*(locale->icu_locale().raw()));
+ Factory* factory = isolate->factory();
+
+ // Assert: locale matches the unicode_locale_id production.
+
+ // Let list be a List of 1 or more hour cycle identifiers, which must be
+ // String values indicating either the 12-hour format ("h11", "h12") or the
+ // 24-hour format ("h23", "h24"), sorted in descending preference of those in
+ // common use in the locale for date and time formatting.
+
+ // Return CreateArrayFromListAndPreferred( list, preferred ).
+ Handle<FixedArray> fixed_array = factory->NewFixedArray(1);
+ UErrorCode status = U_ZERO_ERROR;
+ std::string ext =
+ icu_locale.getUnicodeKeywordValue<std::string>("hc", status);
+ if (!ext.empty()) {
+ Handle<String> str = factory->NewStringFromAsciiChecked(ext.c_str());
+ fixed_array->set(0, *str);
+ return factory->NewJSArrayWithElements(fixed_array);
+ }
+ status = U_ZERO_ERROR;
+ std::unique_ptr<icu::DateTimePatternGenerator> generator(
+ icu::DateTimePatternGenerator::createInstance(icu_locale, status));
+ if (U_FAILURE(status)) {
+ THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kIcuError),
+ JSArray);
+ }
+
+ UDateFormatHourCycle hc = generator->getDefaultHourCycle(status);
+ if (U_FAILURE(status)) {
+ THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kIcuError),
+ JSArray);
+ }
+ Handle<String> hour_cycle;
+
+ switch (hc) {
+ case UDAT_HOUR_CYCLE_11:
+ hour_cycle = factory->h11_string();
+ break;
+ case UDAT_HOUR_CYCLE_12:
+ hour_cycle = factory->h12_string();
+ break;
+ case UDAT_HOUR_CYCLE_23:
+ hour_cycle = factory->h23_string();
+ break;
+ case UDAT_HOUR_CYCLE_24:
+ hour_cycle = factory->h24_string();
+ break;
+ default:
+ break;
+ }
+ fixed_array->set(0, *hour_cycle);
+ return factory->NewJSArrayWithElements(fixed_array);
+}
+
+MaybeHandle<JSArray> JSLocale::NumberingSystems(Isolate* isolate,
+ Handle<JSLocale> locale) {
+ // Let preferred be loc.[[NumberingSystem]].
+
+ // Let locale be loc.[[Locale]].
+ icu::Locale icu_locale(*(locale->icu_locale().raw()));
+ Factory* factory = isolate->factory();
+
+ // Assert: locale matches the unicode_locale_id production.
+
+ // Let list be a List of 1 or more numbering system identifiers, which must be
+ // String values conforming to the type sequence from UTS 35 Unicode Locale
+ // Identifier, section 3.2, sorted in descending preference of those in common
+ // use in the locale for formatting numeric values.
+
+ // Return CreateArrayFromListAndPreferred( list, preferred ).
+ UErrorCode status = U_ZERO_ERROR;
+ Handle<FixedArray> fixed_array = factory->NewFixedArray(1);
+ std::string numbering_system =
+ icu_locale.getUnicodeKeywordValue<std::string>("nu", status);
+ if (numbering_system.empty()) {
+ numbering_system = Intl::GetNumberingSystem(icu_locale);
+ }
+ Handle<String> str =
+ factory->NewStringFromAsciiChecked(numbering_system.c_str());
+
+ fixed_array->set(0, *str);
+ return factory->NewJSArrayWithElements(fixed_array);
+}
+
+MaybeHandle<Object> JSLocale::TimeZones(Isolate* isolate,
+ Handle<JSLocale> locale) {
+ // Let loc be the this value.
+
+ // Perform ? RequireInternalSlot(loc, [[InitializedLocale]])
+
+ // Let locale be loc.[[Locale]].
+ icu::Locale icu_locale(*(locale->icu_locale().raw()));
+ Factory* factory = isolate->factory();
+
+ // If the unicode_language_id production of locale does not contain the
+ // ["-" unicode_region_subtag] sequence, return undefined.
+ const char* region = icu_locale.getCountry();
+ if (region == nullptr || strlen(region) == 0) {
+ return factory->undefined_value();
+ }
+
+ // Return TimeZonesOfLocale(loc).
+
+ // Let locale be loc.[[Locale]].
+
+ // Assert: locale matches the unicode_locale_id production.
+
+ // Let region be the substring of locale corresponding to the
+ // unicode_region_subtag production of the unicode_language_id.
+
+ // Let list be a List of 1 or more time zone identifiers, which must be String
+ // values indicating a Zone or Link name of the IANA Time Zone Database,
+ // sorted in descending preference of those in common use in region.
+ int32_t index = 0;
+ UErrorCode status = U_ZERO_ERROR;
+ std::unique_ptr<icu::StringEnumeration> enumeration(
+ icu::TimeZone::createTimeZoneIDEnumeration(UCAL_ZONE_TYPE_CANONICAL,
+ region, nullptr, status));
+ if (U_FAILURE(status)) {
+ THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kIcuError),
+ JSArray);
+ }
+ int32_t count = enumeration->count(status);
+ if (U_FAILURE(status)) {
+ THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kIcuError),
+ JSArray);
+ }
+
+ // Return CreateArrayFromList( list ).
+ Handle<FixedArray> fixed_array = factory->NewFixedArray(count);
+ for (const char* item = enumeration->next(nullptr, status);
+ U_SUCCESS(status) && item != nullptr;
+ item = enumeration->next(nullptr, status)) {
+ Handle<String> str = isolate->factory()->NewStringFromAsciiChecked(item);
+ fixed_array->set(index++, *str);
+ }
+ if (U_FAILURE(status)) {
+ THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kIcuError),
+ JSArray);
+ }
+
+ return factory->NewJSArrayWithElements(fixed_array);
+}
+
+MaybeHandle<JSObject> JSLocale::TextInfo(Isolate* isolate,
+ Handle<JSLocale> locale) {
+ // Let loc be the this value.
+
+ // Perform ? RequireInternalSlot(loc, [[InitializedLocale]]).
+
+ // Let locale be loc.[[Locale]].
+
+ // Assert: locale matches the unicode_locale_id production.
+
+ Factory* factory = isolate->factory();
+ // Let info be ! ObjectCreate(%Object.prototype%).
+ Handle<JSObject> info = factory->NewJSObject(isolate->object_function());
+
+ // Let dir be "ltr".
+ Handle<String> dir = factory->ltr_string();
+
+ // If the default general ordering of characters (characterOrder) within a
+ // line in the locale is right-to-left, then
+ UErrorCode status = U_ZERO_ERROR;
+ ULayoutType orientation = uloc_getCharacterOrientation(
+ (locale->icu_locale().raw())->getName(), &status);
+ if (U_FAILURE(status)) {
+ THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kIcuError),
+ JSObject);
+ }
+  if (orientation == ULOC_LAYOUT_RTL) {
+ // Let dir be "rtl".
+ dir = factory->rtl_string();
+ }
+
+ // Perform ! CreateDataPropertyOrThrow(info, "direction", dir).
+ CHECK(JSReceiver::CreateDataProperty(
+ isolate, info, factory->direction_string(), dir, Just(kDontThrow))
+ .FromJust());
+
+ // Return info.
+ return info;
+}
+
+MaybeHandle<JSObject> JSLocale::WeekInfo(Isolate* isolate,
+ Handle<JSLocale> locale) {
+ // Let loc be the this value.
+
+ // Perform ? RequireInternalSlot(loc, [[InitializedLocale]]).
+
+ // Let locale be loc.[[Locale]].
+
+ // Assert: locale matches the unicode_locale_id production.
+ Factory* factory = isolate->factory();
+
+ // Let info be ! ObjectCreate(%Object.prototype%).
+ Handle<JSObject> info = factory->NewJSObject(isolate->object_function());
+ UErrorCode status = U_ZERO_ERROR;
+ std::unique_ptr<icu::Calendar> calendar(
+ icu::Calendar::createInstance(*(locale->icu_locale().raw()), status));
+ if (U_FAILURE(status)) {
+ THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kIcuError),
+ JSObject);
+ }
+
+ // Let fd be the weekday value indicating which day of the week is considered
+ // the 'first' day, for calendar purposes, in the locale.
+ int32_t fd = weekdayFromEDaysOfWeek(calendar->getFirstDayOfWeek());
+ bool thursday_is_weekend =
+ (UCAL_WEEKDAY != calendar->getDayOfWeekType(UCAL_THURSDAY, status));
+ bool friday_is_weekend =
+ (UCAL_WEEKDAY != calendar->getDayOfWeekType(UCAL_FRIDAY, status));
+ bool saturday_is_weekend =
+ (UCAL_WEEKDAY != calendar->getDayOfWeekType(UCAL_SATURDAY, status));
+ bool sunday_is_weekend =
+ (UCAL_WEEKDAY != calendar->getDayOfWeekType(UCAL_SUNDAY, status));
+ if (U_FAILURE(status)) {
+ THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kIcuError),
+ JSObject);
+ }
+
+ // Let ws be the weekday value indicating which day of the week is considered
+ // the starting day of the 'weekend', for calendar purposes, in the locale.
+ int32_t ws = thursday_is_weekend ? 4 : (friday_is_weekend ? 5 : 6);
+
+ // Let we be the weekday value indicating which day of the week is considered
+ // the ending day of the 'weekend', for calendar purposes, in the locale.
+ int32_t we = sunday_is_weekend ? 7 : (saturday_is_weekend ? 6 : 5);
+
+ // Let md be the minimal days required in the first week of a month or year,
+ // for calendar purposes, in the locale.
+ int32_t md = calendar->getMinimalDaysInFirstWeek();
+
+ // Perform ! CreateDataPropertyOrThrow(info, "firstDay", fd).
+ CHECK(JSReceiver::CreateDataProperty(
+ isolate, info, factory->firstDay_string(),
+ factory->NewNumberFromInt(fd), Just(kDontThrow))
+ .FromJust());
+
+ // Perform ! CreateDataPropertyOrThrow(info, "weekendStart", ws).
+ CHECK(JSReceiver::CreateDataProperty(
+ isolate, info, factory->weekendStart_string(),
+ factory->NewNumberFromInt(ws), Just(kDontThrow))
+ .FromJust());
+
+ // Perform ! CreateDataPropertyOrThrow(info, "weekendEnd", we).
+ CHECK(JSReceiver::CreateDataProperty(
+ isolate, info, factory->weekendEnd_string(),
+ factory->NewNumberFromInt(we), Just(kDontThrow))
+ .FromJust());
+
+ // Perform ! CreateDataPropertyOrThrow(info, "minimalDays", md).
+ CHECK(JSReceiver::CreateDataProperty(
+ isolate, info, factory->minimalDays_string(),
+ factory->NewNumberFromInt(md), Just(kDontThrow))
+ .FromJust());
+
+ // Return info.
+ return info;
+}
+
Handle<Object> JSLocale::Language(Isolate* isolate, Handle<JSLocale> locale) {
Factory* factory = isolate->factory();
const char* language = locale->icu_locale().raw()->getLanguage();
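
Note: weekdayFromEDaysOfWeek above converts ICU's day numbering (SUNDAY = 1 .. SATURDAY = 7) into the 1 = Monday .. 7 = Sunday numbering used in the week-info result. A standalone sketch of that mapping; WeekdayFromIcu is an illustrative name.

#include <cstdio>

int WeekdayFromIcu(int icu_day) {           // 1 = Sunday ... 7 = Saturday
  return (icu_day == 1) ? 7 : icu_day - 1;  // 1 = Monday ... 7 = Sunday
}

int main() {
  std::printf("ICU Sunday(1)   -> %d\n", WeekdayFromIcu(1));  // 7
  std::printf("ICU Monday(2)   -> %d\n", WeekdayFromIcu(2));  // 1
  std::printf("ICU Saturday(7) -> %d\n", WeekdayFromIcu(7));  // 6
  return 0;
}
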
diff --git a/chromium/v8/src/objects/js-locale.h b/chromium/v8/src/objects/js-locale.h
index d864c8272f5..5f57dca5169 100644
--- a/chromium/v8/src/objects/js-locale.h
+++ b/chromium/v8/src/objects/js-locale.h
@@ -40,6 +40,21 @@ class JSLocale : public TorqueGeneratedJSLocale<JSLocale, JSObject> {
static MaybeHandle<JSLocale> Minimize(Isolate* isolate,
Handle<JSLocale> locale);
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSArray> Calendars(
+ Isolate* isolate, Handle<JSLocale> locale);
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSArray> Collations(
+ Isolate* isolate, Handle<JSLocale> locale);
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSArray> HourCycles(
+ Isolate* isolate, Handle<JSLocale> locale);
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSArray> NumberingSystems(
+ Isolate* isolate, Handle<JSLocale> locale);
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSObject> TextInfo(
+ Isolate* isolate, Handle<JSLocale> locale);
+ V8_WARN_UNUSED_RESULT static MaybeHandle<Object> TimeZones(
+ Isolate* isolate, Handle<JSLocale> locale);
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSObject> WeekInfo(
+ Isolate* isolate, Handle<JSLocale> locale);
+
static Handle<Object> Language(Isolate* isolate, Handle<JSLocale> locale);
static Handle<Object> Script(Isolate* isolate, Handle<JSLocale> locale);
static Handle<Object> Region(Isolate* isolate, Handle<JSLocale> locale);
diff --git a/chromium/v8/src/objects/js-objects-inl.h b/chromium/v8/src/objects/js-objects-inl.h
index cbbbc9fc9ec..6d3dc9cdde4 100644
--- a/chromium/v8/src/objects/js-objects-inl.h
+++ b/chromium/v8/src/objects/js-objects-inl.h
@@ -401,25 +401,31 @@ Object JSObject::InObjectPropertyAtPut(int index, Object value,
}
void JSObject::InitializeBody(Map map, int start_offset,
- Object pre_allocated_value, Object filler_value) {
- DCHECK_IMPLIES(filler_value.IsHeapObject(),
- !ObjectInYoungGeneration(filler_value));
- DCHECK_IMPLIES(pre_allocated_value.IsHeapObject(),
- !ObjectInYoungGeneration(pre_allocated_value));
+ bool is_slack_tracking_in_progress,
+ MapWord filler_map, Object undefined_filler) {
int size = map.instance_size();
int offset = start_offset;
- if (filler_value != pre_allocated_value) {
+ if (is_slack_tracking_in_progress) {
int end_of_pre_allocated_offset =
size - (map.UnusedPropertyFields() * kTaggedSize);
DCHECK_LE(kHeaderSize, end_of_pre_allocated_offset);
+ // fill start with references to the undefined value object
while (offset < end_of_pre_allocated_offset) {
- WRITE_FIELD(*this, offset, pre_allocated_value);
+ WRITE_FIELD(*this, offset, undefined_filler);
+ offset += kTaggedSize;
+ }
+    // Fill the remainder with one-word filler objects (i.e. just a map word).
+ while (offset < size) {
+ Object fm = Object(filler_map.ptr());
+ WRITE_FIELD(*this, offset, fm);
+ offset += kTaggedSize;
+ }
+ } else {
+ while (offset < size) {
+ // fill with references to the undefined value object
+ WRITE_FIELD(*this, offset, undefined_filler);
offset += kTaggedSize;
}
- }
- while (offset < size) {
- WRITE_FIELD(*this, offset, filler_value);
- offset += kTaggedSize;
}
}
@@ -769,7 +775,6 @@ static inline bool ShouldConvertToSlowElements(JSObject object,
if (index - capacity >= JSObject::kMaxGap) return true;
*new_capacity = JSObject::NewElementsCapacity(index + 1);
DCHECK_LT(index, *new_capacity);
- // TODO(ulan): Check if it works with young large objects.
if (*new_capacity <= JSObject::kMaxUncheckedOldFastElementsLength ||
(*new_capacity <= JSObject::kMaxUncheckedFastElementsLength &&
ObjectInYoungGeneration(object))) {
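
Note: the reworked InitializeBody earlier in this file fills the pre-allocated fields of a new object with the undefined value and, while slack tracking is in progress, fills the trailing slack with one-word filler map words. A standalone sketch of that two-phase fill; the Slot enum and this InitializeBody signature are illustrative, not V8's.

#include <cstdio>
#include <vector>

enum class Slot { kUndefined, kOneWordFiller };

std::vector<Slot> InitializeBody(int total_slots, int unused_slots,
                                 bool slack_tracking_in_progress) {
  std::vector<Slot> body;
  int pre_allocated =
      slack_tracking_in_progress ? total_slots - unused_slots : total_slots;
  // Pre-allocated part: references to the undefined value.
  for (int i = 0; i < pre_allocated; ++i) body.push_back(Slot::kUndefined);
  // Slack tail: one-word fillers (just a map word each in the real heap).
  for (int i = pre_allocated; i < total_slots; ++i)
    body.push_back(Slot::kOneWordFiller);
  return body;
}

int main() {
  auto body = InitializeBody(8, 3, /*slack_tracking_in_progress=*/true);
  for (Slot s : body)
    std::printf("%s ", s == Slot::kUndefined ? "undefined" : "filler");
  std::printf("\n");  // 5x undefined, then 3x filler
  return 0;
}
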
diff --git a/chromium/v8/src/objects/js-objects.cc b/chromium/v8/src/objects/js-objects.cc
index 01df8e1524f..032296aaa48 100644
--- a/chromium/v8/src/objects/js-objects.cc
+++ b/chromium/v8/src/objects/js-objects.cc
@@ -2054,8 +2054,7 @@ MaybeHandle<FixedArray> GetOwnValuesOrEntries(Isolate* isolate,
MaybeHandle<FixedArray>());
if (get_entries) {
- Handle<FixedArray> entry_storage =
- isolate->factory()->NewUninitializedFixedArray(2);
+ Handle<FixedArray> entry_storage = isolate->factory()->NewFixedArray(2);
entry_storage->set(0, *key);
entry_storage->set(1, *value);
value = isolate->factory()->NewJSArrayWithElements(entry_storage,
@@ -2767,14 +2766,14 @@ void MigrateFastToFast(Isolate* isolate, Handle<JSObject> object,
// If the map does not add named properties, simply set the map.
if (old_map->NumberOfOwnDescriptors() ==
new_map->NumberOfOwnDescriptors()) {
- object->synchronized_set_map(*new_map);
+ object->set_map(*new_map, kReleaseStore);
return;
}
// If the map adds a new kDescriptor property, simply set the map.
PropertyDetails details = new_map->GetLastDescriptorDetails(isolate);
if (details.location() == kDescriptor) {
- object->synchronized_set_map(*new_map);
+ object->set_map(*new_map, kReleaseStore);
return;
}
@@ -2790,7 +2789,7 @@ void MigrateFastToFast(Isolate* isolate, Handle<JSObject> object,
auto value = isolate->factory()->NewHeapNumberWithHoleNaN();
object->FastPropertyAtPut(index, *value);
}
- object->synchronized_set_map(*new_map);
+ object->set_map(*new_map, kReleaseStore);
return;
}
@@ -2818,7 +2817,7 @@ void MigrateFastToFast(Isolate* isolate, Handle<JSObject> object,
// Set the new property value and do the map transition.
object->SetProperties(*new_storage);
- object->synchronized_set_map(*new_map);
+ object->set_map(*new_map, kReleaseStore);
return;
}
@@ -2831,7 +2830,7 @@ void MigrateFastToFast(Isolate* isolate, Handle<JSObject> object,
// converted to doubles.
if (!old_map->InstancesNeedRewriting(*new_map, number_of_fields, inobject,
unused, &old_number_of_fields)) {
- object->synchronized_set_map(*new_map);
+ object->set_map(*new_map, kReleaseStore);
return;
}
@@ -2946,7 +2945,7 @@ void MigrateFastToFast(Isolate* isolate, Handle<JSObject> object,
// We are storing the new map using release store after creating a filler for
// the left-over space to avoid races with the sweeper thread.
- object->synchronized_set_map(*new_map);
+ object->set_map(*new_map, kReleaseStore);
}
void MigrateFastToSlow(Isolate* isolate, Handle<JSObject> object,
@@ -3044,7 +3043,7 @@ void MigrateFastToSlow(Isolate* isolate, Handle<JSObject> object,
// We are storing the new map using release store after creating a filler for
// the left-over space to avoid races with the sweeper thread.
- object->synchronized_set_map(*new_map);
+ object->set_map(*new_map, kReleaseStore);
if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
object->SetProperties(*ord_dictionary);
@@ -3088,7 +3087,7 @@ void JSObject::MigrateToMap(Isolate* isolate, Handle<JSObject> object,
CHECK(new_map->is_dictionary_map());
// Slow-to-slow migration is trivial.
- object->synchronized_set_map(*new_map);
+ object->set_map(*new_map, kReleaseStore);
} else if (!new_map->is_dictionary_map()) {
MigrateFastToFast(isolate, object, new_map);
if (old_map->is_prototype_map()) {
@@ -3118,10 +3117,9 @@ void JSObject::MigrateToMap(Isolate* isolate, Handle<JSObject> object,
// When adding code here, add a DisallowGarbageCollection too.
}
-void JSObject::ForceSetPrototype(Handle<JSObject> object,
+void JSObject::ForceSetPrototype(Isolate* isolate, Handle<JSObject> object,
Handle<HeapObject> proto) {
// object.__proto__ = proto;
- Isolate* isolate = object->GetIsolate();
Handle<Map> old_map = Handle<Map>(object->map(), isolate);
Handle<Map> new_map = Map::Copy(isolate, old_map, "ForceSetPrototype");
Map::SetPrototype(isolate, new_map, proto);
@@ -3199,7 +3197,7 @@ void JSObject::AllocateStorageForMap(Handle<JSObject> object, Handle<Map> map) {
Object value = storage->get(i);
object->FastPropertyAtPut(index, value);
}
- object->synchronized_set_map(*map);
+ object->set_map(*map, kReleaseStore);
}
void JSObject::MigrateInstance(Isolate* isolate, Handle<JSObject> object) {
@@ -3489,7 +3487,7 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
DCHECK_LE(unused_property_fields, inobject_props);
// Transform the object.
new_map->SetInObjectUnusedPropertyFields(inobject_props);
- object->synchronized_set_map(*new_map);
+ object->set_map(*new_map, kReleaseStore);
object->SetProperties(ReadOnlyRoots(isolate).empty_fixed_array());
// Check that it really works.
DCHECK(object->HasFastProperties());
@@ -3608,7 +3606,7 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
LOG(isolate, MapEvent("SlowToFast", old_map, new_map, reason));
}
// Transform the object.
- object->synchronized_set_map(*new_map);
+ object->set_map(*new_map, kReleaseStore);
object->SetProperties(*fields);
DCHECK(object->IsJSObject());
@@ -4222,10 +4220,15 @@ bool JSObject::HasEnumerableElements() {
}
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) case TYPE##_ELEMENTS:
- TYPED_ARRAYS(TYPED_ARRAY_CASE)
+ TYPED_ARRAYS(TYPED_ARRAY_CASE) {
+ size_t length = JSTypedArray::cast(object).length();
+ return length > 0;
+ }
+
+ RAB_GSAB_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
{
- size_t length = JSTypedArray::cast(object).length();
+ size_t length = JSTypedArray::cast(object).GetLength();
return length > 0;
}
case DICTIONARY_ELEMENTS: {
@@ -4769,7 +4772,7 @@ void JSObject::SetImmutableProto(Handle<JSObject> object) {
Handle<Map> new_map =
Map::TransitionToImmutableProto(object->GetIsolate(), map);
- object->synchronized_set_map(*new_map);
+ object->set_map(*new_map, kReleaseStore);
}
void JSObject::EnsureCanContainElements(Handle<JSObject> object,
@@ -4851,9 +4854,9 @@ static ElementsKind BestFittingFastElementsKind(JSObject object) {
}
// static
-void JSObject::AddDataElement(Handle<JSObject> object, uint32_t index,
- Handle<Object> value,
- PropertyAttributes attributes) {
+Maybe<bool> JSObject::AddDataElement(Handle<JSObject> object, uint32_t index,
+ Handle<Object> value,
+ PropertyAttributes attributes) {
Isolate* isolate = object->GetIsolate();
DCHECK(object->map(isolate).is_extensible());
@@ -4896,13 +4899,15 @@ void JSObject::AddDataElement(Handle<JSObject> object, uint32_t index,
}
to = GetMoreGeneralElementsKind(kind, to);
ElementsAccessor* accessor = ElementsAccessor::ForKind(to);
- accessor->Add(object, index, value, attributes, new_capacity);
+ MAYBE_RETURN(accessor->Add(object, index, value, attributes, new_capacity),
+ Nothing<bool>());
if (object->IsJSArray(isolate) && index >= old_length) {
Handle<Object> new_length =
isolate->factory()->NewNumberFromUint(index + 1);
JSArray::cast(*object).set_length(*new_length);
}
+ return Just(true);
}
template <AllocationSiteUpdateMode update_or_check>
@@ -4969,7 +4974,15 @@ void JSObject::TransitionElementsKind(Handle<JSObject> object,
DCHECK((IsSmiElementsKind(from_kind) && IsDoubleElementsKind(to_kind)) ||
(IsDoubleElementsKind(from_kind) && IsObjectElementsKind(to_kind)));
uint32_t c = static_cast<uint32_t>(object->elements().length());
- ElementsAccessor::ForKind(to_kind)->GrowCapacityAndConvert(object, c);
+ if (ElementsAccessor::ForKind(to_kind)
+ ->GrowCapacityAndConvert(object, c)
+ .IsNothing()) {
+ // TODO(victorgomes): Temporarily forcing a fatal error here in case of
+ // overflow, until all users of TransitionElementsKind can handle
+ // exceptions.
+ FATAL("Fatal JavaScript invalid array size transitioning elements kind.");
+ UNREACHABLE();
+ }
}
}
@@ -5017,6 +5030,7 @@ int JSObject::GetFastElementsUsage() {
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) case TYPE##_ELEMENTS:
TYPED_ARRAYS(TYPED_ARRAY_CASE)
+ RAB_GSAB_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
UNREACHABLE();
}
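
Note: AddDataElement above now returns Maybe<bool> and propagates failure from the elements accessor via MAYBE_RETURN instead of returning void. A standalone sketch of the same error-propagation shape using std::optional; AddElement and max_capacity are illustrative names.

#include <cstdio>
#include <optional>
#include <vector>

// Returns std::nullopt to signal failure (an exception in the real code).
std::optional<bool> AddElement(std::vector<int>& elements, size_t index,
                               int value, size_t max_capacity) {
  if (index >= max_capacity) return std::nullopt;  // growth would overflow
  if (index >= elements.size()) elements.resize(index + 1);
  elements[index] = value;
  return true;
}

int main() {
  std::vector<int> elements;
  if (!AddElement(elements, 3, 42, /*max_capacity=*/16)) {
    std::printf("exception\n");
    return 1;
  }
  std::printf("ok, length = %zu\n", elements.size());
  return 0;
}
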
diff --git a/chromium/v8/src/objects/js-objects.h b/chromium/v8/src/objects/js-objects.h
index b1f22ed8f6f..2e94bf17a95 100644
--- a/chromium/v8/src/objects/js-objects.h
+++ b/chromium/v8/src/objects/js-objects.h
@@ -440,10 +440,9 @@ class JSObject : public TorqueGeneratedJSObject<JSObject, JSReceiver> {
const char* name, Handle<Object> value,
PropertyAttributes attributes);
- V8_EXPORT_PRIVATE static void AddDataElement(Handle<JSObject> receiver,
- uint32_t index,
- Handle<Object> value,
- PropertyAttributes attributes);
+ V8_EXPORT_PRIVATE static Maybe<bool> AddDataElement(
+ Handle<JSObject> receiver, uint32_t index, Handle<Object> value,
+ PropertyAttributes attributes);
// Extend the receiver with a single fast property appeared first in the
// passed map. This also extends the property backing store if necessary.
@@ -620,7 +619,7 @@ class JSObject : public TorqueGeneratedJSObject<JSObject, JSReceiver> {
// Forces a prototype without any of the checks that the regular SetPrototype
// would do.
- static void ForceSetPrototype(Handle<JSObject> object,
+ static void ForceSetPrototype(Isolate* isolate, Handle<JSObject> object,
Handle<HeapObject> proto);
// Convert the object to use the canonical dictionary
@@ -680,11 +679,12 @@ class JSObject : public TorqueGeneratedJSObject<JSObject, JSReceiver> {
// Initializes the body starting at |start_offset|. It is responsibility of
// the caller to initialize object header. Fill the pre-allocated fields with
- // pre_allocated_value and the rest with filler_value.
+ // undefined_value and the rest with filler_map.
// Note: this call does not update write barrier, the caller is responsible
- // to ensure that |filler_value| can be collected without WB here.
+ // to ensure that |filler_map| can be collected without WB here.
inline void InitializeBody(Map map, int start_offset,
- Object pre_allocated_value, Object filler_value);
+ bool is_slack_tracking_in_progress,
+ MapWord filler_map, Object undefined_value);
// Check whether this object references another object
bool ReferencesObject(Object obj);
@@ -702,11 +702,11 @@ class JSObject : public TorqueGeneratedJSObject<JSObject, JSReceiver> {
DECL_PRINTER(JSObject)
DECL_VERIFIER(JSObject)
#ifdef OBJECT_PRINT
- bool PrintProperties(std::ostream& os); // NOLINT
- void PrintElements(std::ostream& os); // NOLINT
+ bool PrintProperties(std::ostream& os);
+ void PrintElements(std::ostream& os);
#endif
#if defined(DEBUG) || defined(OBJECT_PRINT)
- void PrintTransitions(std::ostream& os); // NOLINT
+ void PrintTransitions(std::ostream& os);
#endif
static void PrintElementsTransition(FILE* file, Handle<JSObject> object,
diff --git a/chromium/v8/src/objects/js-objects.tq b/chromium/v8/src/objects/js-objects.tq
index 9f5bf8554e9..e21f874bbb6 100644
--- a/chromium/v8/src/objects/js-objects.tq
+++ b/chromium/v8/src/objects/js-objects.tq
@@ -67,10 +67,15 @@ macro GetDerivedMap(implicit context: Context)(
return map;
} label SlowPath {
- return runtime::GetDerivedMap(context, target, newTarget);
+ return runtime::GetDerivedMap(context, target, newTarget, FalseConstant());
}
}
+macro GetDerivedRabGsabMap(implicit context: Context)(
+ target: JSFunction, newTarget: JSReceiver): Map {
+ return runtime::GetDerivedMap(context, target, newTarget, TrueConstant());
+}
+
macro AllocateFastOrSlowJSObjectFromMap(implicit context: Context)(map: Map):
JSObject {
let properties: EmptyFixedArray|NameDictionary|SwissNameDictionary =
diff --git a/chromium/v8/src/objects/js-regexp.cc b/chromium/v8/src/objects/js-regexp.cc
index b8a01418dae..0ae91a5ef91 100644
--- a/chromium/v8/src/objects/js-regexp.cc
+++ b/chromium/v8/src/objects/js-regexp.cc
@@ -151,6 +151,27 @@ JSRegExp::Flags JSRegExp::FlagsFromString(Isolate* isolate,
}
// static
+Handle<String> JSRegExp::StringFromFlags(Isolate* isolate,
+ JSRegExp::Flags flags) {
+ // Ensure that this function is up-to-date with the supported flag options.
+ constexpr size_t kFlagCount = JSRegExp::kFlagCount;
+ STATIC_ASSERT(kFlagCount == 8);
+
+ // Translate to the lexicographically smaller string.
+ int cursor = 0;
+ char buffer[kFlagCount] = {'\0'};
+ if (flags & JSRegExp::kHasIndices) buffer[cursor++] = 'd';
+ if (flags & JSRegExp::kGlobal) buffer[cursor++] = 'g';
+ if (flags & JSRegExp::kIgnoreCase) buffer[cursor++] = 'i';
+ if (flags & JSRegExp::kLinear) buffer[cursor++] = 'l';
+ if (flags & JSRegExp::kMultiline) buffer[cursor++] = 'm';
+ if (flags & JSRegExp::kDotAll) buffer[cursor++] = 's';
+ if (flags & JSRegExp::kUnicode) buffer[cursor++] = 'u';
+ if (flags & JSRegExp::kSticky) buffer[cursor++] = 'y';
+ return isolate->factory()->NewStringFromAsciiChecked(buffer);
+}
+
+// static
MaybeHandle<JSRegExp> JSRegExp::New(Isolate* isolate, Handle<String> pattern,
Flags flags, uint32_t backtrack_limit) {
Handle<JSFunction> constructor = isolate->regexp_function();
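
Note: JSRegExp::StringFromFlags above emits one letter per set flag in the fixed order d, g, i, l, m, s, u, y. A standalone sketch with hypothetical bit values; the Flag enum here is illustrative, not V8's actual flag layout.

#include <cstdio>
#include <string>

enum Flag : unsigned {
  kHasIndices = 1u << 0, kGlobal = 1u << 1, kIgnoreCase = 1u << 2,
  kLinear = 1u << 3, kMultiline = 1u << 4, kDotAll = 1u << 5,
  kUnicode = 1u << 6, kSticky = 1u << 7,
};

std::string StringFromFlags(unsigned flags) {
  std::string out;  // letters appended in lexicographic order
  if (flags & kHasIndices) out += 'd';
  if (flags & kGlobal) out += 'g';
  if (flags & kIgnoreCase) out += 'i';
  if (flags & kLinear) out += 'l';
  if (flags & kMultiline) out += 'm';
  if (flags & kDotAll) out += 's';
  if (flags & kUnicode) out += 'u';
  if (flags & kSticky) out += 'y';
  return out;
}

int main() {
  std::printf("%s\n",
              StringFromFlags(kGlobal | kIgnoreCase | kSticky).c_str());  // "giy"
  return 0;
}
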
diff --git a/chromium/v8/src/objects/js-regexp.h b/chromium/v8/src/objects/js-regexp.h
index c23662251a4..d66912c9e85 100644
--- a/chromium/v8/src/objects/js-regexp.h
+++ b/chromium/v8/src/objects/js-regexp.h
@@ -90,6 +90,9 @@ class JSRegExp : public TorqueGeneratedJSRegExp<JSRegExp, JSObject> {
static Flags FlagsFromString(Isolate* isolate, Handle<String> flags,
bool* success);
+ V8_EXPORT_PRIVATE static Handle<String> StringFromFlags(Isolate* isolate,
+ Flags flags);
+
bool CanTierUp();
bool MarkedForTierUp();
void ResetLastTierUpTick();
diff --git a/chromium/v8/src/objects/literal-objects.cc b/chromium/v8/src/objects/literal-objects.cc
index 8dfea9f77c1..e6d57de82e2 100644
--- a/chromium/v8/src/objects/literal-objects.cc
+++ b/chromium/v8/src/objects/literal-objects.cc
@@ -37,9 +37,9 @@ inline int EncodeComputedEntry(ClassBoilerplate::ValueKind value_kind,
return flags;
}
-template <typename LocalIsolate>
+template <typename IsolateT>
void AddToDescriptorArrayTemplate(
- LocalIsolate* isolate, Handle<DescriptorArray> descriptor_array_template,
+ IsolateT* isolate, Handle<DescriptorArray> descriptor_array_template,
Handle<Name> name, ClassBoilerplate::ValueKind value_kind,
Handle<Object> value) {
InternalIndex entry = descriptor_array_template->Search(
@@ -90,18 +90,18 @@ void AddToDescriptorArrayTemplate(
}
}
-template <typename LocalIsolate>
+template <typename IsolateT>
Handle<NameDictionary> DictionaryAddNoUpdateNextEnumerationIndex(
- LocalIsolate* isolate, Handle<NameDictionary> dictionary, Handle<Name> name,
+ IsolateT* isolate, Handle<NameDictionary> dictionary, Handle<Name> name,
Handle<Object> value, PropertyDetails details,
InternalIndex* entry_out = nullptr) {
return NameDictionary::AddNoUpdateNextEnumerationIndex(
isolate, dictionary, name, value, details, entry_out);
}
-template <typename LocalIsolate>
+template <typename IsolateT>
Handle<SwissNameDictionary> DictionaryAddNoUpdateNextEnumerationIndex(
- LocalIsolate* isolate, Handle<SwissNameDictionary> dictionary,
+ IsolateT* isolate, Handle<SwissNameDictionary> dictionary,
Handle<Name> name, Handle<Object> value, PropertyDetails details,
InternalIndex* entry_out = nullptr) {
// SwissNameDictionary does not maintain the enumeration order in property
@@ -109,10 +109,10 @@ Handle<SwissNameDictionary> DictionaryAddNoUpdateNextEnumerationIndex(
return SwissNameDictionary::Add(isolate, dictionary, name, value, details);
}
-template <typename LocalIsolate>
+template <typename IsolateT>
Handle<NumberDictionary> DictionaryAddNoUpdateNextEnumerationIndex(
- LocalIsolate* isolate, Handle<NumberDictionary> dictionary,
- uint32_t element, Handle<Object> value, PropertyDetails details,
+ IsolateT* isolate, Handle<NumberDictionary> dictionary, uint32_t element,
+ Handle<Object> value, PropertyDetails details,
InternalIndex* entry_out = nullptr) {
// NumberDictionary does not maintain the enumeration order, so it's
// a normal Add().
@@ -149,10 +149,9 @@ inline int GetExistingValueIndex(Object value) {
return value.IsSmi() ? Smi::ToInt(value) : kAccessorNotDefined;
}
-template <typename LocalIsolate, typename Dictionary, typename Key>
-void AddToDictionaryTemplate(LocalIsolate* isolate,
- Handle<Dictionary> dictionary, Key key,
- int key_index,
+template <typename IsolateT, typename Dictionary, typename Key>
+void AddToDictionaryTemplate(IsolateT* isolate, Handle<Dictionary> dictionary,
+ Key key, int key_index,
ClassBoilerplate::ValueKind value_kind,
Smi value) {
InternalIndex entry = dictionary->FindEntry(isolate, key);
@@ -372,7 +371,7 @@ void AddToDictionaryTemplate(LocalIsolate* isolate,
// Helper class that eases building of a properties, elements and computed
// properties templates.
-template <typename LocalIsolate>
+template <typename IsolateT>
class ObjectDescriptor {
public:
void IncComputedCount() { ++computed_count_; }
@@ -401,7 +400,7 @@ class ObjectDescriptor {
return computed_properties_;
}
- void CreateTemplates(LocalIsolate* isolate) {
+ void CreateTemplates(IsolateT* isolate) {
auto* factory = isolate->factory();
descriptor_array_template_ = factory->empty_descriptor_array();
if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
@@ -443,8 +442,8 @@ class ObjectDescriptor {
temp_handle_ = handle(Smi::zero(), isolate);
}
- void AddConstant(LocalIsolate* isolate, Handle<Name> name,
- Handle<Object> value, PropertyAttributes attribs) {
+ void AddConstant(IsolateT* isolate, Handle<Name> name, Handle<Object> value,
+ PropertyAttributes attribs) {
bool is_accessor = value->IsAccessorInfo();
DCHECK(!value->IsAccessorPair());
if (HasDictionaryProperties()) {
@@ -473,7 +472,7 @@ class ObjectDescriptor {
}
}
- void AddNamedProperty(LocalIsolate* isolate, Handle<Name> name,
+ void AddNamedProperty(IsolateT* isolate, Handle<Name> name,
ClassBoilerplate::ValueKind value_kind,
int value_index) {
Smi value = Smi::FromInt(value_index);
@@ -494,7 +493,7 @@ class ObjectDescriptor {
}
}
- void AddIndexedProperty(LocalIsolate* isolate, uint32_t element,
+ void AddIndexedProperty(IsolateT* isolate, uint32_t element,
ClassBoilerplate::ValueKind value_kind,
int value_index) {
Smi value = Smi::FromInt(value_index);
@@ -516,7 +515,7 @@ class ObjectDescriptor {
next_enumeration_index_ = current_index + 1;
}
- void Finalize(LocalIsolate* isolate) {
+ void Finalize(IsolateT* isolate) {
if (HasDictionaryProperties()) {
DCHECK_EQ(current_computed_index_, computed_properties_->length());
if (!V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
@@ -555,9 +554,9 @@ class ObjectDescriptor {
Handle<Object> temp_handle_;
};
-template <typename LocalIsolate, typename PropertyDict>
+template <typename IsolateT, typename PropertyDict>
void ClassBoilerplate::AddToPropertiesTemplate(
- LocalIsolate* isolate, Handle<PropertyDict> dictionary, Handle<Name> name,
+ IsolateT* isolate, Handle<PropertyDict> dictionary, Handle<Name> name,
int key_index, ClassBoilerplate::ValueKind value_kind, Smi value) {
AddToDictionaryTemplate(isolate, dictionary, name, key_index, value_kind,
value);
@@ -572,9 +571,9 @@ template void ClassBoilerplate::AddToPropertiesTemplate(
Isolate* isolate, Handle<SwissNameDictionary> dictionary, Handle<Name> name,
int key_index, ClassBoilerplate::ValueKind value_kind, Smi value);
-template <typename LocalIsolate>
+template <typename IsolateT>
void ClassBoilerplate::AddToElementsTemplate(
- LocalIsolate* isolate, Handle<NumberDictionary> dictionary, uint32_t key,
+ IsolateT* isolate, Handle<NumberDictionary> dictionary, uint32_t key,
int key_index, ClassBoilerplate::ValueKind value_kind, Smi value) {
AddToDictionaryTemplate(isolate, dictionary, key, key_index, value_kind,
value);
@@ -586,21 +585,20 @@ template void ClassBoilerplate::AddToElementsTemplate(
LocalIsolate* isolate, Handle<NumberDictionary> dictionary, uint32_t key,
int key_index, ClassBoilerplate::ValueKind value_kind, Smi value);
-template <typename LocalIsolate>
+template <typename IsolateT>
Handle<ClassBoilerplate> ClassBoilerplate::BuildClassBoilerplate(
- LocalIsolate* isolate, ClassLiteral* expr) {
+ IsolateT* isolate, ClassLiteral* expr) {
// Create a non-caching handle scope to ensure that the temporary handle used
// by ObjectDescriptor for passing Smis around does not corrupt handle cache
// in CanonicalHandleScope.
- typename LocalIsolate::HandleScopeType scope(isolate);
+ typename IsolateT::HandleScopeType scope(isolate);
auto* factory = isolate->factory();
- ObjectDescriptor<LocalIsolate> static_desc(kMinimumClassPropertiesCount);
- ObjectDescriptor<LocalIsolate> instance_desc(
- kMinimumPrototypePropertiesCount);
+ ObjectDescriptor<IsolateT> static_desc(kMinimumClassPropertiesCount);
+ ObjectDescriptor<IsolateT> instance_desc(kMinimumPrototypePropertiesCount);
for (int i = 0; i < expr->public_members()->length(); i++) {
ClassLiteral::Property* property = expr->public_members()->at(i);
- ObjectDescriptor<LocalIsolate>& desc =
+ ObjectDescriptor<IsolateT>& desc =
property->is_static() ? static_desc : instance_desc;
if (property->is_computed_name()) {
if (property->kind() != ClassLiteral::Property::FIELD) {
@@ -678,7 +676,7 @@ Handle<ClassBoilerplate> ClassBoilerplate::BuildClassBoilerplate(
continue;
}
- ObjectDescriptor<LocalIsolate>& desc =
+ ObjectDescriptor<IsolateT>& desc =
property->is_static() ? static_desc : instance_desc;
if (property->is_computed_name()) {
int computed_name_index = dynamic_argument_index;
diff --git a/chromium/v8/src/objects/literal-objects.h b/chromium/v8/src/objects/literal-objects.h
index 3377bcd4c2b..a20347c4a7e 100644
--- a/chromium/v8/src/objects/literal-objects.h
+++ b/chromium/v8/src/objects/literal-objects.h
@@ -122,20 +122,20 @@ class ClassBoilerplate : public FixedArray {
DECL_ACCESSORS(instance_elements_template, Object)
DECL_ACCESSORS(instance_computed_properties, FixedArray)
- template <typename LocalIsolate, typename Dictionary>
- static void AddToPropertiesTemplate(LocalIsolate* isolate,
+ template <typename IsolateT, typename Dictionary>
+ static void AddToPropertiesTemplate(IsolateT* isolate,
Handle<Dictionary> dictionary,
Handle<Name> name, int key_index,
ValueKind value_kind, Smi value);
- template <typename LocalIsolate>
- static void AddToElementsTemplate(LocalIsolate* isolate,
+ template <typename IsolateT>
+ static void AddToElementsTemplate(IsolateT* isolate,
Handle<NumberDictionary> dictionary,
uint32_t key, int key_index,
ValueKind value_kind, Smi value);
- template <typename LocalIsolate>
- static Handle<ClassBoilerplate> BuildClassBoilerplate(LocalIsolate* isolate,
+ template <typename IsolateT>
+ static Handle<ClassBoilerplate> BuildClassBoilerplate(IsolateT* isolate,
ClassLiteral* expr);
enum {
diff --git a/chromium/v8/src/objects/lookup-inl.h b/chromium/v8/src/objects/lookup-inl.h
index 5f2fbd4cc20..84a16279851 100644
--- a/chromium/v8/src/objects/lookup-inl.h
+++ b/chromium/v8/src/objects/lookup-inl.h
@@ -169,7 +169,8 @@ Handle<Name> LookupIterator::GetName() {
bool LookupIterator::IsElement(JSReceiver object) const {
return index_ <= JSObject::kMaxElementIndex ||
- (index_ != kInvalidIndex && object.map().has_typed_array_elements());
+ (index_ != kInvalidIndex &&
+ object.map().has_typed_array_or_rab_gsab_typed_array_elements());
}
bool LookupIterator::is_dictionary_holder() const {
@@ -209,7 +210,7 @@ bool LookupIterator::IsCacheableTransition() {
// static
void LookupIterator::UpdateProtector(Isolate* isolate, Handle<Object> receiver,
Handle<Name> name) {
- RuntimeCallTimerScope scope(isolate, RuntimeCallCounterId::kUpdateProtector);
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kUpdateProtector);
// This list must be kept in sync with
// CodeStubAssembler::CheckForAssociatedProtector!
diff --git a/chromium/v8/src/objects/lookup.cc b/chromium/v8/src/objects/lookup.cc
index d81d0059336..8f8c6081dfd 100644
--- a/chromium/v8/src/objects/lookup.cc
+++ b/chromium/v8/src/objects/lookup.cc
@@ -15,6 +15,7 @@
#include "src/objects/field-type.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/heap-number-inl.h"
+#include "src/objects/map-updater.h"
#include "src/objects/ordered-hash-table.h"
#include "src/objects/struct-inl.h"
@@ -471,7 +472,7 @@ void LookupIterator::ReconfigureDataProperty(Handle<Object> value,
Handle<Map> old_map(holder_obj->map(isolate_), isolate_);
// Force mutable to avoid changing constant value by reconfiguring
// kData -> kAccessor -> kData.
- Handle<Map> new_map = Map::ReconfigureExistingProperty(
+ Handle<Map> new_map = MapUpdater::ReconfigureExistingProperty(
isolate_, old_map, descriptor_number(), i::kData, attributes,
PropertyConstness::kMutable);
if (!new_map->is_dictionary_map()) {
@@ -487,11 +488,15 @@ void LookupIterator::ReconfigureDataProperty(Handle<Object> value,
if (!IsElement(*holder) && !holder_obj->HasFastProperties(isolate_)) {
if (holder_obj->map(isolate_).is_prototype_map() &&
- (property_details_.attributes() & READ_ONLY) == 0 &&
- (attributes & READ_ONLY) != 0) {
+ (((property_details_.attributes() & READ_ONLY) == 0 &&
+ (attributes & READ_ONLY) != 0) ||
+ (property_details_.attributes() & DONT_ENUM) !=
+ (attributes & DONT_ENUM))) {
// Invalidate prototype validity cell when a property is reconfigured
// from writable to read-only as this may invalidate transitioning store
// IC handlers.
+ // Invalidate prototype validity cell when a property changes
+ // enumerability to clear the prototype chain enum cache.
JSObject::InvalidatePrototypeChains(holder->map(isolate_));
}
if (holder_obj->IsJSGlobalObject(isolate_)) {
@@ -688,10 +693,10 @@ void LookupIterator::Delete() {
} else {
DCHECK(!name()->IsPrivateName(isolate_));
bool is_prototype_map = holder->map(isolate_).is_prototype_map();
- RuntimeCallTimerScope stats_scope(
- isolate_, is_prototype_map
- ? RuntimeCallCounterId::kPrototypeObject_DeleteProperty
- : RuntimeCallCounterId::kObject_DeleteProperty);
+ RCS_SCOPE(isolate_,
+ is_prototype_map
+ ? RuntimeCallCounterId::kPrototypeObject_DeleteProperty
+ : RuntimeCallCounterId::kObject_DeleteProperty);
PropertyNormalizationMode mode =
is_prototype_map ? KEEP_INOBJECT_PROPERTIES : CLEAR_INOBJECT_PROPERTIES;
@@ -1026,7 +1031,9 @@ void LookupIterator::WriteDataValue(Handle<Object> value,
GlobalDictionary dictionary =
JSGlobalObject::cast(*holder).global_dictionary(isolate_, kAcquireLoad);
PropertyCell cell = dictionary.CellAt(isolate_, dictionary_entry());
- DCHECK_EQ(cell.value(), *value);
+ DCHECK(cell.value() == *value ||
+ (cell.value().IsString() && value->IsString() &&
+ String::cast(cell.value()).Equals(String::cast(*value))));
#endif // DEBUG
} else {
DCHECK_IMPLIES(holder->IsJSProxy(isolate_), name()->IsPrivate(isolate_));
@@ -1247,13 +1254,13 @@ bool LookupIterator::LookupCachedProperty(Handle<AccessorPair> accessor_pair) {
DCHECK_EQ(state(), LookupIterator::ACCESSOR);
DCHECK(GetAccessors()->IsAccessorPair(isolate_));
- Handle<Object> getter(accessor_pair->getter(isolate_), isolate());
- MaybeHandle<Name> maybe_name =
- FunctionTemplateInfo::TryGetCachedPropertyName(isolate(), getter);
- if (maybe_name.is_null()) return false;
+ base::Optional<Name> maybe_name =
+ FunctionTemplateInfo::TryGetCachedPropertyName(
+ isolate(), accessor_pair->getter(isolate_));
+ if (!maybe_name.has_value()) return false;
// We have found a cached property! Modify the iterator accordingly.
- name_ = maybe_name.ToHandleChecked();
+ name_ = handle(maybe_name.value(), isolate_);
Restart();
CHECK_EQ(state(), LookupIterator::DATA);
return true;
@@ -1357,7 +1364,7 @@ ConcurrentLookupIterator::TryGetOwnConstantElement(
// The access guard below protects only internalized string accesses.
// TODO(jgruber): Support other string kinds.
- Map wrapped_string_map = wrapped_string.synchronized_map(isolate);
+ Map wrapped_string_map = wrapped_string.map(isolate, kAcquireLoad);
if (!InstanceTypeChecker::IsInternalizedString(
wrapped_string_map.instance_type())) {
return kGaveUp;
diff --git a/chromium/v8/src/objects/map-inl.h b/chromium/v8/src/objects/map-inl.h
index eb28f0b111f..96626f28eb3 100644
--- a/chromium/v8/src/objects/map-inl.h
+++ b/chromium/v8/src/objects/map-inl.h
@@ -90,19 +90,21 @@ BIT_FIELD_ACCESSORS(Map, bit_field2, is_immutable_proto,
Map::Bits2::IsImmutablePrototypeBit)
// |bit_field3| fields.
-BIT_FIELD_ACCESSORS(Map, bit_field3, owns_descriptors,
+BIT_FIELD_ACCESSORS(Map, relaxed_bit_field3, owns_descriptors,
Map::Bits3::OwnsDescriptorsBit)
-BIT_FIELD_ACCESSORS(Map, bit_field3, is_deprecated, Map::Bits3::IsDeprecatedBit)
-BIT_FIELD_ACCESSORS(Map, bit_field3, is_in_retained_map_list,
+BIT_FIELD_ACCESSORS(Map, release_acquire_bit_field3, is_deprecated,
+ Map::Bits3::IsDeprecatedBit)
+BIT_FIELD_ACCESSORS(Map, relaxed_bit_field3, is_in_retained_map_list,
Map::Bits3::IsInRetainedMapListBit)
-BIT_FIELD_ACCESSORS(Map, bit_field3, is_prototype_map,
+BIT_FIELD_ACCESSORS(Map, release_acquire_bit_field3, is_prototype_map,
Map::Bits3::IsPrototypeMapBit)
-BIT_FIELD_ACCESSORS(Map, bit_field3, is_migration_target,
+BIT_FIELD_ACCESSORS(Map, relaxed_bit_field3, is_migration_target,
Map::Bits3::IsMigrationTargetBit)
-BIT_FIELD_ACCESSORS(Map, bit_field3, is_extensible, Map::Bits3::IsExtensibleBit)
+BIT_FIELD_ACCESSORS2(Map, relaxed_bit_field3, bit_field3, is_extensible,
+ Map::Bits3::IsExtensibleBit)
BIT_FIELD_ACCESSORS(Map, bit_field3, may_have_interesting_symbols,
Map::Bits3::MayHaveInterestingSymbolsBit)
-BIT_FIELD_ACCESSORS(Map, bit_field3, construction_counter,
+BIT_FIELD_ACCESSORS(Map, relaxed_bit_field3, construction_counter,
Map::Bits3::ConstructionCounterBits)
DEF_GETTER(Map, GetNamedInterceptor, InterceptorInfo) {
@@ -117,11 +119,18 @@ DEF_GETTER(Map, GetIndexedInterceptor, InterceptorInfo) {
return InterceptorInfo::cast(info.GetIndexedPropertyHandler(cage_base));
}
+// static
bool Map::IsMostGeneralFieldType(Representation representation,
FieldType field_type) {
return !representation.IsHeapObject() || field_type.IsAny();
}
+// static
+bool Map::FieldTypeIsCleared(Representation rep, FieldType type) {
+ return type.IsNone() && rep.IsHeapObject();
+}
+
+// static
bool Map::CanHaveFastTransitionableElementsKind(InstanceType instance_type) {
return instance_type == JS_ARRAY_TYPE ||
instance_type == JS_PRIMITIVE_WRAPPER_TYPE ||
@@ -196,14 +205,15 @@ InternalIndex Map::LastAdded() const {
}
int Map::NumberOfOwnDescriptors() const {
- return Bits3::NumberOfOwnDescriptorsBits::decode(bit_field3());
+ return Bits3::NumberOfOwnDescriptorsBits::decode(
+ release_acquire_bit_field3());
}
void Map::SetNumberOfOwnDescriptors(int number) {
DCHECK_LE(number, instance_descriptors().number_of_descriptors());
CHECK_LE(static_cast<unsigned>(number),
static_cast<unsigned>(kMaxNumberOfDescriptors));
- set_bit_field3(
+ set_release_acquire_bit_field3(
Bits3::NumberOfOwnDescriptorsBits::update(bit_field3(), number));
}
@@ -221,7 +231,7 @@ void Map::SetEnumLength(int length) {
CHECK_LE(static_cast<unsigned>(length),
static_cast<unsigned>(kMaxNumberOfDescriptors));
}
- set_bit_field3(Bits3::EnumLengthBits::update(bit_field3(), length));
+ set_relaxed_bit_field3(Bits3::EnumLengthBits::update(bit_field3(), length));
}
FixedArrayBase Map::GetInitialElements() const {
@@ -229,7 +239,7 @@ FixedArrayBase Map::GetInitialElements() const {
if (has_fast_elements() || has_fast_string_wrapper_elements() ||
has_any_nonextensible_elements()) {
result = GetReadOnlyRoots().empty_fixed_array();
- } else if (has_typed_array_elements()) {
+ } else if (has_typed_array_or_rab_gsab_typed_array_elements()) {
result = GetReadOnlyRoots().empty_byte_array();
} else if (has_dictionary_elements()) {
result = GetReadOnlyRoots().empty_slow_element_dictionary();
@@ -271,16 +281,28 @@ void Map::set_instance_size(int value) {
}
int Map::inobject_properties_start_or_constructor_function_index() const {
- return RELAXED_READ_BYTE_FIELD(
- *this, kInObjectPropertiesStartOrConstructorFunctionIndexOffset);
+ if (V8_CONCURRENT_MARKING_BOOL) {
+ // TODO(solanes, v8:7790, v8:11353): Make this and the setter non-atomic
+ // when TSAN sees the map's store synchronization.
+ return RELAXED_READ_BYTE_FIELD(
+ *this, kInObjectPropertiesStartOrConstructorFunctionIndexOffset);
+ } else {
+ return ReadField<byte>(
+ kInObjectPropertiesStartOrConstructorFunctionIndexOffset);
+ }
}
void Map::set_inobject_properties_start_or_constructor_function_index(
int value) {
CHECK_LT(static_cast<unsigned>(value), 256);
- RELAXED_WRITE_BYTE_FIELD(
- *this, kInObjectPropertiesStartOrConstructorFunctionIndexOffset,
- static_cast<byte>(value));
+ if (V8_CONCURRENT_MARKING_BOOL) {
+ RELAXED_WRITE_BYTE_FIELD(
+ *this, kInObjectPropertiesStartOrConstructorFunctionIndexOffset,
+ static_cast<byte>(value));
+ } else {
+ WriteField<byte>(kInObjectPropertiesStartOrConstructorFunctionIndexOffset,
+ static_cast<byte>(value));
+ }
}
int Map::GetInObjectPropertiesStartInWords() const {
@@ -318,15 +340,23 @@ Handle<Map> Map::AddMissingTransitionsForTesting(
return AddMissingTransitions(isolate, split_map, descriptors);
}
-// TODO(solanes, v8:7790, v8:11353): Make the instance_type accessors non-atomic
-// when TSAN sees the map's store synchronization.
InstanceType Map::instance_type() const {
- return static_cast<InstanceType>(
- RELAXED_READ_UINT16_FIELD(*this, kInstanceTypeOffset));
+ if (V8_CONCURRENT_MARKING_BOOL) {
+ // TODO(solanes, v8:7790, v8:11353): Make this and the setter non-atomic
+ // when TSAN sees the map's store synchronization.
+ return static_cast<InstanceType>(
+ RELAXED_READ_UINT16_FIELD(*this, kInstanceTypeOffset));
+ } else {
+ return static_cast<InstanceType>(ReadField<uint16_t>(kInstanceTypeOffset));
+ }
}
void Map::set_instance_type(InstanceType value) {
- RELAXED_WRITE_UINT16_FIELD(*this, kInstanceTypeOffset, value);
+ if (V8_CONCURRENT_MARKING_BOOL) {
+ RELAXED_WRITE_UINT16_FIELD(*this, kInstanceTypeOffset, value);
+ } else {
+ WriteField<uint16_t>(kInstanceTypeOffset, value);
+ }
}
int Map::UnusedPropertyFields() const {
@@ -451,7 +481,13 @@ void Map::AccountAddedOutOfObjectPropertyField(int unused_in_property_array) {
byte Map::bit_field() const { return ReadField<byte>(kBitFieldOffset); }
void Map::set_bit_field(byte value) {
- WriteField<byte>(kBitFieldOffset, value);
+ if (V8_CONCURRENT_MARKING_BOOL) {
+ // TODO(solanes, v8:7790, v8:11353): Make this non-atomic when TSAN sees the
+ // map's store synchronization.
+ set_relaxed_bit_field(value);
+ } else {
+ WriteField<byte>(kBitFieldOffset, value);
+ }
}
byte Map::relaxed_bit_field() const {
@@ -468,6 +504,40 @@ void Map::set_bit_field2(byte value) {
WriteField<byte>(kBitField2Offset, value);
}
+uint32_t Map::bit_field3() const {
+ if (V8_CONCURRENT_MARKING_BOOL) {
+ // TODO(solanes, v8:7790, v8:11353): Make this and the setter non-atomic
+ // when TSAN sees the map's store synchronization.
+ return relaxed_bit_field3();
+ } else {
+ return ReadField<uint32_t>(kBitField3Offset);
+ }
+}
+
+void Map::set_bit_field3(uint32_t value) {
+ if (V8_CONCURRENT_MARKING_BOOL) {
+ set_relaxed_bit_field3(value);
+ } else {
+ WriteField<uint32_t>(kBitField3Offset, value);
+ }
+}
+
+uint32_t Map::relaxed_bit_field3() const {
+ return RELAXED_READ_UINT32_FIELD(*this, kBitField3Offset);
+}
+
+void Map::set_relaxed_bit_field3(uint32_t value) {
+ RELAXED_WRITE_UINT32_FIELD(*this, kBitField3Offset, value);
+}
+
+uint32_t Map::release_acquire_bit_field3() const {
+ return ACQUIRE_READ_UINT32_FIELD(*this, kBitField3Offset);
+}
+
+void Map::set_release_acquire_bit_field3(uint32_t value) {
+ RELEASE_WRITE_UINT32_FIELD(*this, kBitField3Offset, value);
+}
+
bool Map::is_abandoned_prototype_map() const {
return is_prototype_map() && !owns_descriptors();
}
@@ -523,6 +593,14 @@ bool Map::has_typed_array_elements() const {
return IsTypedArrayElementsKind(elements_kind());
}
+bool Map::has_rab_gsab_typed_array_elements() const {
+ return IsRabGsabTypedArrayElementsKind(elements_kind());
+}
+
+bool Map::has_typed_array_or_rab_gsab_typed_array_elements() const {
+ return IsTypedArrayOrRabGsabTypedArrayElementsKind(elements_kind());
+}
+
bool Map::has_dictionary_elements() const {
return IsDictionaryElementsKind(elements_kind());
}
@@ -551,15 +629,16 @@ void Map::set_is_dictionary_map(bool value) {
}
bool Map::is_dictionary_map() const {
- return Bits3::IsDictionaryMapBit::decode(bit_field3());
+ return Bits3::IsDictionaryMapBit::decode(relaxed_bit_field3());
}
void Map::mark_unstable() {
- set_bit_field3(Bits3::IsUnstableBit::update(bit_field3(), true));
+ set_release_acquire_bit_field3(
+ Bits3::IsUnstableBit::update(bit_field3(), true));
}
bool Map::is_stable() const {
- return !Bits3::IsUnstableBit::decode(bit_field3());
+ return !Bits3::IsUnstableBit::decode(release_acquire_bit_field3());
}
bool Map::CanBeDeprecated() const {
@@ -616,14 +695,6 @@ void Map::InitializeDescriptors(Isolate* isolate, DescriptorArray descriptors) {
descriptors.number_of_descriptors());
}
-void Map::set_bit_field3(uint32_t bits) {
- RELEASE_WRITE_UINT32_FIELD(*this, kBitField3Offset, bits);
-}
-
-uint32_t Map::bit_field3() const {
- return ACQUIRE_READ_UINT32_FIELD(*this, kBitField3Offset);
-}
-
void Map::clear_padding() {
if (FIELD_SIZE(kOptionalPaddingOffset) == 0) return;
DCHECK_EQ(4, FIELD_SIZE(kOptionalPaddingOffset));
@@ -698,6 +769,9 @@ ACCESSORS_CHECKED2(Map, constructor_or_back_pointer, Object,
ACCESSORS_CHECKED(Map, native_context, NativeContext,
kConstructorOrBackPointerOrNativeContextOffset,
IsContextMap())
+ACCESSORS_CHECKED(Map, native_context_or_null, Object,
+ kConstructorOrBackPointerOrNativeContextOffset,
+ (value.IsNull() || value.IsNativeContext()) && IsContextMap())
#if V8_ENABLE_WEBASSEMBLY
ACCESSORS_CHECKED(Map, wasm_type_info, WasmTypeInfo,
kConstructorOrBackPointerOrNativeContextOffset,
diff --git a/chromium/v8/src/objects/map-updater.cc b/chromium/v8/src/objects/map-updater.cc
index 8ab15451a77..feb060fa518 100644
--- a/chromium/v8/src/objects/map-updater.cc
+++ b/chromium/v8/src/objects/map-updater.cc
@@ -4,6 +4,10 @@
#include "src/objects/map-updater.h"
+#include <queue>
+
+#include "src/base/platform/mutex.h"
+#include "src/execution/frames.h"
#include "src/execution/isolate.h"
#include "src/handles/handles.h"
#include "src/objects/field-type.h"
@@ -23,6 +27,66 @@ inline bool EqualImmutableValues(Object obj1, Object obj2) {
return false;
}
+V8_WARN_UNUSED_RESULT Handle<FieldType> GeneralizeFieldType(
+ Representation rep1, Handle<FieldType> type1, Representation rep2,
+ Handle<FieldType> type2, Isolate* isolate) {
+ // Cleared field types need special treatment. They represent lost knowledge,
+  // so we must be conservative: their generalization with any other type
+  // is "Any".
+ if (Map::FieldTypeIsCleared(rep1, *type1) ||
+ Map::FieldTypeIsCleared(rep2, *type2)) {
+ return FieldType::Any(isolate);
+ }
+ if (type1->NowIs(type2)) return type2;
+ if (type2->NowIs(type1)) return type1;
+ return FieldType::Any(isolate);
+}
+
+void PrintGeneralization(
+ Isolate* isolate, Handle<Map> map, FILE* file, const char* reason,
+ InternalIndex modify_index, int split, int descriptors,
+ bool descriptor_to_field, Representation old_representation,
+ Representation new_representation, PropertyConstness old_constness,
+ PropertyConstness new_constness, MaybeHandle<FieldType> old_field_type,
+ MaybeHandle<Object> old_value, MaybeHandle<FieldType> new_field_type,
+ MaybeHandle<Object> new_value) {
+ OFStream os(file);
+ os << "[generalizing]";
+ Name name = map->instance_descriptors(isolate).GetKey(modify_index);
+ if (name.IsString()) {
+ String::cast(name).PrintOn(file);
+ } else {
+ os << "{symbol " << reinterpret_cast<void*>(name.ptr()) << "}";
+ }
+ os << ":";
+ if (descriptor_to_field) {
+ os << "c";
+ } else {
+ os << old_representation.Mnemonic() << "{";
+ if (old_field_type.is_null()) {
+ os << Brief(*(old_value.ToHandleChecked()));
+ } else {
+ old_field_type.ToHandleChecked()->PrintTo(os);
+ }
+ os << ";" << old_constness << "}";
+ }
+ os << "->" << new_representation.Mnemonic() << "{";
+ if (new_field_type.is_null()) {
+ os << Brief(*(new_value.ToHandleChecked()));
+ } else {
+ new_field_type.ToHandleChecked()->PrintTo(os);
+ }
+ os << ";" << new_constness << "} (";
+ if (strlen(reason) > 0) {
+ os << reason;
+ } else {
+ os << "+" << (descriptors - split) << " maps";
+ }
+ os << ") [";
+ JavaScriptFrame::PrintTop(isolate, file, false, true);
+ os << "]\n";
+}
+
} // namespace
MapUpdater::MapUpdater(Isolate* isolate, Handle<Map> old_map)
@@ -142,8 +206,8 @@ Handle<Map> MapUpdater::ReconfigureToDataField(InternalIndex descriptor,
old_details.location(), new_representation_);
new_field_type_ =
- Map::GeneralizeFieldType(old_representation, old_field_type,
- new_representation_, field_type, isolate_);
+ GeneralizeFieldType(old_representation, old_field_type,
+ new_representation_, field_type, isolate_);
} else {
// We don't know if this is a first property kind reconfiguration
// and we don't know which value was in this property previously
@@ -222,8 +286,8 @@ void MapUpdater::GeneralizeField(Handle<Map> map, InternalIndex modify_index,
PropertyConstness new_constness,
Representation new_representation,
Handle<FieldType> new_field_type) {
- Map::GeneralizeField(isolate_, map, modify_index, new_constness,
- new_representation, new_field_type);
+ GeneralizeField(isolate_, map, modify_index, new_constness,
+ new_representation, new_field_type);
DCHECK(*old_descriptors_ == old_map_->instance_descriptors(isolate_) ||
*old_descriptors_ ==
@@ -237,6 +301,23 @@ MapUpdater::State MapUpdater::Normalize(const char* reason) {
return state_; // Done.
}
+void MapUpdater::ShrinkInstanceSize(base::SharedMutex* map_updater_access,
+ Map map, int slack) {
+ DCHECK_GE(slack, 0);
+#ifdef DEBUG
+ int old_visitor_id = Map::GetVisitorId(map);
+ int new_unused = map.UnusedPropertyFields() - slack;
+#endif
+
+ {
+ base::SharedMutexGuard<base::kExclusive> mutex_guard(map_updater_access);
+ map.set_instance_size(map.InstanceSizeFromSlack(slack));
+ }
+ map.set_construction_counter(Map::kNoSlackTracking);
+ DCHECK_EQ(old_visitor_id, Map::GetVisitorId(map));
+ DCHECK_EQ(new_unused, map.UnusedPropertyFields());
+}
+
MapUpdater::State MapUpdater::TryReconfigureToDataFieldInplace() {
// Updating deprecated maps in-place doesn't make sense.
if (old_map_->is_deprecated()) return state_;
@@ -262,9 +343,9 @@ MapUpdater::State MapUpdater::TryReconfigureToDataFieldInplace() {
DCHECK_EQ(new_attributes_, old_details.attributes());
DCHECK_EQ(kField, old_details.location());
if (FLAG_trace_generalization) {
- old_map_->PrintGeneralization(
- isolate_, stdout, "uninitialized field", modified_descriptor_, old_nof_,
- old_nof_, false, old_representation, new_representation_,
+ PrintGeneralization(
+ isolate_, old_map_, stdout, "uninitialized field", modified_descriptor_,
+ old_nof_, old_nof_, false, old_representation, new_representation_,
old_details.constness(), new_constness_,
handle(old_descriptors_->GetFieldType(modified_descriptor_), isolate_),
MaybeHandle<Object>(), new_field_type_, MaybeHandle<Object>());
@@ -615,9 +696,9 @@ Handle<DescriptorArray> MapUpdater::BuildDescriptorArray() {
GetOrComputeFieldType(target_descriptors, i,
target_details.location(), next_representation);
- Handle<FieldType> next_field_type = Map::GeneralizeFieldType(
- old_details.representation(), old_field_type, next_representation,
- target_field_type, isolate_);
+ Handle<FieldType> next_field_type =
+ GeneralizeFieldType(old_details.representation(), old_field_type,
+ next_representation, target_field_type, isolate_);
Map::GeneralizeIfCanHaveTransitionableFastElementsKind(
isolate_, instance_type, &next_representation, &next_field_type);
@@ -795,8 +876,9 @@ MapUpdater::State MapUpdater::ConstructNewMap() {
isolate_);
}
- old_map_->PrintGeneralization(
- isolate_, stdout, "", modified_descriptor_, split_nof, old_nof_,
+ PrintGeneralization(
+ isolate_, old_map_, stdout, "", modified_descriptor_, split_nof,
+ old_nof_,
old_details.location() == kDescriptor && new_location_ == kField,
old_details.representation(), new_details.representation(),
old_details.constness(), new_details.constness(), old_field_type,
@@ -840,5 +922,179 @@ MapUpdater::State MapUpdater::ConstructNewMapWithIntegrityLevelTransition() {
return state_;
}
+namespace {
+
+void PrintReconfiguration(Isolate* isolate, Handle<Map> map, FILE* file,
+ InternalIndex modify_index, PropertyKind kind,
+ PropertyAttributes attributes) {
+ OFStream os(file);
+ os << "[reconfiguring]";
+ Name name = map->instance_descriptors(isolate).GetKey(modify_index);
+ if (name.IsString()) {
+ String::cast(name).PrintOn(file);
+ } else {
+ os << "{symbol " << reinterpret_cast<void*>(name.ptr()) << "}";
+ }
+ os << ": " << (kind == kData ? "kData" : "ACCESSORS") << ", attrs: ";
+ os << attributes << " [";
+ JavaScriptFrame::PrintTop(isolate, file, false, true);
+ os << "]\n";
+}
+
+} // namespace
+
+// static
+Handle<Map> MapUpdater::ReconfigureExistingProperty(
+ Isolate* isolate, Handle<Map> map, InternalIndex descriptor,
+ PropertyKind kind, PropertyAttributes attributes,
+ PropertyConstness constness) {
+ // Dictionaries have to be reconfigured in-place.
+ DCHECK(!map->is_dictionary_map());
+ DCHECK_EQ(kData, kind); // Only kData case is supported so far.
+
+ if (!map->GetBackPointer().IsMap()) {
+    // There is no benefit from reconstructing the transition tree for maps
+    // without back pointers; normalize and try to hit the map cache instead.
+ return Map::Normalize(isolate, map, CLEAR_INOBJECT_PROPERTIES,
+ "Normalize_AttributesMismatchProtoMap");
+ }
+
+ if (FLAG_trace_generalization) {
+ PrintReconfiguration(isolate, map, stdout, descriptor, kind, attributes);
+ }
+
+ return MapUpdater{isolate, map}.ReconfigureToDataField(
+ descriptor, attributes, constness, Representation::None(),
+ FieldType::None(isolate));
+}
+
+// static
+void MapUpdater::UpdateFieldType(Isolate* isolate, Handle<Map> map,
+ InternalIndex descriptor, Handle<Name> name,
+ PropertyConstness new_constness,
+ Representation new_representation,
+ const MaybeObjectHandle& new_wrapped_type) {
+ DCHECK(new_wrapped_type->IsSmi() || new_wrapped_type->IsWeak());
+ // We store raw pointers in the queue, so no allocations are allowed.
+ DisallowGarbageCollection no_gc;
+ PropertyDetails details =
+ map->instance_descriptors(isolate).GetDetails(descriptor);
+ if (details.location() != kField) return;
+ DCHECK_EQ(kData, details.kind());
+
+ if (new_constness != details.constness() && map->is_prototype_map()) {
+ JSObject::InvalidatePrototypeChains(*map);
+ }
+
+ std::queue<Map> backlog;
+ backlog.push(*map);
+
+ while (!backlog.empty()) {
+ Map current = backlog.front();
+ backlog.pop();
+
+ TransitionsAccessor transitions(isolate, current, &no_gc);
+ int num_transitions = transitions.NumberOfTransitions();
+ for (int i = 0; i < num_transitions; ++i) {
+ Map target = transitions.GetTarget(i);
+ backlog.push(target);
+ }
+ DescriptorArray descriptors = current.instance_descriptors(isolate);
+ PropertyDetails details = descriptors.GetDetails(descriptor);
+
+ // It is allowed to change representation here only from None
+ // to something or from Smi or HeapObject to Tagged.
+ DCHECK(details.representation().Equals(new_representation) ||
+ details.representation().CanBeInPlaceChangedTo(new_representation));
+
+ // Skip if already updated the shared descriptor.
+ if (new_constness != details.constness() ||
+ !new_representation.Equals(details.representation()) ||
+ descriptors.GetFieldType(descriptor) != *new_wrapped_type.object()) {
+ Descriptor d = Descriptor::DataField(
+ name, descriptors.GetFieldIndex(descriptor), details.attributes(),
+ new_constness, new_representation, new_wrapped_type);
+ descriptors.Replace(descriptor, &d);
+ }
+ }
+}
+
+// TODO(jgruber): Lock the map-updater mutex.
+// static
+void MapUpdater::GeneralizeField(Isolate* isolate, Handle<Map> map,
+ InternalIndex modify_index,
+ PropertyConstness new_constness,
+ Representation new_representation,
+ Handle<FieldType> new_field_type) {
+ DCHECK(!map->is_deprecated());
+
+ // Check if we actually need to generalize the field type at all.
+ Handle<DescriptorArray> old_descriptors(map->instance_descriptors(isolate),
+ isolate);
+ PropertyDetails old_details = old_descriptors->GetDetails(modify_index);
+ PropertyConstness old_constness = old_details.constness();
+ Representation old_representation = old_details.representation();
+ Handle<FieldType> old_field_type(old_descriptors->GetFieldType(modify_index),
+ isolate);
+
+ // Return if the current map is general enough to hold requested constness and
+ // representation/field type.
+ if (IsGeneralizableTo(new_constness, old_constness) &&
+ old_representation.Equals(new_representation) &&
+ !Map::FieldTypeIsCleared(new_representation, *new_field_type) &&
+ // Checking old_field_type for being cleared is not necessary because
+ // the NowIs check below would fail anyway in that case.
+ new_field_type->NowIs(old_field_type)) {
+ DCHECK(GeneralizeFieldType(old_representation, old_field_type,
+ new_representation, new_field_type, isolate)
+ ->NowIs(old_field_type));
+ return;
+ }
+
+ // Determine the field owner.
+ Handle<Map> field_owner(map->FindFieldOwner(isolate, modify_index), isolate);
+ Handle<DescriptorArray> descriptors(
+ field_owner->instance_descriptors(isolate), isolate);
+ DCHECK_EQ(*old_field_type, descriptors->GetFieldType(modify_index));
+
+ new_field_type =
+ GeneralizeFieldType(old_representation, old_field_type,
+ new_representation, new_field_type, isolate);
+
+ new_constness = GeneralizeConstness(old_constness, new_constness);
+
+ PropertyDetails details = descriptors->GetDetails(modify_index);
+ Handle<Name> name(descriptors->GetKey(modify_index), isolate);
+
+ MaybeObjectHandle wrapped_type(Map::WrapFieldType(isolate, new_field_type));
+ UpdateFieldType(isolate, field_owner, modify_index, name, new_constness,
+ new_representation, wrapped_type);
+
+ if (new_constness != old_constness) {
+ field_owner->dependent_code().DeoptimizeDependentCodeGroup(
+ DependentCode::kFieldConstGroup);
+ }
+
+ if (!new_field_type->Equals(*old_field_type)) {
+ field_owner->dependent_code().DeoptimizeDependentCodeGroup(
+ DependentCode::kFieldTypeGroup);
+ }
+
+ if (!new_representation.Equals(old_representation)) {
+ field_owner->dependent_code().DeoptimizeDependentCodeGroup(
+ DependentCode::kFieldRepresentationGroup);
+ }
+
+ if (FLAG_trace_generalization) {
+ PrintGeneralization(
+ isolate, map, stdout, "field type generalization", modify_index,
+ map->NumberOfOwnDescriptors(), map->NumberOfOwnDescriptors(), false,
+ details.representation(),
+ descriptors->GetDetails(modify_index).representation(), old_constness,
+ new_constness, old_field_type, MaybeHandle<Object>(), new_field_type,
+ MaybeHandle<Object>());
+ }
+}
+
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/objects/map-updater.h b/chromium/v8/src/objects/map-updater.h
index 427ddc90e83..c901782bf17 100644
--- a/chromium/v8/src/objects/map-updater.h
+++ b/chromium/v8/src/objects/map-updater.h
@@ -67,6 +67,22 @@ class V8_EXPORT_PRIVATE MapUpdater {
// version and performs the steps 1-6.
Handle<Map> Update();
+ static Handle<Map> ReconfigureExistingProperty(Isolate* isolate,
+ Handle<Map> map,
+ InternalIndex descriptor,
+ PropertyKind kind,
+ PropertyAttributes attributes,
+ PropertyConstness constness);
+
+ static void GeneralizeField(Isolate* isolate, Handle<Map> map,
+ InternalIndex modify_index,
+ PropertyConstness new_constness,
+ Representation new_representation,
+ Handle<FieldType> new_field_type);
+
+ static void ShrinkInstanceSize(base::SharedMutex* map_updater_access, Map map,
+ int slack);
+
private:
enum State {
kInitialized,
@@ -167,6 +183,16 @@ class V8_EXPORT_PRIVATE MapUpdater {
Handle<DescriptorArray> descriptors, InternalIndex descriptor,
PropertyLocation location, Representation representation);
+ // Update field type of the given descriptor to new representation and new
+ // type. The type must be prepared for storing in descriptor array:
+ // it must be either a simple type or a map wrapped in a weak cell.
+ static void UpdateFieldType(Isolate* isolate, Handle<Map> map,
+ InternalIndex descriptor_number,
+ Handle<Name> name,
+ PropertyConstness new_constness,
+ Representation new_representation,
+ const MaybeObjectHandle& new_wrapped_type);
+
void GeneralizeField(Handle<Map> map, InternalIndex modify_index,
PropertyConstness new_constness,
Representation new_representation,
diff --git a/chromium/v8/src/objects/map.cc b/chromium/v8/src/objects/map.cc
index 0f281519b79..0857d7beea3 100644
--- a/chromium/v8/src/objects/map.cc
+++ b/chromium/v8/src/objects/map.cc
@@ -10,7 +10,6 @@
#include "src/handles/maybe-handles.h"
#include "src/heap/heap-write-barrier-inl.h"
#include "src/init/bootstrapper.h"
-#include "src/logging/counters-inl.h"
#include "src/logging/log.h"
#include "src/objects/arguments-inl.h"
#include "src/objects/descriptor-array.h"
@@ -46,34 +45,16 @@ Map Map::GetPrototypeChainRootMap(Isolate* isolate) const {
}
// static
-MaybeHandle<JSFunction> Map::GetConstructorFunction(
- Handle<Map> map, Handle<Context> native_context) {
- if (map->IsPrimitiveMap()) {
- int const constructor_function_index = map->GetConstructorFunctionIndex();
+base::Optional<JSFunction> Map::GetConstructorFunction(Map map,
+ Context native_context) {
+ DisallowGarbageCollection no_gc;
+ if (map.IsPrimitiveMap()) {
+ int const constructor_function_index = map.GetConstructorFunctionIndex();
if (constructor_function_index != kNoConstructorFunctionIndex) {
- return handle(
- JSFunction::cast(native_context->get(constructor_function_index)),
- native_context->GetIsolate());
+ return JSFunction::cast(native_context.get(constructor_function_index));
}
}
- return MaybeHandle<JSFunction>();
-}
-
-void Map::PrintReconfiguration(Isolate* isolate, FILE* file,
- InternalIndex modify_index, PropertyKind kind,
- PropertyAttributes attributes) {
- OFStream os(file);
- os << "[reconfiguring]";
- Name name = instance_descriptors(isolate).GetKey(modify_index);
- if (name.IsString()) {
- String::cast(name).PrintOn(file);
- } else {
- os << "{symbol " << reinterpret_cast<void*>(name.ptr()) << "}";
- }
- os << ": " << (kind == kData ? "kData" : "ACCESSORS") << ", attrs: ";
- os << attributes << " [";
- JavaScriptFrame::PrintTop(isolate, file, false, true);
- os << "]\n";
+ return {};
}
Map Map::GetInstanceTypeMap(ReadOnlyRoots roots, InstanceType type) {
@@ -373,6 +354,10 @@ VisitorId Map::GetVisitorId(Map map) {
return kVisitWasmStruct;
case WASM_TYPE_INFO_TYPE:
return kVisitWasmTypeInfo;
+ case WASM_JS_FUNCTION_DATA_TYPE:
+ return kVisitWasmJSFunctionData;
+ case WASM_EXPORTED_FUNCTION_DATA_TYPE:
+ return kVisitWasmExportedFunctionData;
#endif // V8_ENABLE_WEBASSEMBLY
#define MAKE_TQ_CASE(TYPE, Name) \
@@ -386,51 +371,6 @@ VisitorId Map::GetVisitorId(Map map) {
}
}
-void Map::PrintGeneralization(
- Isolate* isolate, FILE* file, const char* reason,
- InternalIndex modify_index, int split, int descriptors,
- bool descriptor_to_field, Representation old_representation,
- Representation new_representation, PropertyConstness old_constness,
- PropertyConstness new_constness, MaybeHandle<FieldType> old_field_type,
- MaybeHandle<Object> old_value, MaybeHandle<FieldType> new_field_type,
- MaybeHandle<Object> new_value) {
- OFStream os(file);
- os << "[generalizing]";
- Name name = instance_descriptors(isolate).GetKey(modify_index);
- if (name.IsString()) {
- String::cast(name).PrintOn(file);
- } else {
- os << "{symbol " << reinterpret_cast<void*>(name.ptr()) << "}";
- }
- os << ":";
- if (descriptor_to_field) {
- os << "c";
- } else {
- os << old_representation.Mnemonic() << "{";
- if (old_field_type.is_null()) {
- os << Brief(*(old_value.ToHandleChecked()));
- } else {
- old_field_type.ToHandleChecked()->PrintTo(os);
- }
- os << ";" << old_constness << "}";
- }
- os << "->" << new_representation.Mnemonic() << "{";
- if (new_field_type.is_null()) {
- os << Brief(*(new_value.ToHandleChecked()));
- } else {
- new_field_type.ToHandleChecked()->PrintTo(os);
- }
- os << ";" << new_constness << "} (";
- if (strlen(reason) > 0) {
- os << reason;
- } else {
- os << "+" << (descriptors - split) << " maps";
- }
- os << ") [";
- JavaScriptFrame::PrintTop(isolate, file, false, true);
- os << "]\n";
-}
-
// static
MaybeObjectHandle Map::WrapFieldType(Isolate* isolate, Handle<FieldType> type) {
if (type->IsClass()) {
@@ -661,151 +601,6 @@ Map Map::FindFieldOwner(Isolate* isolate, InternalIndex descriptor) const {
return result;
}
-void Map::UpdateFieldType(Isolate* isolate, InternalIndex descriptor,
- Handle<Name> name, PropertyConstness new_constness,
- Representation new_representation,
- const MaybeObjectHandle& new_wrapped_type) {
- DCHECK(new_wrapped_type->IsSmi() || new_wrapped_type->IsWeak());
- // We store raw pointers in the queue, so no allocations are allowed.
- DisallowGarbageCollection no_gc;
- PropertyDetails details =
- instance_descriptors(isolate).GetDetails(descriptor);
- if (details.location() != kField) return;
- DCHECK_EQ(kData, details.kind());
-
- if (new_constness != details.constness() && is_prototype_map()) {
- JSObject::InvalidatePrototypeChains(*this);
- }
-
- Zone zone(isolate->allocator(), ZONE_NAME);
- ZoneQueue<Map> backlog(&zone);
- backlog.push(*this);
-
- while (!backlog.empty()) {
- Map current = backlog.front();
- backlog.pop();
-
- TransitionsAccessor transitions(isolate, current, &no_gc);
- int num_transitions = transitions.NumberOfTransitions();
- for (int i = 0; i < num_transitions; ++i) {
- Map target = transitions.GetTarget(i);
- backlog.push(target);
- }
- DescriptorArray descriptors = current.instance_descriptors(isolate);
- PropertyDetails details = descriptors.GetDetails(descriptor);
-
- // It is allowed to change representation here only from None
- // to something or from Smi or HeapObject to Tagged.
- DCHECK(details.representation().Equals(new_representation) ||
- details.representation().CanBeInPlaceChangedTo(new_representation));
-
- // Skip if already updated the shared descriptor.
- if (new_constness != details.constness() ||
- !new_representation.Equals(details.representation()) ||
- descriptors.GetFieldType(descriptor) != *new_wrapped_type.object()) {
- Descriptor d = Descriptor::DataField(
- name, descriptors.GetFieldIndex(descriptor), details.attributes(),
- new_constness, new_representation, new_wrapped_type);
- descriptors.Replace(descriptor, &d);
- }
- }
-}
-
-bool FieldTypeIsCleared(Representation rep, FieldType type) {
- return type.IsNone() && rep.IsHeapObject();
-}
-
-// static
-Handle<FieldType> Map::GeneralizeFieldType(Representation rep1,
- Handle<FieldType> type1,
- Representation rep2,
- Handle<FieldType> type2,
- Isolate* isolate) {
- // Cleared field types need special treatment. They represent lost knowledge,
- // so we must be conservative, so their generalization with any other type
- // is "Any".
- if (FieldTypeIsCleared(rep1, *type1) || FieldTypeIsCleared(rep2, *type2)) {
- return FieldType::Any(isolate);
- }
- if (type1->NowIs(type2)) return type2;
- if (type2->NowIs(type1)) return type1;
- return FieldType::Any(isolate);
-}
-
-// static
-void Map::GeneralizeField(Isolate* isolate, Handle<Map> map,
- InternalIndex modify_index,
- PropertyConstness new_constness,
- Representation new_representation,
- Handle<FieldType> new_field_type) {
- // Check if we actually need to generalize the field type at all.
- Handle<DescriptorArray> old_descriptors(map->instance_descriptors(isolate),
- isolate);
- PropertyDetails old_details = old_descriptors->GetDetails(modify_index);
- PropertyConstness old_constness = old_details.constness();
- Representation old_representation = old_details.representation();
- Handle<FieldType> old_field_type(old_descriptors->GetFieldType(modify_index),
- isolate);
-
- // Return if the current map is general enough to hold requested constness and
- // representation/field type.
- if (IsGeneralizableTo(new_constness, old_constness) &&
- old_representation.Equals(new_representation) &&
- !FieldTypeIsCleared(new_representation, *new_field_type) &&
- // Checking old_field_type for being cleared is not necessary because
- // the NowIs check below would fail anyway in that case.
- new_field_type->NowIs(old_field_type)) {
- DCHECK(GeneralizeFieldType(old_representation, old_field_type,
- new_representation, new_field_type, isolate)
- ->NowIs(old_field_type));
- return;
- }
-
- // Determine the field owner.
- Handle<Map> field_owner(map->FindFieldOwner(isolate, modify_index), isolate);
- Handle<DescriptorArray> descriptors(
- field_owner->instance_descriptors(isolate), isolate);
- DCHECK_EQ(*old_field_type, descriptors->GetFieldType(modify_index));
-
- new_field_type =
- Map::GeneralizeFieldType(old_representation, old_field_type,
- new_representation, new_field_type, isolate);
-
- new_constness = GeneralizeConstness(old_constness, new_constness);
-
- PropertyDetails details = descriptors->GetDetails(modify_index);
- Handle<Name> name(descriptors->GetKey(modify_index), isolate);
-
- MaybeObjectHandle wrapped_type(WrapFieldType(isolate, new_field_type));
- field_owner->UpdateFieldType(isolate, modify_index, name, new_constness,
- new_representation, wrapped_type);
-
- if (new_constness != old_constness) {
- field_owner->dependent_code().DeoptimizeDependentCodeGroup(
- DependentCode::kFieldConstGroup);
- }
-
- if (!new_field_type->Equals(*old_field_type)) {
- field_owner->dependent_code().DeoptimizeDependentCodeGroup(
- DependentCode::kFieldTypeGroup);
- }
-
- if (!new_representation.Equals(old_representation)) {
- field_owner->dependent_code().DeoptimizeDependentCodeGroup(
- DependentCode::kFieldRepresentationGroup);
- }
-
- if (FLAG_trace_generalization) {
- map->PrintGeneralization(
- isolate, stdout, "field type generalization", modify_index,
- map->NumberOfOwnDescriptors(), map->NumberOfOwnDescriptors(), false,
- details.representation(),
- descriptors->GetDetails(modify_index).representation(), old_constness,
- new_constness, old_field_type, MaybeHandle<Object>(), new_field_type,
- MaybeHandle<Object>());
- }
-}
-
namespace {
Map SearchMigrationTarget(Isolate* isolate, Map old_map) {
@@ -831,7 +626,7 @@ Map SearchMigrationTarget(Isolate* isolate, Map old_map) {
PropertyDetails old_details = old_descriptors.GetDetails(i);
if (old_details.location() == kField && old_details.kind() == kData) {
FieldType old_type = old_descriptors.GetFieldType(i);
- if (FieldTypeIsCleared(old_details.representation(), old_type)) {
+ if (Map::FieldTypeIsCleared(old_details.representation(), old_type)) {
return Map();
}
}
@@ -1362,7 +1157,7 @@ int Map::NumberOfEnumerableProperties() const {
int Map::NextFreePropertyIndex() const {
int number_of_own_descriptors = NumberOfOwnDescriptors();
- DescriptorArray descs = instance_descriptors();
+ DescriptorArray descs = instance_descriptors(kRelaxedLoad);
// Search properties backwards to find the last field.
for (int i = number_of_own_descriptors - 1; i >= 0; --i) {
PropertyDetails details = descs.GetDetails(InternalIndex(i));
@@ -1418,9 +1213,7 @@ Handle<Map> Map::RawCopy(Isolate* isolate, Handle<Map> map, int instance_size,
Handle<HeapObject> prototype(map->prototype(), isolate);
Map::SetPrototype(isolate, result, prototype);
result->set_constructor_or_back_pointer(map->GetConstructor());
- // TODO(solanes, v8:7790, v8:11353): set_relaxed_bit_field could be an atomic
- // set if TSAN could see the transitions happening in StoreIC.
- result->set_relaxed_bit_field(map->bit_field());
+ result->set_bit_field(map->bit_field());
result->set_bit_field2(map->bit_field2());
int new_bit_field3 = map->bit_field3();
new_bit_field3 = Bits3::OwnsDescriptorsBit::update(new_bit_field3, true);
@@ -1432,6 +1225,7 @@ Handle<Map> Map::RawCopy(Isolate* isolate, Handle<Map> map, int instance_size,
if (!map->is_dictionary_map()) {
new_bit_field3 = Bits3::IsUnstableBit::update(new_bit_field3, false);
}
+ // Same as bit_field comment above.
result->set_bit_field3(new_bit_field3);
result->clear_padding();
return result;
@@ -2075,11 +1869,10 @@ Handle<Map> Map::TransitionToDataProperty(Isolate* isolate, Handle<Map> map,
PropertyAttributes attributes,
PropertyConstness constness,
StoreOrigin store_origin) {
- RuntimeCallTimerScope stats_scope(
- isolate,
- map->IsDetached(isolate)
- ? RuntimeCallCounterId::kPrototypeMap_TransitionToDataProperty
- : RuntimeCallCounterId::kMap_TransitionToDataProperty);
+ RCS_SCOPE(isolate,
+ map->IsDetached(isolate)
+ ? RuntimeCallCounterId::kPrototypeMap_TransitionToDataProperty
+ : RuntimeCallCounterId::kMap_TransitionToDataProperty);
DCHECK(name->IsUniqueName());
DCHECK(!map->is_dictionary_map());
@@ -2156,40 +1949,13 @@ Handle<Map> Map::TransitionToDataProperty(Isolate* isolate, Handle<Map> map,
return result;
}
-Handle<Map> Map::ReconfigureExistingProperty(Isolate* isolate, Handle<Map> map,
- InternalIndex descriptor,
- PropertyKind kind,
- PropertyAttributes attributes,
- PropertyConstness constness) {
- // Dictionaries have to be reconfigured in-place.
- DCHECK(!map->is_dictionary_map());
-
- if (!map->GetBackPointer().IsMap()) {
- // There is no benefit from reconstructing transition tree for maps without
- // back pointers, normalize and try to hit the map cache instead.
- return Map::Normalize(isolate, map, CLEAR_INOBJECT_PROPERTIES,
- "Normalize_AttributesMismatchProtoMap");
- }
-
- if (FLAG_trace_generalization) {
- map->PrintReconfiguration(isolate, stdout, descriptor, kind, attributes);
- }
-
- MapUpdater mu(isolate, map);
- DCHECK_EQ(kData, kind); // Only kData case is supported so far.
- Handle<Map> new_map = mu.ReconfigureToDataField(
- descriptor, attributes, constness, Representation::None(),
- FieldType::None(isolate));
- return new_map;
-}
-
Handle<Map> Map::TransitionToAccessorProperty(Isolate* isolate, Handle<Map> map,
Handle<Name> name,
InternalIndex descriptor,
Handle<Object> getter,
Handle<Object> setter,
PropertyAttributes attributes) {
- RuntimeCallTimerScope stats_scope(
+ RCS_SCOPE(
isolate,
map->IsDetached(isolate)
? RuntimeCallCounterId::kPrototypeMap_TransitionToAccessorProperty
@@ -2438,13 +2204,6 @@ bool Map::EquivalentToForNormalization(const Map other,
JSObject::GetEmbedderFieldCount(other);
}
-static void GetMinInobjectSlack(Map map, void* data) {
- int slack = map.UnusedPropertyFields();
- if (*reinterpret_cast<int*>(data) > slack) {
- *reinterpret_cast<int*>(data) = slack;
- }
-}
-
int Map::ComputeMinObjectSlack(Isolate* isolate) {
DisallowGarbageCollection no_gc;
// Has to be an initial map.
@@ -2452,27 +2211,13 @@ int Map::ComputeMinObjectSlack(Isolate* isolate) {
int slack = UnusedPropertyFields();
TransitionsAccessor transitions(isolate, *this, &no_gc);
- transitions.TraverseTransitionTree(&GetMinInobjectSlack, &slack);
+ TransitionsAccessor::TraverseCallback callback = [&](Map map) {
+ slack = std::min(slack, map.UnusedPropertyFields());
+ };
+ transitions.TraverseTransitionTree(callback);
return slack;
}
-static void ShrinkInstanceSize(Map map, void* data) {
- int slack = *reinterpret_cast<int*>(data);
- DCHECK_GE(slack, 0);
-#ifdef DEBUG
- int old_visitor_id = Map::GetVisitorId(map);
- int new_unused = map.UnusedPropertyFields() - slack;
-#endif
- map.set_instance_size(map.InstanceSizeFromSlack(slack));
- map.set_construction_counter(Map::kNoSlackTracking);
- DCHECK_EQ(old_visitor_id, Map::GetVisitorId(map));
- DCHECK_EQ(new_unused, map.UnusedPropertyFields());
-}
-
-static void StopSlackTracking(Map map, void* data) {
- map.set_construction_counter(Map::kNoSlackTracking);
-}
-
void Map::CompleteInobjectSlackTracking(Isolate* isolate) {
DisallowGarbageCollection no_gc;
// Has to be an initial map.
@@ -2480,12 +2225,19 @@ void Map::CompleteInobjectSlackTracking(Isolate* isolate) {
int slack = ComputeMinObjectSlack(isolate);
TransitionsAccessor transitions(isolate, *this, &no_gc);
+ TransitionsAccessor::TraverseCallback callback;
if (slack != 0) {
// Resize the initial map and all maps in its transition tree.
- transitions.TraverseTransitionTree(&ShrinkInstanceSize, &slack);
+ callback = [&](Map map) {
+ MapUpdater::ShrinkInstanceSize(isolate->map_updater_access(), map, slack);
+ };
} else {
- transitions.TraverseTransitionTree(&StopSlackTracking, nullptr);
+ callback = [](Map map) {
+ // Stop slack tracking for this map.
+ map.set_construction_counter(Map::kNoSlackTracking);
+ };
}
+ transitions.TraverseTransitionTree(callback);
}
void Map::SetInstanceDescriptors(Isolate* isolate, DescriptorArray descriptors,
@@ -2583,8 +2335,7 @@ bool Map::IsPrototypeChainInvalidated(Map map) {
void Map::SetPrototype(Isolate* isolate, Handle<Map> map,
Handle<HeapObject> prototype,
bool enable_prototype_setup_mode) {
- RuntimeCallTimerScope stats_scope(isolate,
- RuntimeCallCounterId::kMap_SetPrototype);
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kMap_SetPrototype);
if (prototype->IsJSObject()) {
Handle<JSObject> prototype_jsobj = Handle<JSObject>::cast(prototype);
diff --git a/chromium/v8/src/objects/map.h b/chromium/v8/src/objects/map.h
index e31bb9a51f4..83e5dfa365b 100644
--- a/chromium/v8/src/objects/map.h
+++ b/chromium/v8/src/objects/map.h
@@ -68,9 +68,12 @@ enum InstanceType : uint16_t;
V(Symbol) \
V(SyntheticModule) \
V(TransitionArray) \
+ IF_WASM(V, WasmArray) \
+ IF_WASM(V, WasmExportedFunctionData) \
+ IF_WASM(V, WasmFunctionData) \
IF_WASM(V, WasmIndirectFunctionTable) \
IF_WASM(V, WasmInstanceObject) \
- IF_WASM(V, WasmArray) \
+ IF_WASM(V, WasmJSFunctionData) \
IF_WASM(V, WasmStruct) \
IF_WASM(V, WasmTypeInfo) \
V(WeakCell)
@@ -148,8 +151,7 @@ using MapHandles = std::vector<Handle<Map>>;
// | Byte | [bit_field2] |
// | | - new_target_is_base (bit 0) |
// | | - is_immutable_proto (bit 1) |
-// | | - unused bit (bit 2) |
-// | | - elements_kind (bits 3..7) |
+// | | - elements_kind (bits 2..7) |
// +----+----------+-------------------------------------------------+
// | Int | [bit_field3] |
// | | - enum_length (bit 0..9) |
@@ -211,8 +213,8 @@ class Map : public HeapObject {
static const int kNoConstructorFunctionIndex = 0;
inline int GetConstructorFunctionIndex() const;
inline void SetConstructorFunctionIndex(int value);
- static MaybeHandle<JSFunction> GetConstructorFunction(
- Handle<Map> map, Handle<Context> native_context);
+ static base::Optional<JSFunction> GetConstructorFunction(
+ Map map, Context native_context);
// Retrieve interceptors.
DECL_GETTER(GetNamedInterceptor, InterceptorInfo)
@@ -244,7 +246,12 @@ class Map : public HeapObject {
//
// Bit field.
//
+ // The setter in this pair calls the relaxed setter if concurrent marking is
+  // on, or performs the write non-atomically if it's off. The read is always
+  // non-atomic. This is done to have wider TSAN coverage on the cases where
+ // it's possible.
DECL_PRIMITIVE_ACCESSORS(bit_field, byte)
+
// Atomic accessors, used for allowlisting legitimate concurrent accesses.
DECL_PRIMITIVE_ACCESSORS(relaxed_bit_field, byte)
@@ -266,8 +273,14 @@ class Map : public HeapObject {
//
// Bit field 3.
//
+ // {bit_field3} calls the relaxed accessors if concurrent marking is on, or
+ // performs the read/write non-atomically if it's off. This is done to have
+ // wider TSAN coverage on the cases where it's possible.
DECL_PRIMITIVE_ACCESSORS(bit_field3, uint32_t)
+ DECL_PRIMITIVE_ACCESSORS(relaxed_bit_field3, uint32_t)
+ DECL_PRIMITIVE_ACCESSORS(release_acquire_bit_field3, uint32_t)
+
// Clear uninitialized padding space. This ensures that the snapshot content
// is deterministic. Depending on the V8 build mode there could be no padding.
V8_INLINE void clear_padding();
@@ -402,6 +415,8 @@ class Map : public HeapObject {
inline bool has_fast_sloppy_arguments_elements() const;
inline bool has_fast_string_wrapper_elements() const;
inline bool has_typed_array_elements() const;
+ inline bool has_rab_gsab_typed_array_elements() const;
+ inline bool has_typed_array_or_rab_gsab_typed_array_elements() const;
inline bool has_dictionary_elements() const;
inline bool has_any_nonextensible_elements() const;
inline bool has_nonextensible_elements() const;
@@ -488,18 +503,11 @@ class Map : public HeapObject {
bool InstancesNeedRewriting(Map target, int target_number_of_fields,
int target_inobject, int target_unused,
int* old_number_of_fields) const;
- V8_WARN_UNUSED_RESULT static Handle<FieldType> GeneralizeFieldType(
- Representation rep1, Handle<FieldType> type1, Representation rep2,
- Handle<FieldType> type2, Isolate* isolate);
- static void GeneralizeField(Isolate* isolate, Handle<Map> map,
- InternalIndex modify_index,
- PropertyConstness new_constness,
- Representation new_representation,
- Handle<FieldType> new_field_type);
// Returns true if the |field_type| is the most general one for
// given |representation|.
static inline bool IsMostGeneralFieldType(Representation representation,
FieldType field_type);
+ static inline bool FieldTypeIsCleared(Representation rep, FieldType type);
// Generalizes representation and field_type if objects with given
// instance type can have fast elements that can be transitioned by
@@ -554,6 +562,7 @@ class Map : public HeapObject {
// and with the Wasm type info for WebAssembly object maps.
DECL_ACCESSORS(constructor_or_back_pointer, Object)
DECL_ACCESSORS(native_context, NativeContext)
+ DECL_ACCESSORS(native_context_or_null, Object)
DECL_ACCESSORS(wasm_type_info, WasmTypeInfo)
DECL_GETTER(GetConstructor, Object)
DECL_GETTER(GetFunctionTemplateInfo, FunctionTemplateInfo)
@@ -716,10 +725,6 @@ class Map : public HeapObject {
Isolate* isolate, Handle<Map> map, Handle<Name> name,
InternalIndex descriptor, Handle<Object> getter, Handle<Object> setter,
PropertyAttributes attributes);
- V8_EXPORT_PRIVATE static Handle<Map> ReconfigureExistingProperty(
- Isolate* isolate, Handle<Map> map, InternalIndex descriptor,
- PropertyKind kind, PropertyAttributes attributes,
- PropertyConstness constness);
inline void AppendDescriptor(Isolate* isolate, Descriptor* desc);
@@ -912,28 +917,6 @@ class Map : public HeapObject {
void ReplaceDescriptors(Isolate* isolate, DescriptorArray new_descriptors);
- // Update field type of the given descriptor to new representation and new
- // type. The type must be prepared for storing in descriptor array:
- // it must be either a simple type or a map wrapped in a weak cell.
- void UpdateFieldType(Isolate* isolate, InternalIndex descriptor_number,
- Handle<Name> name, PropertyConstness new_constness,
- Representation new_representation,
- const MaybeObjectHandle& new_wrapped_type);
-
- // TODO(ishell): Move to MapUpdater.
- void PrintReconfiguration(Isolate* isolate, FILE* file,
- InternalIndex modify_index, PropertyKind kind,
- PropertyAttributes attributes);
- // TODO(ishell): Move to MapUpdater.
- void PrintGeneralization(
- Isolate* isolate, FILE* file, const char* reason,
- InternalIndex modify_index, int split, int descriptors,
- bool constant_to_field, Representation old_representation,
- Representation new_representation, PropertyConstness old_constness,
- PropertyConstness new_constness, MaybeHandle<FieldType> old_field_type,
- MaybeHandle<Object> old_value, MaybeHandle<FieldType> new_field_type,
- MaybeHandle<Object> new_value);
-
// This is the equivalent of IsMap() but avoids reading the instance type so
// it can be used concurrently without acquire load.
V8_INLINE bool ConcurrentIsMap(PtrComprCageBase cage_base,
diff --git a/chromium/v8/src/objects/map.tq b/chromium/v8/src/objects/map.tq
index 4cd3f2d67fa..49b2e5be36d 100644
--- a/chromium/v8/src/objects/map.tq
+++ b/chromium/v8/src/objects/map.tq
@@ -16,8 +16,7 @@ bitfield struct MapBitFields1 extends uint8 {
bitfield struct MapBitFields2 extends uint8 {
new_target_is_base: bool: 1 bit;
is_immutable_prototype: bool: 1 bit;
- unused: bool: 1 bit;
- elements_kind: ElementsKind: 5 bit;
+ elements_kind: ElementsKind: 6 bit;
}
bitfield struct MapBitFields3 extends uint32 {
diff --git a/chromium/v8/src/objects/megadom-handler-inl.h b/chromium/v8/src/objects/megadom-handler-inl.h
new file mode 100644
index 00000000000..87efc66f0e5
--- /dev/null
+++ b/chromium/v8/src/objects/megadom-handler-inl.h
@@ -0,0 +1,26 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_MEGADOM_HANDLER_INL_H_
+#define V8_OBJECTS_MEGADOM_HANDLER_INL_H_
+
+#include "src/objects/megadom-handler.h"
+#include "src/objects/objects-inl.h" // Needed for write barriers
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+#include "torque-generated/src/objects/megadom-handler-tq-inl.inc"
+
+TQ_OBJECT_CONSTRUCTORS_IMPL(MegaDomHandler)
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_MEGADOM_HANDLER_INL_H_
diff --git a/chromium/v8/src/objects/megadom-handler.h b/chromium/v8/src/objects/megadom-handler.h
new file mode 100644
index 00000000000..00de29d0d5f
--- /dev/null
+++ b/chromium/v8/src/objects/megadom-handler.h
@@ -0,0 +1,32 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_MEGADOM_HANDLER_H_
+#define V8_OBJECTS_MEGADOM_HANDLER_H_
+
+#include "src/objects/heap-object.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+#include "torque-generated/src/objects/megadom-handler-tq.inc"
+
+class MegaDomHandler
+ : public TorqueGeneratedMegaDomHandler<MegaDomHandler, HeapObject> {
+ public:
+ void BriefPrintDetails(std::ostream& os);
+ class BodyDescriptor;
+
+ TQ_OBJECT_CONSTRUCTORS(MegaDomHandler)
+};
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_MEGADOM_HANDLER_H_
diff --git a/chromium/v8/src/objects/megadom-handler.tq b/chromium/v8/src/objects/megadom-handler.tq
new file mode 100644
index 00000000000..7daaa5a8d80
--- /dev/null
+++ b/chromium/v8/src/objects/megadom-handler.tq
@@ -0,0 +1,11 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+@generateCppClass
+@generatePrint
+@generateBodyDescriptor
+extern class MegaDomHandler extends HeapObject {
+ accessor: MaybeObject;
+ context: MaybeObject;
+}
diff --git a/chromium/v8/src/objects/object-list-macros.h b/chromium/v8/src/objects/object-list-macros.h
index fc3f956d2db..837f895f5fc 100644
--- a/chromium/v8/src/objects/object-list-macros.h
+++ b/chromium/v8/src/objects/object-list-macros.h
@@ -166,6 +166,7 @@ class ZoneForwardList;
V(LoadHandler) \
V(Map) \
V(MapCache) \
+ V(MegaDomHandler) \
V(Module) \
V(Microtask) \
V(Name) \
@@ -217,8 +218,11 @@ class ZoneForwardList;
IF_WASM(V, WasmArray) \
IF_WASM(V, WasmExceptionObject) \
IF_WASM(V, WasmExceptionPackage) \
+ IF_WASM(V, WasmExportedFunctionData) \
+ IF_WASM(V, WasmFunctionData) \
IF_WASM(V, WasmGlobalObject) \
IF_WASM(V, WasmInstanceObject) \
+ IF_WASM(V, WasmJSFunctionData) \
IF_WASM(V, WasmMemoryObject) \
IF_WASM(V, WasmModuleObject) \
IF_WASM(V, WasmStruct) \
diff --git a/chromium/v8/src/objects/object-macros-undef.h b/chromium/v8/src/objects/object-macros-undef.h
index e2c5961ab2a..1d240729ff6 100644
--- a/chromium/v8/src/objects/object-macros-undef.h
+++ b/chromium/v8/src/objects/object-macros-undef.h
@@ -14,10 +14,8 @@
#undef DECL_PRIMITIVE_GETTER
#undef DECL_PRIMITIVE_SETTER
#undef DECL_PRIMITIVE_ACCESSORS
-#undef DECL_SYNCHRONIZED_PRIMITIVE_ACCESSORS
#undef DECL_BOOLEAN_ACCESSORS
#undef DECL_INT_ACCESSORS
-#undef DECL_SYNCHRONIZED_INT_ACCESSORS
#undef DECL_INT32_ACCESSORS
#undef DECL_UINT16_ACCESSORS
#undef DECL_INT16_ACCESSORS
@@ -61,7 +59,9 @@
#undef RELEASE_ACQUIRE_WEAK_ACCESSORS
#undef SMI_ACCESSORS_CHECKED
#undef SMI_ACCESSORS
-#undef SYNCHRONIZED_SMI_ACCESSORS
+#undef DECL_RELEASE_ACQUIRE_INT_ACCESSORS
+#undef RELEASE_ACQUIRE_SMI_ACCESSORS
+#undef DECL_RELAXED_SMI_ACCESSORS
#undef RELAXED_SMI_ACCESSORS
#undef BOOL_GETTER
#undef BOOL_ACCESSORS
diff --git a/chromium/v8/src/objects/object-macros.h b/chromium/v8/src/objects/object-macros.h
index 2a742d5d77d..ed4637a406a 100644
--- a/chromium/v8/src/objects/object-macros.h
+++ b/chromium/v8/src/objects/object-macros.h
@@ -56,17 +56,10 @@
DECL_PRIMITIVE_GETTER(name, type) \
DECL_PRIMITIVE_SETTER(name, type)
-#define DECL_SYNCHRONIZED_PRIMITIVE_ACCESSORS(name, type) \
- inline type synchronized_##name() const; \
- inline void synchronized_set_##name(type value);
-
#define DECL_BOOLEAN_ACCESSORS(name) DECL_PRIMITIVE_ACCESSORS(name, bool)
#define DECL_INT_ACCESSORS(name) DECL_PRIMITIVE_ACCESSORS(name, int)
-#define DECL_SYNCHRONIZED_INT_ACCESSORS(name) \
- DECL_SYNCHRONIZED_PRIMITIVE_ACCESSORS(name, int)
-
#define DECL_INT32_ACCESSORS(name) DECL_PRIMITIVE_ACCESSORS(name, int32_t)
#define DECL_UINT16_ACCESSORS(name) \
@@ -324,21 +317,29 @@
#define SMI_ACCESSORS(holder, name, offset) \
SMI_ACCESSORS_CHECKED(holder, name, offset, true)
-#define SYNCHRONIZED_SMI_ACCESSORS(holder, name, offset) \
- int holder::synchronized_##name() const { \
+#define DECL_RELEASE_ACQUIRE_INT_ACCESSORS(name) \
+ inline int name(AcquireLoadTag) const; \
+ inline void set_##name(int value, ReleaseStoreTag);
+
+#define RELEASE_ACQUIRE_SMI_ACCESSORS(holder, name, offset) \
+ int holder::name(AcquireLoadTag) const { \
Smi value = TaggedField<Smi, offset>::Acquire_Load(*this); \
return value.value(); \
} \
- void holder::synchronized_set_##name(int value) { \
+ void holder::set_##name(int value, ReleaseStoreTag) { \
TaggedField<Smi, offset>::Release_Store(*this, Smi::FromInt(value)); \
}
+#define DECL_RELAXED_SMI_ACCESSORS(name) \
+ inline int name(RelaxedLoadTag) const; \
+ inline void set_##name(int value, RelaxedStoreTag);
+
#define RELAXED_SMI_ACCESSORS(holder, name, offset) \
- int holder::relaxed_read_##name() const { \
+ int holder::name(RelaxedLoadTag) const { \
Smi value = TaggedField<Smi, offset>::Relaxed_Load(*this); \
return value.value(); \
} \
- void holder::relaxed_write_##name(int value) { \
+ void holder::set_##name(int value, RelaxedStoreTag) { \
TaggedField<Smi, offset>::Relaxed_Store(*this, Smi::FromInt(value)); \
}
@@ -604,7 +605,7 @@ static_assert(sizeof(unsigned) == sizeof(uint32_t),
static_cast<base::Atomic8>(value));
#ifdef OBJECT_PRINT
-#define DECL_PRINTER(Name) void Name##Print(std::ostream& os); // NOLINT
+#define DECL_PRINTER(Name) void Name##Print(std::ostream& os);
#else
#define DECL_PRINTER(Name)
#endif
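This hunk retires the synchronized_*/relaxed_read_* naming in favour of overloads selected by a tag argument, so the memory ordering is spelled out at every call site (length(kAcquireLoad), set_length(v, kReleaseStore)). A minimal, self-contained sketch of the same tag-dispatch idiom, assuming nothing beyond the standard library:

#include <atomic>

struct RelaxedLoadTag {};
struct AcquireLoadTag {};
struct ReleaseStoreTag {};
inline constexpr RelaxedLoadTag kRelaxedLoad{};
inline constexpr AcquireLoadTag kAcquireLoad{};
inline constexpr ReleaseStoreTag kReleaseStore{};

class Length {
 public:
  // The tag parameter carries no data; it only selects the overload, so the
  // chosen memory order is visible in the source at the call site.
  int value(RelaxedLoadTag) const { return v_.load(std::memory_order_relaxed); }
  int value(AcquireLoadTag) const { return v_.load(std::memory_order_acquire); }
  void set_value(int v, ReleaseStoreTag) {
    v_.store(v, std::memory_order_release);
  }

 private:
  std::atomic<int> v_{0};
};

// Call sites then read as obj.value(kAcquireLoad) / obj.set_value(n, kReleaseStore),
// mirroring length(kAcquireLoad) and set_##name(value, ReleaseStoreTag) in the patch.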
diff --git a/chromium/v8/src/objects/objects-body-descriptors-inl.h b/chromium/v8/src/objects/objects-body-descriptors-inl.h
index 2ffe2a44144..18dceb527d4 100644
--- a/chromium/v8/src/objects/objects-body-descriptors-inl.h
+++ b/chromium/v8/src/objects/objects-body-descriptors-inl.h
@@ -15,6 +15,7 @@
#include "src/objects/hash-table.h"
#include "src/objects/js-collection.h"
#include "src/objects/js-weak-refs.h"
+#include "src/objects/megadom-handler-inl.h"
#include "src/objects/objects-body-descriptors.h"
#include "src/objects/oddball.h"
#include "src/objects/ordered-hash-table-inl.h"
@@ -103,12 +104,17 @@ DISABLE_CFI_PERF void BodyDescriptorBase::IteratePointers(HeapObject obj,
int start_offset,
int end_offset,
ObjectVisitor* v) {
+ if (start_offset == HeapObject::kMapOffset) {
+ v->VisitMapPointer(obj);
+ start_offset += kTaggedSize;
+ }
v->VisitPointers(obj, obj.RawField(start_offset), obj.RawField(end_offset));
}
template <typename ObjectVisitor>
void BodyDescriptorBase::IteratePointer(HeapObject obj, int offset,
ObjectVisitor* v) {
+ DCHECK_NE(offset, HeapObject::kMapOffset);
v->VisitPointer(obj, obj.RawField(offset));
}
@@ -122,6 +128,7 @@ DISABLE_CFI_PERF void BodyDescriptorBase::IterateMaybeWeakPointers(
template <typename ObjectVisitor>
void BodyDescriptorBase::IterateMaybeWeakPointer(HeapObject obj, int offset,
ObjectVisitor* v) {
+ DCHECK_NE(offset, HeapObject::kMapOffset);
v->VisitPointer(obj, obj.RawMaybeWeakField(offset));
}
@@ -423,7 +430,7 @@ class ByteArray::BodyDescriptor final : public BodyDescriptorBase {
ObjectVisitor* v) {}
static inline int SizeOf(Map map, HeapObject obj) {
- return ByteArray::SizeFor(ByteArray::cast(obj).synchronized_length());
+ return ByteArray::SizeFor(ByteArray::cast(obj).length(kAcquireLoad));
}
};
@@ -444,7 +451,7 @@ class BytecodeArray::BodyDescriptor final : public BodyDescriptorBase {
static inline int SizeOf(Map map, HeapObject obj) {
return BytecodeArray::SizeFor(
- BytecodeArray::cast(obj).synchronized_length());
+ BytecodeArray::cast(obj).length(kAcquireLoad));
}
};
@@ -457,7 +464,7 @@ class BigInt::BodyDescriptor final : public BodyDescriptorBase {
ObjectVisitor* v) {}
static inline int SizeOf(Map map, HeapObject obj) {
- return BigInt::SizeFor(BigInt::cast(obj).synchronized_length());
+ return BigInt::SizeFor(BigInt::cast(obj).length(kAcquireLoad));
}
};
@@ -471,7 +478,7 @@ class FixedDoubleArray::BodyDescriptor final : public BodyDescriptorBase {
static inline int SizeOf(Map map, HeapObject obj) {
return FixedDoubleArray::SizeFor(
- FixedDoubleArray::cast(obj).synchronized_length());
+ FixedDoubleArray::cast(obj).length(kAcquireLoad));
}
};
@@ -582,6 +589,43 @@ class WasmTypeInfo::BodyDescriptor final : public BodyDescriptorBase {
static inline int SizeOf(Map map, HeapObject object) { return kSize; }
};
+class WasmJSFunctionData::BodyDescriptor final : public BodyDescriptorBase {
+ public:
+ static bool IsValidSlot(Map map, HeapObject obj, int offset) {
+ UNREACHABLE();
+ }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(Map map, HeapObject obj, int object_size,
+ ObjectVisitor* v) {
+ Foreign::BodyDescriptor::IterateBody<ObjectVisitor>(map, obj, object_size,
+ v);
+ IteratePointers(obj, WasmFunctionData::kStartOfStrongFieldsOffset,
+ kEndOfStrongFieldsOffset, v);
+ }
+
+ static inline int SizeOf(Map map, HeapObject object) { return kSize; }
+};
+
+class WasmExportedFunctionData::BodyDescriptor final
+ : public BodyDescriptorBase {
+ public:
+ static bool IsValidSlot(Map map, HeapObject obj, int offset) {
+ UNREACHABLE();
+ }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(Map map, HeapObject obj, int object_size,
+ ObjectVisitor* v) {
+ Foreign::BodyDescriptor::IterateBody<ObjectVisitor>(map, obj, object_size,
+ v);
+ IteratePointers(obj, WasmFunctionData::kStartOfStrongFieldsOffset,
+ kEndOfStrongFieldsOffset, v);
+ }
+
+ static inline int SizeOf(Map map, HeapObject object) { return kSize; }
+};
+
class WasmInstanceObject::BodyDescriptor final : public BodyDescriptorBase {
public:
static bool IsValidSlot(Map map, HeapObject obj, int offset) {
@@ -655,7 +699,7 @@ class WasmStruct::BodyDescriptor final : public BodyDescriptorBase {
}
static inline int SizeOf(Map map, HeapObject object) {
- return map.instance_size();
+ return WasmStruct::GcSafeSize(map);
}
};
@@ -944,6 +988,12 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3, T4 p4) {
#if V8_ENABLE_WEBASSEMBLY
case WASM_ARRAY_TYPE:
return Op::template apply<WasmArray::BodyDescriptor>(p1, p2, p3, p4);
+ case WASM_EXPORTED_FUNCTION_DATA_TYPE:
+ return Op::template apply<WasmExportedFunctionData::BodyDescriptor>(
+ p1, p2, p3, p4);
+ case WASM_JS_FUNCTION_DATA_TYPE:
+ return Op::template apply<WasmJSFunctionData::BodyDescriptor>(p1, p2, p3,
+ p4);
case WASM_STRUCT_TYPE:
return Op::template apply<WasmStruct::BodyDescriptor>(p1, p2, p3, p4);
case WASM_TYPE_INFO_TYPE:
@@ -1134,7 +1184,7 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3, T4 p4) {
template <typename ObjectVisitor>
void HeapObject::IterateFast(ObjectVisitor* v) {
- BodyDescriptorBase::IteratePointer(*this, kMapOffset, v);
+ v->VisitMapPointer(*this);
IterateBodyFast(v);
}
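The recurring theme in this file is that the map word is no longer treated as an ordinary tagged slot: it is routed through VisitMapPointer (it may be encoded under map packing), DCHECKs keep it out of the generic pointer paths, and SizeOf implementations switch to acquire loads for lengths that can change concurrently. A hedged sketch of a body descriptor following that pattern; the class name and offsets are illustrative, not from the patch:

class ExampleBodyDescriptor final : public BodyDescriptorBase {
 public:
  template <typename ObjectVisitor>
  static void IterateBody(Map map, HeapObject obj, int object_size,
                          ObjectVisitor* v) {
    // The header word goes through the dedicated hook so the visitor can
    // decode a packed map word when V8_MAP_PACKING is enabled.
    v->VisitMapPointer(obj);
    // Every other tagged field is an ordinary slot.
    IteratePointers(obj, HeapObject::kHeaderSize, object_size, v);
  }
  static int SizeOf(Map map, HeapObject obj) { return map.instance_size(); }
};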
diff --git a/chromium/v8/src/objects/objects-definitions.h b/chromium/v8/src/objects/objects-definitions.h
index 43560caab81..20ce96aae5d 100644
--- a/chromium/v8/src/objects/objects-definitions.h
+++ b/chromium/v8/src/objects/objects-definitions.h
@@ -152,12 +152,8 @@ namespace internal {
template_object_description) \
V(_, TUPLE2_TYPE, Tuple2, tuple2) \
IF_WASM(V, _, WASM_EXCEPTION_TAG_TYPE, WasmExceptionTag, wasm_exception_tag) \
- IF_WASM(V, _, WASM_EXPORTED_FUNCTION_DATA_TYPE, WasmExportedFunctionData, \
- wasm_exported_function_data) \
IF_WASM(V, _, WASM_INDIRECT_FUNCTION_TABLE_TYPE, WasmIndirectFunctionTable, \
- wasm_indirect_function_table) \
- IF_WASM(V, _, WASM_JS_FUNCTION_DATA_TYPE, WasmJSFunctionData, \
- wasm_js_function_data)
+ wasm_indirect_function_table)
// Adapts one STRUCT_LIST_GENERATOR entry to the STRUCT_LIST entry
#define STRUCT_LIST_ADAPTER(V, NAME, Name, name) V(NAME, Name, name)
diff --git a/chromium/v8/src/objects/objects-inl.h b/chromium/v8/src/objects/objects-inl.h
index c94feca2501..1a0ff834b26 100644
--- a/chromium/v8/src/objects/objects-inl.h
+++ b/chromium/v8/src/objects/objects-inl.h
@@ -16,6 +16,7 @@
#include "src/base/memory.h"
#include "src/builtins/builtins.h"
#include "src/common/external-pointer-inl.h"
+#include "src/common/globals.h"
#include "src/handles/handles-inl.h"
#include "src/heap/factory.h"
#include "src/heap/heap-write-barrier-inl.h"
@@ -42,7 +43,6 @@
#include "src/objects/tagged-impl-inl.h"
#include "src/objects/tagged-index.h"
#include "src/objects/templates.h"
-#include "src/sanitizer/tsan.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -631,10 +631,9 @@ void Object::InitExternalPointerField(size_t offset, Isolate* isolate,
i::InitExternalPointerField(field_address(offset), isolate, value, tag);
}
-Address Object::ReadExternalPointerField(size_t offset,
- PtrComprCageBase isolate_root,
+Address Object::ReadExternalPointerField(size_t offset, Isolate* isolate,
ExternalPointerTag tag) const {
- return i::ReadExternalPointerField(field_address(offset), isolate_root, tag);
+ return i::ReadExternalPointerField(field_address(offset), isolate, tag);
}
void Object::WriteExternalPointerField(size_t offset, Isolate* isolate,
@@ -650,11 +649,26 @@ MaybeObjectSlot HeapObject::RawMaybeWeakField(int byte_offset) const {
return MaybeObjectSlot(field_address(byte_offset));
}
-MapWord MapWord::FromMap(const Map map) { return MapWord(map.ptr()); }
+MapWord MapWord::FromMap(const Map map) {
+ DCHECK(map.is_null() || !MapWord::IsPacked(map.ptr()));
+#ifdef V8_MAP_PACKING
+ return MapWord(Pack(map.ptr()));
+#else
+ return MapWord(map.ptr());
+#endif
+}
-Map MapWord::ToMap() const { return Map::unchecked_cast(Object(value_)); }
+Map MapWord::ToMap() const {
+#ifdef V8_MAP_PACKING
+ return Map::unchecked_cast(Object(Unpack(value_)));
+#else
+ return Map::unchecked_cast(Object(value_));
+#endif
+}
-bool MapWord::IsForwardingAddress() const { return HAS_SMI_TAG(value_); }
+bool MapWord::IsForwardingAddress() const {
+ return (value_ & kForwardingTagMask) == kForwardingTag;
+}
MapWord MapWord::FromForwardingAddress(HeapObject object) {
return MapWord(object.ptr() - kHeapObjectTag);
@@ -697,7 +711,9 @@ ReadOnlyRoots HeapObject::GetReadOnlyRoots(PtrComprCageBase cage_base) const {
#endif
}
-DEF_GETTER(HeapObject, map, Map) { return map_word(cage_base).ToMap(); }
+DEF_GETTER(HeapObject, map, Map) {
+ return map_word(cage_base, kRelaxedLoad).ToMap();
+}
void HeapObject::set_map(Map value) {
#ifdef VERIFY_HEAP
@@ -705,7 +721,7 @@ void HeapObject::set_map(Map value) {
GetHeapFromWritableObject(*this)->VerifyObjectLayoutChange(*this, value);
}
#endif
- set_map_word(MapWord::FromMap(value));
+ set_map_word(MapWord::FromMap(value), kRelaxedStore);
#ifndef V8_DISABLE_WRITE_BARRIERS
if (!value.is_null()) {
// TODO(1600) We are passing kNullAddress as a slot because maps can never
@@ -715,17 +731,21 @@ void HeapObject::set_map(Map value) {
#endif
}
-DEF_GETTER(HeapObject, synchronized_map, Map) {
- return synchronized_map_word(cage_base).ToMap();
+Map HeapObject::map(AcquireLoadTag tag) const {
+ PtrComprCageBase cage_base = GetPtrComprCageBase(*this);
+ return HeapObject::map(cage_base, tag);
+}
+Map HeapObject::map(PtrComprCageBase cage_base, AcquireLoadTag tag) const {
+ return map_word(cage_base, tag).ToMap();
}
-void HeapObject::synchronized_set_map(Map value) {
+void HeapObject::set_map(Map value, ReleaseStoreTag tag) {
#ifdef VERIFY_HEAP
if (FLAG_verify_heap && !value.is_null()) {
GetHeapFromWritableObject(*this)->VerifyObjectLayoutChange(*this, value);
}
#endif
- synchronized_set_map_word(MapWord::FromMap(value));
+ set_map_word(MapWord::FromMap(value), tag);
#ifndef V8_DISABLE_WRITE_BARRIERS
if (!value.is_null()) {
// TODO(1600) We are passing kNullAddress as a slot because maps can never
@@ -742,11 +762,12 @@ void HeapObject::set_map_no_write_barrier(Map value) {
GetHeapFromWritableObject(*this)->VerifyObjectLayoutChange(*this, value);
}
#endif
- set_map_word(MapWord::FromMap(value));
+ set_map_word(MapWord::FromMap(value), kRelaxedStore);
}
void HeapObject::set_map_after_allocation(Map value, WriteBarrierMode mode) {
- set_map_word(MapWord::FromMap(value));
+ MapWord mapword = MapWord::FromMap(value);
+ set_map_word(mapword, kRelaxedStore);
#ifndef V8_DISABLE_WRITE_BARRIERS
if (mode != SKIP_WRITE_BARRIER) {
DCHECK(!value.is_null());
@@ -761,20 +782,28 @@ ObjectSlot HeapObject::map_slot() const {
return ObjectSlot(MapField::address(*this));
}
-DEF_GETTER(HeapObject, map_word, MapWord) {
- return MapField::Relaxed_Load(cage_base, *this);
+MapWord HeapObject::map_word(RelaxedLoadTag tag) const {
+ PtrComprCageBase cage_base = GetPtrComprCageBase(*this);
+ return HeapObject::map_word(cage_base, tag);
+}
+MapWord HeapObject::map_word(PtrComprCageBase cage_base, RelaxedLoadTag) const {
+ return MapField::Relaxed_Load_Map_Word(cage_base, *this);
}
-void HeapObject::set_map_word(MapWord map_word) {
- MapField::Relaxed_Store(*this, map_word);
+void HeapObject::set_map_word(MapWord map_word, RelaxedStoreTag) {
+ MapField::Relaxed_Store_Map_Word(*this, map_word);
}
-DEF_GETTER(HeapObject, synchronized_map_word, MapWord) {
- return MapField::Acquire_Load(cage_base, *this);
+MapWord HeapObject::map_word(AcquireLoadTag tag) const {
+ PtrComprCageBase cage_base = GetPtrComprCageBase(*this);
+ return HeapObject::map_word(cage_base, tag);
+}
+MapWord HeapObject::map_word(PtrComprCageBase cage_base, AcquireLoadTag) const {
+ return MapField::Acquire_Load_No_Unpack(cage_base, *this);
}
-void HeapObject::synchronized_set_map_word(MapWord map_word) {
- MapField::Release_Store(*this, map_word);
+void HeapObject::set_map_word(MapWord map_word, ReleaseStoreTag) {
+ MapField::Release_Store_Map_Word(*this, map_word);
}
bool HeapObject::release_compare_and_swap_map_word(MapWord old_map_word,
@@ -1081,8 +1110,7 @@ static inline uint32_t ObjectAddressForHashing(Address object) {
static inline Handle<Object> MakeEntryPair(Isolate* isolate, size_t index,
Handle<Object> value) {
Handle<Object> key = isolate->factory()->SizeToString(index);
- Handle<FixedArray> entry_storage =
- isolate->factory()->NewUninitializedFixedArray(2);
+ Handle<FixedArray> entry_storage = isolate->factory()->NewFixedArray(2);
{
entry_storage->set(0, *key, SKIP_WRITE_BARRIER);
entry_storage->set(1, *value, SKIP_WRITE_BARRIER);
@@ -1093,8 +1121,7 @@ static inline Handle<Object> MakeEntryPair(Isolate* isolate, size_t index,
static inline Handle<Object> MakeEntryPair(Isolate* isolate, Handle<Object> key,
Handle<Object> value) {
- Handle<FixedArray> entry_storage =
- isolate->factory()->NewUninitializedFixedArray(2);
+ Handle<FixedArray> entry_storage = isolate->factory()->NewFixedArray(2);
{
entry_storage->set(0, *key, SKIP_WRITE_BARRIER);
entry_storage->set(1, *value, SKIP_WRITE_BARRIER);
diff --git a/chromium/v8/src/objects/objects.cc b/chromium/v8/src/objects/objects.cc
index 18e659a910b..74b970b5451 100644
--- a/chromium/v8/src/objects/objects.cc
+++ b/chromium/v8/src/objects/objects.cc
@@ -41,7 +41,6 @@
#include "src/heap/read-only-heap.h"
#include "src/ic/ic.h"
#include "src/init/bootstrapper.h"
-#include "src/logging/counters-inl.h"
#include "src/logging/counters.h"
#include "src/logging/log.h"
#include "src/objects/allocation-site-inl.h"
@@ -102,6 +101,7 @@
#include "src/objects/literal-objects-inl.h"
#include "src/objects/map-inl.h"
#include "src/objects/map.h"
+#include "src/objects/megadom-handler-inl.h"
#include "src/objects/microtask-inl.h"
#include "src/objects/module-inl.h"
#include "src/objects/promise-inl.h"
@@ -1320,8 +1320,8 @@ Handle<SharedFunctionInfo> FunctionTemplateInfo::GetOrCreateSharedFunctionInfo(
return result;
}
-bool FunctionTemplateInfo::IsTemplateFor(Map map) {
- RuntimeCallTimerScope timer(
+bool FunctionTemplateInfo::IsTemplateFor(Map map) const {
+ RCS_SCOPE(
LocalHeap::Current() == nullptr
? GetIsolate()->counters()->runtime_call_stats()
: LocalIsolate::FromHeap(LocalHeap::Current())->runtime_call_stats(),
@@ -1350,16 +1350,32 @@ bool FunctionTemplateInfo::IsTemplateFor(Map map) {
return false;
}
+bool FunctionTemplateInfo::IsLeafTemplateForApiObject(Object object) const {
+ i::DisallowGarbageCollection no_gc;
+
+ if (!object.IsJSApiObject()) {
+ return false;
+ }
+
+ bool result = false;
+ Map map = HeapObject::cast(object).map();
+ Object constructor_obj = map.GetConstructor();
+ if (constructor_obj.IsJSFunction()) {
+ JSFunction fun = JSFunction::cast(constructor_obj);
+ result = (*this == fun.shared().function_data(kAcquireLoad));
+ } else if (constructor_obj.IsFunctionTemplateInfo()) {
+ result = (*this == constructor_obj);
+ }
+ DCHECK_IMPLIES(result, IsTemplateFor(map));
+ return result;
+}
+
// static
FunctionTemplateRareData FunctionTemplateInfo::AllocateFunctionTemplateRareData(
Isolate* isolate, Handle<FunctionTemplateInfo> function_template_info) {
DCHECK(function_template_info->rare_data(kAcquireLoad).IsUndefined(isolate));
- Handle<Struct> struct_obj = isolate->factory()->NewStruct(
- FUNCTION_TEMPLATE_RARE_DATA_TYPE, AllocationType::kOld);
Handle<FunctionTemplateRareData> rare_data =
- i::Handle<FunctionTemplateRareData>::cast(struct_obj);
- rare_data->set_c_function(Smi(0));
- rare_data->set_c_signature(Smi(0));
+ isolate->factory()->NewFunctionTemplateRareData();
function_template_info->set_rare_data(*rare_data, kReleaseStore);
return *rare_data;
}
@@ -1887,11 +1903,9 @@ std::ostream& operator<<(std::ostream& os, const Brief& v) {
return os;
}
-void Smi::SmiPrint(std::ostream& os) const { // NOLINT
- os << value();
-}
+void Smi::SmiPrint(std::ostream& os) const { os << value(); }
-void HeapObject::HeapObjectShortPrint(std::ostream& os) { // NOLINT
+void HeapObject::HeapObjectShortPrint(std::ostream& os) {
os << AsHex::Address(this->ptr()) << " ";
if (IsString()) {
@@ -2043,7 +2057,7 @@ void HeapObject::HeapObjectShortPrint(std::ostream& os) { // NOLINT
os << "<FeedbackVector[" << FeedbackVector::cast(*this).length() << "]>";
break;
case FREE_SPACE_TYPE:
- os << "<FreeSpace[" << FreeSpace::cast(*this).size() << "]>";
+ os << "<FreeSpace[" << FreeSpace::cast(*this).size(kRelaxedLoad) << "]>";
break;
case PREPARSE_DATA_TYPE: {
@@ -2200,6 +2214,10 @@ void Tuple2::BriefPrintDetails(std::ostream& os) {
os << " " << Brief(value1()) << ", " << Brief(value2());
}
+void MegaDomHandler::BriefPrintDetails(std::ostream& os) {
+ os << " " << Brief(accessor()) << ", " << Brief(context());
+}
+
void ClassPositions::BriefPrintDetails(std::ostream& os) {
os << " " << start() << ", " << end();
}
@@ -2240,7 +2258,7 @@ int HeapObject::SizeFromMap(Map map) const {
if (base::IsInRange(instance_type, FIRST_FIXED_ARRAY_TYPE,
LAST_FIXED_ARRAY_TYPE)) {
return FixedArray::SizeFor(
- FixedArray::unchecked_cast(*this).synchronized_length());
+ FixedArray::unchecked_cast(*this).length(kAcquireLoad));
}
if (base::IsInRange(instance_type, FIRST_CONTEXT_TYPE, LAST_CONTEXT_TYPE)) {
if (instance_type == NATIVE_CONTEXT_TYPE) return NativeContext::kSize;
@@ -2251,29 +2269,29 @@ int HeapObject::SizeFromMap(Map map) const {
// Strings may get concurrently truncated, hence we have to access its
// length synchronized.
return SeqOneByteString::SizeFor(
- SeqOneByteString::unchecked_cast(*this).synchronized_length());
+ SeqOneByteString::unchecked_cast(*this).length(kAcquireLoad));
}
if (instance_type == BYTE_ARRAY_TYPE) {
return ByteArray::SizeFor(
- ByteArray::unchecked_cast(*this).synchronized_length());
+ ByteArray::unchecked_cast(*this).length(kAcquireLoad));
}
if (instance_type == BYTECODE_ARRAY_TYPE) {
return BytecodeArray::SizeFor(
- BytecodeArray::unchecked_cast(*this).synchronized_length());
+ BytecodeArray::unchecked_cast(*this).length(kAcquireLoad));
}
if (instance_type == FREE_SPACE_TYPE) {
- return FreeSpace::unchecked_cast(*this).relaxed_read_size();
+ return FreeSpace::unchecked_cast(*this).size(kRelaxedLoad);
}
if (instance_type == STRING_TYPE ||
instance_type == INTERNALIZED_STRING_TYPE) {
// Strings may get concurrently truncated, hence we have to access its
// length synchronized.
return SeqTwoByteString::SizeFor(
- SeqTwoByteString::unchecked_cast(*this).synchronized_length());
+ SeqTwoByteString::unchecked_cast(*this).length(kAcquireLoad));
}
if (instance_type == FIXED_DOUBLE_ARRAY_TYPE) {
return FixedDoubleArray::SizeFor(
- FixedDoubleArray::unchecked_cast(*this).synchronized_length());
+ FixedDoubleArray::unchecked_cast(*this).length(kAcquireLoad));
}
if (instance_type == FEEDBACK_METADATA_TYPE) {
return FeedbackMetadata::SizeFor(
@@ -2287,11 +2305,11 @@ int HeapObject::SizeFromMap(Map map) const {
if (base::IsInRange(instance_type, FIRST_WEAK_FIXED_ARRAY_TYPE,
LAST_WEAK_FIXED_ARRAY_TYPE)) {
return WeakFixedArray::SizeFor(
- WeakFixedArray::unchecked_cast(*this).synchronized_length());
+ WeakFixedArray::unchecked_cast(*this).length(kAcquireLoad));
}
if (instance_type == WEAK_ARRAY_LIST_TYPE) {
return WeakArrayList::SizeForCapacity(
- WeakArrayList::unchecked_cast(*this).synchronized_capacity());
+ WeakArrayList::unchecked_cast(*this).capacity());
}
if (instance_type == SMALL_ORDERED_HASH_SET_TYPE) {
return SmallOrderedHashSet::SizeFor(
@@ -2311,7 +2329,7 @@ int HeapObject::SizeFromMap(Map map) const {
}
if (instance_type == PROPERTY_ARRAY_TYPE) {
return PropertyArray::SizeFor(
- PropertyArray::cast(*this).synchronized_length());
+ PropertyArray::cast(*this).length(kAcquireLoad));
}
if (instance_type == FEEDBACK_VECTOR_TYPE) {
return FeedbackVector::SizeFor(
@@ -2339,6 +2357,9 @@ int HeapObject::SizeFromMap(Map map) const {
CoverageInfo::unchecked_cast(*this).slot_count());
}
#if V8_ENABLE_WEBASSEMBLY
+ if (instance_type == WASM_STRUCT_TYPE) {
+ return WasmStruct::GcSafeSize(map);
+ }
if (instance_type == WASM_ARRAY_TYPE) {
return WasmArray::GcSafeSizeFor(map, WasmArray::cast(*this).length());
}
@@ -2822,6 +2843,7 @@ Maybe<bool> Object::SetDataProperty(LookupIterator* it, Handle<Object> value) {
Handle<Object> to_assign = value;
// Convert the incoming value to a number for storing into typed arrays.
+ // TODO(v8:11111): Support RAB / GSAB.
if (it->IsElement() && receiver->IsJSObject() &&
JSObject::cast(*receiver).HasTypedArrayElements()) {
ElementsKind elements_kind = JSObject::cast(*receiver).GetElementsKind();
@@ -2912,8 +2934,9 @@ Maybe<bool> Object::AddDataProperty(LookupIterator* it, Handle<Object> value,
}
Handle<JSObject> receiver_obj = Handle<JSObject>::cast(receiver);
- JSObject::AddDataElement(receiver_obj, it->array_index(), value,
- attributes);
+ MAYBE_RETURN(JSObject::AddDataElement(receiver_obj, it->array_index(),
+ value, attributes),
+ Nothing<bool>());
JSObject::ValidateElements(*receiver_obj);
return Just(true);
} else {
@@ -3406,10 +3429,13 @@ Maybe<bool> JSArray::ArraySetLength(Isolate* isolate, Handle<JSArray> a,
}
// 13. If oldLenDesc.[[Writable]] is false, return false.
if (!old_len_desc.writable() ||
- // Also handle the {configurable: true} case since we later use
- // JSArray::SetLength instead of OrdinaryDefineOwnProperty to change
- // the length, and it doesn't have access to the descriptor anymore.
- new_len_desc->configurable()) {
+ // Also handle the {configurable: true} and enumerable changes
+ // since we later use JSArray::SetLength instead of
+ // OrdinaryDefineOwnProperty to change the length,
+ // and it doesn't have access to the descriptor anymore.
+ new_len_desc->configurable() ||
+ (new_len_desc->has_enumerable() &&
+ (old_len_desc.enumerable() != new_len_desc->enumerable()))) {
RETURN_FAILURE(isolate, GetShouldThrow(isolate, should_throw),
NewTypeError(MessageTemplate::kRedefineDisallowed,
isolate->factory()->length_string()));
@@ -3428,7 +3454,7 @@ Maybe<bool> JSArray::ArraySetLength(Isolate* isolate, Handle<JSArray> a,
// (Not needed.)
}
// Most of steps 16 through 19 is implemented by JSArray::SetLength.
- JSArray::SetLength(a, new_len);
+ MAYBE_RETURN(JSArray::SetLength(a, new_len), Nothing<bool>());
// Steps 19d-ii, 20.
if (!new_writable) {
PropertyDescriptor readonly;
@@ -3966,8 +3992,7 @@ Handle<FixedArray> FixedArray::SetAndGrow(Isolate* isolate,
do {
capacity = JSObject::NewElementsCapacity(capacity);
} while (capacity <= index);
- Handle<FixedArray> new_array =
- isolate->factory()->NewUninitializedFixedArray(capacity);
+ Handle<FixedArray> new_array = isolate->factory()->NewFixedArray(capacity);
array->CopyTo(0, *new_array, 0, array->length());
new_array->FillWithHoles(array->length(), new_array->length());
new_array->set(index, *value);
@@ -4336,8 +4361,8 @@ Handle<RegExpMatchInfo> RegExpMatchInfo::ReserveCaptures(
return result;
}
-template <typename LocalIsolate>
-Handle<DescriptorArray> DescriptorArray::Allocate(LocalIsolate* isolate,
+template <typename IsolateT>
+Handle<DescriptorArray> DescriptorArray::Allocate(IsolateT* isolate,
int nof_descriptors,
int slack,
AllocationType allocation) {
@@ -4551,9 +4576,7 @@ void Relocatable::PostGarbageCollectionProcessing(Isolate* isolate) {
}
// Reserve space for statics needing saving and restoring.
-int Relocatable::ArchiveSpacePerThread() {
- return sizeof(Relocatable*); // NOLINT
-}
+int Relocatable::ArchiveSpacePerThread() { return sizeof(Relocatable*); }
// Archive statics that are thread-local.
char* Relocatable::ArchiveState(Isolate* isolate, char* to) {
@@ -4720,7 +4743,8 @@ Handle<Object> CacheInitialJSArrayMaps(Isolate* isolate,
Handle<Map> current_map = initial_map;
ElementsKind kind = current_map->elements_kind();
DCHECK_EQ(GetInitialFastElementsKind(), kind);
- native_context->set(Context::ArrayMapIndex(kind), *current_map);
+ native_context->set(Context::ArrayMapIndex(kind), *current_map,
+ UPDATE_WRITE_BARRIER, kReleaseStore);
for (int i = GetSequenceIndexFromFastElementsKind(kind) + 1;
i < kFastElementsKindCount; ++i) {
Handle<Map> new_map;
@@ -4733,7 +4757,8 @@ Handle<Object> CacheInitialJSArrayMaps(Isolate* isolate,
INSERT_TRANSITION);
}
DCHECK_EQ(next_kind, new_map->elements_kind());
- native_context->set(Context::ArrayMapIndex(next_kind), *new_map);
+ native_context->set(Context::ArrayMapIndex(next_kind), *new_map,
+ UPDATE_WRITE_BARRIER, kReleaseStore);
current_map = new_map;
}
return initial_map;
@@ -4783,9 +4808,9 @@ int Script::GetEvalPosition(Isolate* isolate, Handle<Script> script) {
return position;
}
-template <typename LocalIsolate>
+template <typename IsolateT>
// static
-void Script::InitLineEnds(LocalIsolate* isolate, Handle<Script> script) {
+void Script::InitLineEnds(IsolateT* isolate, Handle<Script> script) {
if (!script->line_ends().IsUndefined(isolate)) return;
#if V8_ENABLE_WEBASSEMBLY
DCHECK(script->type() != Script::TYPE_WASM ||
@@ -4996,16 +5021,23 @@ Object Script::GetNameOrSourceURL() {
return name();
}
-template <typename LocalIsolate>
+template <typename IsolateT>
MaybeHandle<SharedFunctionInfo> Script::FindSharedFunctionInfo(
- LocalIsolate* isolate, int function_literal_id) {
+ Handle<Script> script, IsolateT* isolate,
+ FunctionLiteral* function_literal) {
+ int function_literal_id = function_literal->function_literal_id();
+ if V8_UNLIKELY (script->type() == Script::TYPE_WEB_SNAPSHOT &&
+ function_literal_id >= script->shared_function_info_count()) {
+ return FindWebSnapshotSharedFunctionInfo(script, isolate, function_literal);
+ }
+
CHECK_NE(function_literal_id, kFunctionLiteralIdInvalid);
// If this check fails, the problem is most probably the function id
// renumbering done by AstFunctionLiteralIdReindexer; in particular, that
// AstTraversalVisitor doesn't recurse properly in the construct which
// triggers the mismatch.
- CHECK_LT(function_literal_id, shared_function_infos().length());
- MaybeObject shared = shared_function_infos().Get(function_literal_id);
+ CHECK_LT(function_literal_id, script->shared_function_info_count());
+ MaybeObject shared = script->shared_function_infos().Get(function_literal_id);
HeapObject heap_object;
if (!shared->GetHeapObject(&heap_object) ||
heap_object.IsUndefined(isolate)) {
@@ -5014,9 +5046,81 @@ MaybeHandle<SharedFunctionInfo> Script::FindSharedFunctionInfo(
return handle(SharedFunctionInfo::cast(heap_object), isolate);
}
template MaybeHandle<SharedFunctionInfo> Script::FindSharedFunctionInfo(
- Isolate* isolate, int function_literal_id);
+ Handle<Script> script, Isolate* isolate, FunctionLiteral* function_literal);
template MaybeHandle<SharedFunctionInfo> Script::FindSharedFunctionInfo(
- LocalIsolate* isolate, int function_literal_id);
+ Handle<Script> script, LocalIsolate* isolate,
+ FunctionLiteral* function_literal);
+
+MaybeHandle<SharedFunctionInfo> Script::FindWebSnapshotSharedFunctionInfo(
+ Handle<Script> script, Isolate* isolate,
+ FunctionLiteral* function_literal) {
+ // We might be able to de-dupe the SFI against a SFI that was
+ // created when deserializing the snapshot (or when calling a function which
+ // was included in the snapshot). In that case, we can find it based on the
+ // start position in shared_function_info_table.
+ Handle<ObjectHashTable> shared_function_info_table = handle(
+ ObjectHashTable::cast(script->shared_function_info_table()), isolate);
+ {
+ DisallowHeapAllocation no_gc;
+ Object index_object = shared_function_info_table->Lookup(
+ handle(Smi::FromInt(function_literal->start_position()), isolate));
+ if (!index_object.IsTheHole()) {
+ int index = Smi::cast(index_object).value();
+ DCHECK_LT(index, script->shared_function_info_count());
+ MaybeObject maybe_shared = script->shared_function_infos().Get(index);
+ HeapObject heap_object;
+ if (!maybe_shared->GetHeapObject(&heap_object)) {
+ // We found the correct location but it's not filled in (e.g., the weak
+ // pointer to the SharedFunctionInfo has been cleared). Record the
+ // location in the FunctionLiteral, so that it will be refilled later.
+ // SharedFunctionInfo::SetScript will write the SharedFunctionInfo in
+ // the shared_function_infos.
+ function_literal->set_function_literal_id(index);
+ return MaybeHandle<SharedFunctionInfo>();
+ }
+ SharedFunctionInfo shared = SharedFunctionInfo::cast(heap_object);
+ DCHECK_EQ(shared.StartPosition(), function_literal->start_position());
+ DCHECK_EQ(shared.EndPosition(), function_literal->end_position());
+ return handle(shared, isolate);
+ }
+ }
+
+ // It's possible that FunctionLiterals which were processed before this one
+ // were deduplicated against existing ones. Decrease function_literal_id to
+ // avoid holes in shared_function_infos.
+ int old_length = script->shared_function_info_count();
+ int function_literal_id = old_length;
+ function_literal->set_function_literal_id(function_literal_id);
+
+ // Also add to shared_function_info_table.
+ shared_function_info_table = ObjectHashTable::Put(
+ shared_function_info_table,
+ handle(Smi::FromInt(function_literal->start_position()), isolate),
+ handle(Smi::FromInt(function_literal_id), isolate));
+ script->set_shared_function_info_table(*shared_function_info_table);
+
+ // Grow shared_function_infos if needed (we don't know the correct amount of
+ // space needed upfront).
+ int new_length = old_length + 1;
+ Handle<WeakFixedArray> old_infos =
+ handle(script->shared_function_infos(), isolate);
+ if (new_length > old_infos->length()) {
+ int capacity = WeakArrayList::CapacityForLength(new_length);
+ Handle<WeakFixedArray> new_infos(
+ isolate->factory()->NewWeakFixedArray(capacity, AllocationType::kOld));
+ new_infos->CopyElements(isolate, 0, *old_infos, 0, old_length,
+ WriteBarrierMode::UPDATE_WRITE_BARRIER);
+ script->set_shared_function_infos(*new_infos);
+ }
+ return MaybeHandle<SharedFunctionInfo>();
+}
+
+MaybeHandle<SharedFunctionInfo> Script::FindWebSnapshotSharedFunctionInfo(
+ Handle<Script> script, LocalIsolate* isolate,
+ FunctionLiteral* function_literal) {
+ // Off-thread serialization of web snapshots is not implemented.
+ UNREACHABLE();
+}
Script::Iterator::Iterator(Isolate* isolate)
: iterator_(isolate->heap()->script_list()) {}
@@ -5036,13 +5140,13 @@ void JSArray::Initialize(Handle<JSArray> array, int capacity, int length) {
array, length, capacity, INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
}
-void JSArray::SetLength(Handle<JSArray> array, uint32_t new_length) {
+Maybe<bool> JSArray::SetLength(Handle<JSArray> array, uint32_t new_length) {
// We should never end in here with a pixel or external array.
DCHECK(array->AllowsSetLength());
if (array->SetLengthWouldNormalize(new_length)) {
JSObject::NormalizeElements(array);
}
- array->GetElementsAccessor()->SetLength(array, new_length);
+ return array->GetElementsAccessor()->SetLength(array, new_length);
}
// ES6: 9.5.2 [[SetPrototypeOf]] (V)
@@ -5322,8 +5426,8 @@ Handle<Object> JSPromise::Reject(Handle<JSPromise> promise,
if (isolate->debug()->is_active()) MoveMessageToPromise(isolate, promise);
if (debug_event) isolate->debug()->OnPromiseReject(promise, reason);
- isolate->RunPromiseHook(PromiseHookType::kResolve, promise,
- isolate->factory()->undefined_value());
+ isolate->RunAllPromiseHooks(PromiseHookType::kResolve, promise,
+ isolate->factory()->undefined_value());
// 1. Assert: The value of promise.[[PromiseState]] is "pending".
CHECK_EQ(Promise::kPending, promise->status());
@@ -5358,8 +5462,8 @@ MaybeHandle<Object> JSPromise::Resolve(Handle<JSPromise> promise,
DCHECK(
!reinterpret_cast<v8::Isolate*>(isolate)->GetCurrentContext().IsEmpty());
- isolate->RunPromiseHook(PromiseHookType::kResolve, promise,
- isolate->factory()->undefined_value());
+ isolate->RunAllPromiseHooks(PromiseHookType::kResolve, promise,
+ isolate->factory()->undefined_value());
// 7. If SameValue(resolution, promise) is true, then
if (promise.is_identical_to(resolution)) {
@@ -5508,8 +5612,9 @@ Handle<Object> JSPromise::TriggerPromiseReactions(Isolate* isolate,
static_cast<int>(
PromiseReactionJobTask::kSizeOfAllPromiseReactionJobTasks));
if (type == PromiseReaction::kFulfill) {
- task->synchronized_set_map(
- ReadOnlyRoots(isolate).promise_fulfill_reaction_job_task_map());
+ task->set_map(
+ ReadOnlyRoots(isolate).promise_fulfill_reaction_job_task_map(),
+ kReleaseStore);
Handle<PromiseFulfillReactionJobTask>::cast(task)->set_argument(
*argument);
Handle<PromiseFulfillReactionJobTask>::cast(task)->set_context(
@@ -5528,8 +5633,9 @@ Handle<Object> JSPromise::TriggerPromiseReactions(Isolate* isolate,
kContinuationPreservedEmbedderDataOffset));
} else {
DisallowGarbageCollection no_gc;
- task->synchronized_set_map(
- ReadOnlyRoots(isolate).promise_reject_reaction_job_task_map());
+ task->set_map(
+ ReadOnlyRoots(isolate).promise_reject_reaction_job_task_map(),
+ kReleaseStore);
Handle<PromiseRejectReactionJobTask>::cast(task)->set_argument(*argument);
Handle<PromiseRejectReactionJobTask>::cast(task)->set_context(
*handler_context);
@@ -5568,9 +5674,9 @@ void HashTable<Derived, Shape>::IterateElements(ObjectVisitor* v) {
}
template <typename Derived, typename Shape>
-template <typename LocalIsolate>
+template <typename IsolateT>
Handle<Derived> HashTable<Derived, Shape>::New(
- LocalIsolate* isolate, int at_least_space_for, AllocationType allocation,
+ IsolateT* isolate, int at_least_space_for, AllocationType allocation,
MinimumCapacity capacity_option) {
DCHECK_LE(0, at_least_space_for);
DCHECK_IMPLIES(capacity_option == USE_CUSTOM_MINIMUM_CAPACITY,
@@ -5586,9 +5692,9 @@ Handle<Derived> HashTable<Derived, Shape>::New(
}
template <typename Derived, typename Shape>
-template <typename LocalIsolate>
+template <typename IsolateT>
Handle<Derived> HashTable<Derived, Shape>::NewInternal(
- LocalIsolate* isolate, int capacity, AllocationType allocation) {
+ IsolateT* isolate, int capacity, AllocationType allocation) {
auto* factory = isolate->factory();
int length = EntryToIndex(InternalIndex(capacity));
Handle<FixedArray> array = factory->NewFixedArrayWithMap(
@@ -5718,9 +5824,9 @@ void HashTable<Derived, Shape>::Rehash(PtrComprCageBase cage_base) {
}
template <typename Derived, typename Shape>
-template <typename LocalIsolate>
+template <typename IsolateT>
Handle<Derived> HashTable<Derived, Shape>::EnsureCapacity(
- LocalIsolate* isolate, Handle<Derived> table, int n,
+ IsolateT* isolate, Handle<Derived> table, int n,
AllocationType allocation) {
if (table->HasSufficientCapacityToAdd(n)) return table;
@@ -5851,9 +5957,9 @@ Handle<ObjectHashSet> ObjectHashSet::Add(Isolate* isolate,
}
template <typename Derived, typename Shape>
-template <typename LocalIsolate>
+template <typename IsolateT>
Handle<Derived> BaseNameDictionary<Derived, Shape>::New(
- LocalIsolate* isolate, int at_least_space_for, AllocationType allocation,
+ IsolateT* isolate, int at_least_space_for, AllocationType allocation,
MinimumCapacity capacity_option) {
DCHECK_LE(0, at_least_space_for);
Handle<Derived> dict = Dictionary<Derived, Shape>::New(
@@ -5925,10 +6031,10 @@ Handle<Derived> Dictionary<Derived, Shape>::AtPut(Isolate* isolate,
}
template <typename Derived, typename Shape>
-template <typename LocalIsolate>
+template <typename IsolateT>
Handle<Derived>
BaseNameDictionary<Derived, Shape>::AddNoUpdateNextEnumerationIndex(
- LocalIsolate* isolate, Handle<Derived> dictionary, Key key,
+ IsolateT* isolate, Handle<Derived> dictionary, Key key,
Handle<Object> value, PropertyDetails details, InternalIndex* entry_out) {
// Insert element at empty or deleted entry.
return Dictionary<Derived, Shape>::Add(isolate, dictionary, key, value,
@@ -5954,8 +6060,8 @@ Handle<Derived> BaseNameDictionary<Derived, Shape>::Add(
}
template <typename Derived, typename Shape>
-template <typename LocalIsolate>
-Handle<Derived> Dictionary<Derived, Shape>::Add(LocalIsolate* isolate,
+template <typename IsolateT>
+Handle<Derived> Dictionary<Derived, Shape>::Add(IsolateT* isolate,
Handle<Derived> dictionary,
Key key, Handle<Object> value,
PropertyDetails details,
@@ -6576,17 +6682,33 @@ AccessCheckInfo AccessCheckInfo::Get(Isolate* isolate,
return AccessCheckInfo::cast(data_obj);
}
-MaybeHandle<Name> FunctionTemplateInfo::TryGetCachedPropertyName(
- Isolate* isolate, Handle<Object> getter) {
- if (getter->IsFunctionTemplateInfo()) {
- Handle<FunctionTemplateInfo> fti =
- Handle<FunctionTemplateInfo>::cast(getter);
- // Check if the accessor uses a cached property.
- if (!fti->cached_property_name().IsTheHole(isolate)) {
- return handle(Name::cast(fti->cached_property_name()), isolate);
- }
- }
- return MaybeHandle<Name>();
+base::Optional<Name> FunctionTemplateInfo::TryGetCachedPropertyName(
+ Isolate* isolate, Object getter) {
+ DisallowGarbageCollection no_gc;
+ if (!getter.IsFunctionTemplateInfo()) return {};
+ // Check if the accessor uses a cached property.
+ Object maybe_name = FunctionTemplateInfo::cast(getter).cached_property_name();
+ if (maybe_name.IsTheHole(isolate)) return {};
+ return Name::cast(maybe_name);
+}
+
+int FunctionTemplateInfo::GetCFunctionsCount() const {
+ i::DisallowHeapAllocation no_gc;
+ return FixedArray::cast(GetCFunctionOverloads()).length() /
+ kFunctionOverloadEntrySize;
+}
+
+Address FunctionTemplateInfo::GetCFunction(int index) const {
+ i::DisallowHeapAllocation no_gc;
+ return v8::ToCData<Address>(FixedArray::cast(GetCFunctionOverloads())
+ .get(index * kFunctionOverloadEntrySize));
+}
+
+const CFunctionInfo* FunctionTemplateInfo::GetCSignature(int index) const {
+ i::DisallowHeapAllocation no_gc;
+ return v8::ToCData<CFunctionInfo*>(
+ FixedArray::cast(GetCFunctionOverloads())
+ .get(index * kFunctionOverloadEntrySize + 1));
}
Address Smi::LexicographicCompare(Isolate* isolate, Smi x, Smi y) {
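Several hunks in this file change helpers such as JSObject::AddDataElement and JSArray::SetLength to return Maybe<bool> and wrap the calls in MAYBE_RETURN, so a pending exception propagates instead of being dropped. A simplified, self-contained sketch of that error-propagation idiom, using std::optional as a stand-in for V8's Maybe; the limit and function names are illustrative:

#include <optional>

constexpr unsigned kMaxLength = 1000;  // arbitrary illustrative limit

// Stand-in for a helper that can "throw": nullopt means an exception is
// pending and the caller must bail out.
std::optional<bool> SetLength(unsigned new_length) {
  if (new_length > kMaxLength) return std::nullopt;  // failure path
  return true;                                       // success
}

std::optional<bool> ArraySetLength(unsigned new_length) {
  auto result = SetLength(new_length);
  if (!result.has_value()) return std::nullopt;  // what MAYBE_RETURN expands to
  return true;
}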
diff --git a/chromium/v8/src/objects/objects.h b/chromium/v8/src/objects/objects.h
index e4532bb0e5e..e80098dfe8d 100644
--- a/chromium/v8/src/objects/objects.h
+++ b/chromium/v8/src/objects/objects.h
@@ -602,7 +602,7 @@ class Object : public TaggedImpl<HeapObjectReferenceType::STRONG, Address> {
// Prints this object without details to a message accumulator.
V8_EXPORT_PRIVATE void ShortPrint(StringStream* accumulator) const;
- V8_EXPORT_PRIVATE void ShortPrint(std::ostream& os) const; // NOLINT
+ V8_EXPORT_PRIVATE void ShortPrint(std::ostream& os) const;
inline static Object cast(Object object) { return object; }
inline static Object unchecked_cast(Object object) { return object; }
@@ -615,10 +615,10 @@ class Object : public TaggedImpl<HeapObjectReferenceType::STRONG, Address> {
V8_EXPORT_PRIVATE void Print() const;
// Prints this object with details.
- V8_EXPORT_PRIVATE void Print(std::ostream& os) const; // NOLINT
+ V8_EXPORT_PRIVATE void Print(std::ostream& os) const;
#else
void Print() const { ShortPrint(); }
- void Print(std::ostream& os) const { ShortPrint(os); } // NOLINT
+ void Print(std::ostream& os) const { ShortPrint(os); }
#endif
// For use with std::unordered_set.
@@ -673,8 +673,7 @@ class Object : public TaggedImpl<HeapObjectReferenceType::STRONG, Address> {
inline void InitExternalPointerField(size_t offset, Isolate* isolate);
inline void InitExternalPointerField(size_t offset, Isolate* isolate,
Address value, ExternalPointerTag tag);
- inline Address ReadExternalPointerField(size_t offset,
- PtrComprCageBase isolate_root,
+ inline Address ReadExternalPointerField(size_t offset, Isolate* isolate,
ExternalPointerTag tag) const;
inline void WriteExternalPointerField(size_t offset, Isolate* isolate,
Address value, ExternalPointerTag tag);
@@ -778,6 +777,23 @@ class MapWord {
inline Address ptr() { return value_; }
+#ifdef V8_MAP_PACKING
+ static constexpr Address Pack(Address map) {
+ return map ^ Internals::kMapWordXorMask;
+ }
+ static constexpr Address Unpack(Address mapword) {
+ // TODO(wenyuzhao): Clear header metadata.
+ return mapword ^ Internals::kMapWordXorMask;
+ }
+ static constexpr bool IsPacked(Address mapword) {
+ return (static_cast<intptr_t>(mapword) & Internals::kMapWordXorMask) ==
+ Internals::kMapWordSignature &&
+ (0xffffffff00000000 & static_cast<intptr_t>(mapword)) != 0;
+ }
+#else
+ static constexpr bool IsPacked(Address) { return false; }
+#endif
+
private:
// HeapObject calls the private constructor and directly reads the value.
friend class HeapObject;
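The new Pack/Unpack pair above implements map-word packing as a single XOR with a mask, which makes the transform its own inverse, while IsPacked doubles as a sanity check that a value looks like a packed map word. A small editorial sketch of the round-trip property; the mask value here is made up, not V8's kMapWordXorMask:

#include <cstdint>

constexpr uintptr_t kXorMask = 0xFFFF000000000002u;  // hypothetical mask

constexpr uintptr_t Pack(uintptr_t map_ptr) { return map_ptr ^ kXorMask; }
constexpr uintptr_t Unpack(uintptr_t word) { return word ^ kXorMask; }

// XOR with a fixed mask is an involution: applying it twice restores the
// original pointer, so Pack and Unpack are literally the same operation.
static_assert(Unpack(Pack(0x0000123456789AB0u)) == 0x0000123456789AB0u,
              "map-word packing round-trips");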
diff --git a/chromium/v8/src/objects/ordered-hash-table.cc b/chromium/v8/src/objects/ordered-hash-table.cc
index a19217878dc..c6754db9378 100644
--- a/chromium/v8/src/objects/ordered-hash-table.cc
+++ b/chromium/v8/src/objects/ordered-hash-table.cc
@@ -16,9 +16,9 @@ namespace v8 {
namespace internal {
template <class Derived, int entrysize>
-template <typename LocalIsolate>
+template <typename IsolateT>
MaybeHandle<Derived> OrderedHashTable<Derived, entrysize>::Allocate(
- LocalIsolate* isolate, int capacity, AllocationType allocation) {
+ IsolateT* isolate, int capacity, AllocationType allocation) {
// Capacity must be a power of two, since we depend on being able
// to divide and multiply by 2 (kLoadFactor) to derive capacity
// from number of buckets. If we decide to change kLoadFactor
@@ -63,9 +63,9 @@ MaybeHandle<Derived> OrderedHashTable<Derived, entrysize>::AllocateEmpty(
}
template <class Derived, int entrysize>
-template <typename LocalIsolate>
+template <typename IsolateT>
MaybeHandle<Derived> OrderedHashTable<Derived, entrysize>::EnsureGrowable(
- LocalIsolate* isolate, Handle<Derived> table) {
+ IsolateT* isolate, Handle<Derived> table) {
DCHECK(!table->IsObsolete());
int nof = table->NumberOfElements();
@@ -238,17 +238,17 @@ HeapObject OrderedHashMap::GetEmpty(ReadOnlyRoots ro_roots) {
}
template <class Derived, int entrysize>
-template <typename LocalIsolate>
+template <typename IsolateT>
MaybeHandle<Derived> OrderedHashTable<Derived, entrysize>::Rehash(
- LocalIsolate* isolate, Handle<Derived> table) {
+ IsolateT* isolate, Handle<Derived> table) {
return OrderedHashTable<Derived, entrysize>::Rehash(isolate, table,
table->Capacity());
}
template <class Derived, int entrysize>
-template <typename LocalIsolate>
+template <typename IsolateT>
MaybeHandle<Derived> OrderedHashTable<Derived, entrysize>::Rehash(
- LocalIsolate* isolate, Handle<Derived> table, int new_capacity) {
+ IsolateT* isolate, Handle<Derived> table, int new_capacity) {
DCHECK(!table->IsObsolete());
MaybeHandle<Derived> new_table_candidate =
@@ -320,10 +320,9 @@ MaybeHandle<OrderedHashMap> OrderedHashMap::Rehash(Isolate* isolate,
return Base::Rehash(isolate, table, new_capacity);
}
-template <typename LocalIsolate>
+template <typename IsolateT>
MaybeHandle<OrderedNameDictionary> OrderedNameDictionary::Rehash(
- LocalIsolate* isolate, Handle<OrderedNameDictionary> table,
- int new_capacity) {
+ IsolateT* isolate, Handle<OrderedNameDictionary> table, int new_capacity) {
MaybeHandle<OrderedNameDictionary> new_table_candidate =
Base::Rehash(isolate, table, new_capacity);
Handle<OrderedNameDictionary> new_table;
@@ -407,9 +406,8 @@ MaybeHandle<OrderedHashMap> OrderedHashMap::Add(Isolate* isolate,
return table;
}
-template <typename LocalIsolate>
-InternalIndex OrderedNameDictionary::FindEntry(LocalIsolate* isolate,
- Object key) {
+template <typename IsolateT>
+InternalIndex OrderedNameDictionary::FindEntry(IsolateT* isolate, Object key) {
DisallowGarbageCollection no_gc;
DCHECK(key.IsUniqueName());
@@ -438,10 +436,10 @@ InternalIndex OrderedNameDictionary::FindEntry(LocalIsolate* isolate,
return InternalIndex::NotFound();
}
-template <typename LocalIsolate>
+template <typename IsolateT>
MaybeHandle<OrderedNameDictionary> OrderedNameDictionary::Add(
- LocalIsolate* isolate, Handle<OrderedNameDictionary> table,
- Handle<Name> key, Handle<Object> value, PropertyDetails details) {
+ IsolateT* isolate, Handle<OrderedNameDictionary> table, Handle<Name> key,
+ Handle<Object> value, PropertyDetails details) {
DCHECK(key->IsUniqueName());
DCHECK(table->FindEntry(isolate, *key).is_not_found());
@@ -505,21 +503,21 @@ Handle<OrderedNameDictionary> OrderedNameDictionary::DeleteEntry(
return Shrink(isolate, table);
}
-template <typename LocalIsolate>
+template <typename IsolateT>
MaybeHandle<OrderedHashSet> OrderedHashSet::Allocate(
- LocalIsolate* isolate, int capacity, AllocationType allocation) {
+ IsolateT* isolate, int capacity, AllocationType allocation) {
return Base::Allocate(isolate, capacity, allocation);
}
-template <typename LocalIsolate>
+template <typename IsolateT>
MaybeHandle<OrderedHashMap> OrderedHashMap::Allocate(
- LocalIsolate* isolate, int capacity, AllocationType allocation) {
+ IsolateT* isolate, int capacity, AllocationType allocation) {
return Base::Allocate(isolate, capacity, allocation);
}
-template <typename LocalIsolate>
+template <typename IsolateT>
MaybeHandle<OrderedNameDictionary> OrderedNameDictionary::Allocate(
- LocalIsolate* isolate, int capacity, AllocationType allocation) {
+ IsolateT* isolate, int capacity, AllocationType allocation) {
MaybeHandle<OrderedNameDictionary> table_candidate =
Base::Allocate(isolate, capacity, allocation);
Handle<OrderedNameDictionary> table;
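This file (and ordered-hash-table.h below) only renames the template parameter LocalIsolate to IsolateT; behaviour is unchanged. The parameter still means "either Isolate or LocalIsolate", both of which expose a factory(). A tiny hedged sketch of why such allocation code is templated at all; the helper name is illustrative:

template <typename IsolateT>
Handle<FixedArray> NewPair(IsolateT* isolate) {
  // Works for Isolate (main thread) and LocalIsolate (background parsing /
  // compilation) alike, since both provide factory()->NewFixedArray().
  return isolate->factory()->NewFixedArray(2);
}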
diff --git a/chromium/v8/src/objects/ordered-hash-table.h b/chromium/v8/src/objects/ordered-hash-table.h
index 1746e2dc896..1110352e46b 100644
--- a/chromium/v8/src/objects/ordered-hash-table.h
+++ b/chromium/v8/src/objects/ordered-hash-table.h
@@ -67,8 +67,8 @@ class OrderedHashTable : public FixedArray {
public:
// Returns an OrderedHashTable (possibly |table|) with enough space
// to add at least one new element.
- template <typename LocalIsolate>
- static MaybeHandle<Derived> EnsureGrowable(LocalIsolate* isolate,
+ template <typename IsolateT>
+ static MaybeHandle<Derived> EnsureGrowable(IsolateT* isolate,
Handle<Derived> table);
// Returns an OrderedHashTable (possibly |table|) that's shrunken
@@ -200,21 +200,20 @@ class OrderedHashTable : public FixedArray {
protected:
// Returns an OrderedHashTable with a capacity of at least |capacity|.
- template <typename LocalIsolate>
+ template <typename IsolateT>
static MaybeHandle<Derived> Allocate(
- LocalIsolate* isolate, int capacity,
+ IsolateT* isolate, int capacity,
AllocationType allocation = AllocationType::kYoung);
static MaybeHandle<Derived> AllocateEmpty(Isolate* isolate,
AllocationType allocation,
RootIndex root_ndex);
- template <typename LocalIsolate>
- static MaybeHandle<Derived> Rehash(LocalIsolate* isolate,
- Handle<Derived> table);
- template <typename LocalIsolate>
- static MaybeHandle<Derived> Rehash(LocalIsolate* isolate,
- Handle<Derived> table, int new_capacity);
+ template <typename IsolateT>
+ static MaybeHandle<Derived> Rehash(IsolateT* isolate, Handle<Derived> table);
+ template <typename IsolateT>
+ static MaybeHandle<Derived> Rehash(IsolateT* isolate, Handle<Derived> table,
+ int new_capacity);
int HashToEntryRaw(int hash) {
int bucket = HashToBucket(hash);
@@ -287,9 +286,9 @@ class V8_EXPORT_PRIVATE OrderedHashSet
int new_capacity);
static MaybeHandle<OrderedHashSet> Rehash(Isolate* isolate,
Handle<OrderedHashSet> table);
- template <typename LocalIsolate>
+ template <typename IsolateT>
static MaybeHandle<OrderedHashSet> Allocate(
- LocalIsolate* isolate, int capacity,
+ IsolateT* isolate, int capacity,
AllocationType allocation = AllocationType::kYoung);
static MaybeHandle<OrderedHashSet> AllocateEmpty(
@@ -318,9 +317,9 @@ class V8_EXPORT_PRIVATE OrderedHashMap
Handle<Object> key,
Handle<Object> value);
- template <typename LocalIsolate>
+ template <typename IsolateT>
static MaybeHandle<OrderedHashMap> Allocate(
- LocalIsolate* isolate, int capacity,
+ IsolateT* isolate, int capacity,
AllocationType allocation = AllocationType::kYoung);
static MaybeHandle<OrderedHashMap> AllocateEmpty(
@@ -761,24 +760,24 @@ class V8_EXPORT_PRIVATE OrderedNameDictionary
DECL_CAST(OrderedNameDictionary)
DECL_PRINTER(OrderedNameDictionary)
- template <typename LocalIsolate>
+ template <typename IsolateT>
static MaybeHandle<OrderedNameDictionary> Add(
- LocalIsolate* isolate, Handle<OrderedNameDictionary> table,
- Handle<Name> key, Handle<Object> value, PropertyDetails details);
+ IsolateT* isolate, Handle<OrderedNameDictionary> table, Handle<Name> key,
+ Handle<Object> value, PropertyDetails details);
void SetEntry(InternalIndex entry, Object key, Object value,
PropertyDetails details);
- template <typename LocalIsolate>
- InternalIndex FindEntry(LocalIsolate* isolate, Object key);
+ template <typename IsolateT>
+ InternalIndex FindEntry(IsolateT* isolate, Object key);
// This is to make the interfaces of NameDictionary::FindEntry and
// OrderedNameDictionary::FindEntry compatible.
// TODO(emrich) clean this up: NameDictionary uses Handle<Object>
// for FindEntry keys due to its Key typedef, but that's also used
// for adding, where we do need handles.
- template <typename LocalIsolate>
- InternalIndex FindEntry(LocalIsolate* isolate, Handle<Object> key) {
+ template <typename IsolateT>
+ InternalIndex FindEntry(IsolateT* isolate, Handle<Object> key) {
return FindEntry(isolate, *key);
}
@@ -786,18 +785,17 @@ class V8_EXPORT_PRIVATE OrderedNameDictionary
Isolate* isolate, Handle<OrderedNameDictionary> table,
InternalIndex entry);
- template <typename LocalIsolate>
+ template <typename IsolateT>
static MaybeHandle<OrderedNameDictionary> Allocate(
- LocalIsolate* isolate, int capacity,
+ IsolateT* isolate, int capacity,
AllocationType allocation = AllocationType::kYoung);
static MaybeHandle<OrderedNameDictionary> AllocateEmpty(
Isolate* isolate, AllocationType allocation = AllocationType::kReadOnly);
- template <typename LocalIsolate>
+ template <typename IsolateT>
static MaybeHandle<OrderedNameDictionary> Rehash(
- LocalIsolate* isolate, Handle<OrderedNameDictionary> table,
- int new_capacity);
+ IsolateT* isolate, Handle<OrderedNameDictionary> table, int new_capacity);
// Returns the value for entry.
inline Object ValueAt(InternalIndex entry);
diff --git a/chromium/v8/src/objects/property-array-inl.h b/chromium/v8/src/objects/property-array-inl.h
index fe884b043fd..b51b0d5f2d0 100644
--- a/chromium/v8/src/objects/property-array-inl.h
+++ b/chromium/v8/src/objects/property-array-inl.h
@@ -22,7 +22,8 @@ OBJECT_CONSTRUCTORS_IMPL(PropertyArray, HeapObject)
CAST_ACCESSOR(PropertyArray)
SMI_ACCESSORS(PropertyArray, length_and_hash, kLengthAndHashOffset)
-SYNCHRONIZED_SMI_ACCESSORS(PropertyArray, length_and_hash, kLengthAndHashOffset)
+RELEASE_ACQUIRE_SMI_ACCESSORS(PropertyArray, length_and_hash,
+ kLengthAndHashOffset)
Object PropertyArray::get(int index) const {
PtrComprCageBase cage_base = GetPtrComprCageBase(*this);
@@ -64,8 +65,8 @@ void PropertyArray::initialize_length(int len) {
set_length_and_hash(len);
}
-int PropertyArray::synchronized_length() const {
- return LengthField::decode(synchronized_length_and_hash());
+int PropertyArray::length(AcquireLoadTag) const {
+ return LengthField::decode(length_and_hash(kAcquireLoad));
}
int PropertyArray::Hash() const { return HashField::decode(length_and_hash()); }
diff --git a/chromium/v8/src/objects/property-array.h b/chromium/v8/src/objects/property-array.h
index f4cc5c9fb1c..8ee51982c00 100644
--- a/chromium/v8/src/objects/property-array.h
+++ b/chromium/v8/src/objects/property-array.h
@@ -18,9 +18,7 @@ class PropertyArray : public HeapObject {
public:
// [length]: length of the array.
inline int length() const;
-
- // Get the length using acquire loads.
- inline int synchronized_length() const;
+ inline int length(AcquireLoadTag) const;
// This is only used on a newly allocated PropertyArray which
// doesn't have an existing hash.
@@ -70,7 +68,7 @@ class PropertyArray : public HeapObject {
private:
DECL_INT_ACCESSORS(length_and_hash)
- DECL_SYNCHRONIZED_INT_ACCESSORS(length_and_hash)
+ DECL_RELEASE_ACQUIRE_INT_ACCESSORS(length_and_hash)
OBJECT_CONSTRUCTORS(PropertyArray, HeapObject);
};
diff --git a/chromium/v8/src/objects/scope-info.cc b/chromium/v8/src/objects/scope-info.cc
index 308b57a309d..67a64e873aa 100644
--- a/chromium/v8/src/objects/scope-info.cc
+++ b/chromium/v8/src/objects/scope-info.cc
@@ -57,9 +57,8 @@ bool ScopeInfo::Equals(ScopeInfo other) const {
#endif
// static
-template <typename LocalIsolate>
-Handle<ScopeInfo> ScopeInfo::Create(LocalIsolate* isolate, Zone* zone,
- Scope* scope,
+template <typename IsolateT>
+Handle<ScopeInfo> ScopeInfo::Create(IsolateT* isolate, Zone* zone, Scope* scope,
MaybeHandle<ScopeInfo> outer_scope) {
// Collect variables.
int context_local_count = 0;
@@ -1078,8 +1077,8 @@ std::ostream& operator<<(std::ostream& os, VariableAllocationInfo var_info) {
return os;
}
-template <typename LocalIsolate>
-Handle<ModuleRequest> ModuleRequest::New(LocalIsolate* isolate,
+template <typename IsolateT>
+Handle<ModuleRequest> ModuleRequest::New(IsolateT* isolate,
Handle<String> specifier,
Handle<FixedArray> import_assertions,
int position) {
@@ -1098,9 +1097,9 @@ template Handle<ModuleRequest> ModuleRequest::New(
LocalIsolate* isolate, Handle<String> specifier,
Handle<FixedArray> import_assertions, int position);
-template <typename LocalIsolate>
+template <typename IsolateT>
Handle<SourceTextModuleInfoEntry> SourceTextModuleInfoEntry::New(
- LocalIsolate* isolate, Handle<PrimitiveHeapObject> export_name,
+ IsolateT* isolate, Handle<PrimitiveHeapObject> export_name,
Handle<PrimitiveHeapObject> local_name,
Handle<PrimitiveHeapObject> import_name, int module_request, int cell_index,
int beg_pos, int end_pos) {
@@ -1128,9 +1127,9 @@ template Handle<SourceTextModuleInfoEntry> SourceTextModuleInfoEntry::New(
Handle<PrimitiveHeapObject> import_name, int module_request, int cell_index,
int beg_pos, int end_pos);
-template <typename LocalIsolate>
+template <typename IsolateT>
Handle<SourceTextModuleInfo> SourceTextModuleInfo::New(
- LocalIsolate* isolate, Zone* zone, SourceTextModuleDescriptor* descr) {
+ IsolateT* isolate, Zone* zone, SourceTextModuleDescriptor* descr) {
// Serialize module requests.
int size = static_cast<int>(descr->module_requests().size());
Handle<FixedArray> module_requests = isolate->factory()->NewFixedArray(size);
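Throughout this commit the template parameter formerly named LocalIsolate is renamed to IsolateT; the parameter is instantiated with both the main-thread Isolate and the background LocalIsolate, so reusing the concrete class name was misleading. A small illustrative sketch with hypothetical types (not V8 code):

```cpp
struct Isolate {};       // stand-in for the main-thread isolate
struct LocalIsolate {};  // stand-in for the background-thread isolate

// One factory template serves both isolate kinds; naming the parameter
// IsolateT avoids colliding with the concrete LocalIsolate class.
template <typename IsolateT>
int NewScopeInfoLike(IsolateT* isolate) {
  (void)isolate;
  return 0;  // allocation elided in this sketch
}

// Explicit instantiations, as the .cc files in this diff do.
template int NewScopeInfoLike(Isolate*);
template int NewScopeInfoLike(LocalIsolate*);
```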
diff --git a/chromium/v8/src/objects/scope-info.h b/chromium/v8/src/objects/scope-info.h
index 57e5d2e308f..c90f6bfed9a 100644
--- a/chromium/v8/src/objects/scope-info.h
+++ b/chromium/v8/src/objects/scope-info.h
@@ -213,19 +213,18 @@ class ScopeInfo : public TorqueGeneratedScopeInfo<ScopeInfo, HeapObject> {
// closest outer class when resolving private names.
bool PrivateNameLookupSkipsOuterClass() const;
- // REPL mode scopes allow re-declaraction of let variables. They come from
- // debug evaluate but are different to IsDebugEvaluateScope().
+  // REPL mode scopes allow re-declaration of let and const variables. They
+ // come from debug evaluate but are different to IsDebugEvaluateScope().
bool IsReplModeScope() const;
#ifdef DEBUG
bool Equals(ScopeInfo other) const;
#endif
- template <typename LocalIsolate>
- static Handle<ScopeInfo> Create(LocalIsolate* isolate, Zone* zone,
- Scope* scope,
+ template <typename IsolateT>
+ static Handle<ScopeInfo> Create(IsolateT* isolate, Zone* zone, Scope* scope,
MaybeHandle<ScopeInfo> outer_scope);
- static Handle<ScopeInfo> CreateForWithScope(
+ V8_EXPORT_PRIVATE static Handle<ScopeInfo> CreateForWithScope(
Isolate* isolate, MaybeHandle<ScopeInfo> outer_scope);
V8_EXPORT_PRIVATE static Handle<ScopeInfo> CreateForEmptyFunction(
Isolate* isolate);
@@ -314,7 +313,7 @@ class ScopeInfo : public TorqueGeneratedScopeInfo<ScopeInfo, HeapObject> {
}
static constexpr int ConvertOffsetToIndex(int offset) {
int index = (offset - HeapObject::kHeaderSize) / kTaggedSize;
- CONSTEXPR_DCHECK(OffsetOfElementAt(index) == offset);
+ DCHECK_EQ(OffsetOfElementAt(index), offset);
return index;
}
diff --git a/chromium/v8/src/objects/script-inl.h b/chromium/v8/src/objects/script-inl.h
index b1e226a0465..4dd8bed3824 100644
--- a/chromium/v8/src/objects/script-inl.h
+++ b/chromium/v8/src/objects/script-inl.h
@@ -25,7 +25,7 @@ NEVER_READ_ONLY_SPACE_IMPL(Script)
#if V8_ENABLE_WEBASSEMBLY
ACCESSORS_CHECKED(Script, wasm_breakpoint_infos, FixedArray,
- kEvalFromSharedOrWrappedArgumentsOffset,
+ kEvalFromSharedOrWrappedArgumentsOrSfiTableOffset,
this->type() == TYPE_WASM)
ACCESSORS_CHECKED(Script, wasm_managed_native_module, Object,
kEvalFromPositionOffset, this->type() == TYPE_WASM)
@@ -37,40 +37,59 @@ ACCESSORS_CHECKED(Script, wasm_weak_instance_list, WeakArrayList,
#endif // V8_ENABLE_WEBASSEMBLY
SMI_ACCESSORS(Script, type, kScriptTypeOffset)
-ACCESSORS_CHECKED(Script, eval_from_shared_or_wrapped_arguments, Object,
- kEvalFromSharedOrWrappedArgumentsOffset,
+ACCESSORS_CHECKED(Script, eval_from_shared_or_wrapped_arguments_or_sfi_table,
+ Object, kEvalFromSharedOrWrappedArgumentsOrSfiTableOffset,
CHECK_SCRIPT_NOT_WASM)
SMI_ACCESSORS_CHECKED(Script, eval_from_position, kEvalFromPositionOffset,
CHECK_SCRIPT_NOT_WASM)
#undef CHECK_SCRIPT_NOT_WASM
bool Script::is_wrapped() const {
- return eval_from_shared_or_wrapped_arguments().IsFixedArray();
+ return eval_from_shared_or_wrapped_arguments_or_sfi_table().IsFixedArray() &&
+ type() != TYPE_WEB_SNAPSHOT;
}
bool Script::has_eval_from_shared() const {
- return eval_from_shared_or_wrapped_arguments().IsSharedFunctionInfo();
+ return eval_from_shared_or_wrapped_arguments_or_sfi_table()
+ .IsSharedFunctionInfo();
}
void Script::set_eval_from_shared(SharedFunctionInfo shared,
WriteBarrierMode mode) {
DCHECK(!is_wrapped());
- set_eval_from_shared_or_wrapped_arguments(shared, mode);
+ DCHECK_NE(type(), TYPE_WEB_SNAPSHOT);
+ set_eval_from_shared_or_wrapped_arguments_or_sfi_table(shared, mode);
}
SharedFunctionInfo Script::eval_from_shared() const {
DCHECK(has_eval_from_shared());
- return SharedFunctionInfo::cast(eval_from_shared_or_wrapped_arguments());
+ return SharedFunctionInfo::cast(
+ eval_from_shared_or_wrapped_arguments_or_sfi_table());
}
void Script::set_wrapped_arguments(FixedArray value, WriteBarrierMode mode) {
DCHECK(!has_eval_from_shared());
- set_eval_from_shared_or_wrapped_arguments(value, mode);
+ DCHECK_NE(type(), TYPE_WEB_SNAPSHOT);
+ set_eval_from_shared_or_wrapped_arguments_or_sfi_table(value, mode);
}
FixedArray Script::wrapped_arguments() const {
DCHECK(is_wrapped());
- return FixedArray::cast(eval_from_shared_or_wrapped_arguments());
+ return FixedArray::cast(eval_from_shared_or_wrapped_arguments_or_sfi_table());
+}
+
+void Script::set_shared_function_info_table(ObjectHashTable value,
+ WriteBarrierMode mode) {
+ DCHECK(!has_eval_from_shared());
+ DCHECK(!is_wrapped());
+ DCHECK_EQ(type(), TYPE_WEB_SNAPSHOT);
+ set_eval_from_shared_or_wrapped_arguments_or_sfi_table(value, mode);
+}
+
+ObjectHashTable Script::shared_function_info_table() const {
+ DCHECK_EQ(type(), TYPE_WEB_SNAPSHOT);
+ return ObjectHashTable::cast(
+ eval_from_shared_or_wrapped_arguments_or_sfi_table());
}
DEF_GETTER(Script, shared_function_infos, WeakFixedArray) {
@@ -91,6 +110,15 @@ void Script::set_shared_function_infos(WeakFixedArray value,
CONDITIONAL_WRITE_BARRIER(*this, kSharedFunctionInfosOffset, value, mode);
}
+int Script::shared_function_info_count() const {
+ if V8_UNLIKELY (type() == TYPE_WEB_SNAPSHOT) {
+ // +1 because the 0th element in shared_function_infos is reserved for the
+ // top-level SharedFunctionInfo which doesn't exist.
+ return shared_function_info_table().NumberOfElements() + 1;
+ }
+ return shared_function_infos().length();
+}
+
#if V8_ENABLE_WEBASSEMBLY
bool Script::has_wasm_breakpoint_infos() const {
return type() == TYPE_WASM && wasm_breakpoint_infos().length() > 0;
diff --git a/chromium/v8/src/objects/script.h b/chromium/v8/src/objects/script.h
index e487da76497..3d2ff73b99a 100644
--- a/chromium/v8/src/objects/script.h
+++ b/chromium/v8/src/objects/script.h
@@ -20,6 +20,8 @@ namespace v8 {
namespace internal {
+class FunctionLiteral;
+
#include "torque-generated/src/objects/script-tq.inc"
// Script describes a script which has been added to the VM.
@@ -38,7 +40,8 @@ class Script : public TorqueGeneratedScript<Script, Struct> {
#if V8_ENABLE_WEBASSEMBLY
TYPE_WASM = 3,
#endif // V8_ENABLE_WEBASSEMBLY
- TYPE_INSPECTOR = 4
+ TYPE_INSPECTOR = 4,
+ TYPE_WEB_SNAPSHOT = 5
};
// Script compilation types.
@@ -53,7 +56,7 @@ class Script : public TorqueGeneratedScript<Script, Struct> {
// [type]: the script type.
DECL_INT_ACCESSORS(type)
- DECL_ACCESSORS(eval_from_shared_or_wrapped_arguments, Object)
+ DECL_ACCESSORS(eval_from_shared_or_wrapped_arguments_or_sfi_table, Object)
// [eval_from_shared]: for eval scripts the shared function info for the
// function from which eval was called.
@@ -62,6 +65,12 @@ class Script : public TorqueGeneratedScript<Script, Struct> {
// [wrapped_arguments]: for the list of arguments in a wrapped script.
DECL_ACCESSORS(wrapped_arguments, FixedArray)
+ // For web snapshots: a hash table mapping function positions to indices in
+ // shared_function_infos.
+ // TODO(v8:11525): Replace with a more efficient data structure mapping
+ // function positions to weak pointers to SharedFunctionInfos directly.
+ DECL_ACCESSORS(shared_function_info_table, ObjectHashTable)
+
// Whether the script is implicitly wrapped in a function.
inline bool is_wrapped() const;
@@ -78,6 +87,8 @@ class Script : public TorqueGeneratedScript<Script, Struct> {
// function infos created from this script.
DECL_ACCESSORS(shared_function_infos, WeakFixedArray)
+ inline int shared_function_info_count() const;
+
#if V8_ENABLE_WEBASSEMBLY
// [wasm_breakpoint_infos]: the list of {BreakPointInfo} objects describing
// all WebAssembly breakpoints for modules/instances managed via this script.
@@ -137,9 +148,9 @@ class Script : public TorqueGeneratedScript<Script, Struct> {
static int GetEvalPosition(Isolate* isolate, Handle<Script> script);
// Init line_ends array with source code positions of line ends.
- template <typename LocalIsolate>
+ template <typename IsolateT>
EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
- static void InitLineEnds(LocalIsolate* isolate, Handle<Script> script);
+ static void InitLineEnds(IsolateT* isolate, Handle<Script> script);
// Carries information about a source position.
struct PositionInfo {
@@ -176,10 +187,19 @@ class Script : public TorqueGeneratedScript<Script, Struct> {
int GetLineNumber(int code_pos) const;
// Look through the list of existing shared function infos to find one
- // that matches the function literal. Return empty handle if not found.
- template <typename LocalIsolate>
- MaybeHandle<SharedFunctionInfo> FindSharedFunctionInfo(
- LocalIsolate* isolate, int function_literal_id);
+ // that matches the function literal. Return empty handle if not found.
+ template <typename IsolateT>
+ static MaybeHandle<SharedFunctionInfo> FindSharedFunctionInfo(
+ Handle<Script> script, IsolateT* isolate,
+ FunctionLiteral* function_literal);
+
+ static MaybeHandle<SharedFunctionInfo> FindWebSnapshotSharedFunctionInfo(
+ Handle<Script> script, Isolate* isolate,
+ FunctionLiteral* function_literal);
+
+ static MaybeHandle<SharedFunctionInfo> FindWebSnapshotSharedFunctionInfo(
+ Handle<Script> script, LocalIsolate* isolate,
+ FunctionLiteral* function_literal);
// Iterate over all script objects on the heap.
class V8_EXPORT_PRIVATE Iterator {
diff --git a/chromium/v8/src/objects/script.tq b/chromium/v8/src/objects/script.tq
index dac25360cb9..36a70dede58 100644
--- a/chromium/v8/src/objects/script.tq
+++ b/chromium/v8/src/objects/script.tq
@@ -40,8 +40,12 @@ extern class Script extends Struct {
// [id]: the script id.
id: Smi;
- eval_from_shared_or_wrapped_arguments: SharedFunctionInfo|FixedArray|
- Undefined;
+ // For scripts originating from eval: the SharedFunctionInfo contains the SFI
+ // for the script. For scripts wrapped as functions: the FixedArray contains
+ // the arguments. For web snapshots: the ObjectHashTable maps function start
+ // position to SFI index in shared_function_infos.
+ eval_from_shared_or_wrapped_arguments_or_sfi_table: SharedFunctionInfo|
+ FixedArray|ObjectHashTable|Undefined;
eval_from_position: Smi|Foreign; // Smi or Managed<wasm::NativeModule>
shared_function_infos: WeakFixedArray|WeakArrayList;
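The Script changes above widen the overloaded eval_from_shared_or_wrapped_arguments slot so that, for the new TYPE_WEB_SNAPSHOT scripts, it can also hold an ObjectHashTable mapping function positions to SFI indices. A rough sketch of that one-slot-many-meanings pattern, using std::variant and hypothetical types:

```cpp
#include <cassert>
#include <variant>

struct SharedFunctionInfo {};  // eval scripts: SFI of the calling function
struct FixedArray {};          // wrapped scripts: the wrapped arguments
struct ObjectHashTable {};     // web snapshots: position -> SFI index table

class ScriptLike {
 public:
  enum Type { kNormal, kWrapped, kWebSnapshot };

  void set_shared_function_info_table(ObjectHashTable table) {
    assert(type_ == kWebSnapshot);  // mirrors the DCHECK_EQ in the diff
    slot_ = table;
  }
  ObjectHashTable shared_function_info_table() const {
    assert(type_ == kWebSnapshot);
    return std::get<ObjectHashTable>(slot_);
  }

 private:
  Type type_ = kWebSnapshot;
  std::variant<SharedFunctionInfo, FixedArray, ObjectHashTable> slot_;
};
```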
diff --git a/chromium/v8/src/objects/shared-function-info-inl.h b/chromium/v8/src/objects/shared-function-info-inl.h
index c125b01a3d0..3bc0f70572b 100644
--- a/chromium/v8/src/objects/shared-function-info-inl.h
+++ b/chromium/v8/src/objects/shared-function-info-inl.h
@@ -166,8 +166,8 @@ bool SharedFunctionInfo::needs_script_context() const {
return is_script() && scope_info().ContextLocalCount() > 0;
}
-template <typename LocalIsolate>
-AbstractCode SharedFunctionInfo::abstract_code(LocalIsolate* isolate) {
+template <typename IsolateT>
+AbstractCode SharedFunctionInfo::abstract_code(IsolateT* isolate) {
// TODO(v8:11429): Decide if this return bytecode or baseline code, when the
// latter is present.
if (HasBytecodeArray()) {
@@ -186,9 +186,8 @@ int SharedFunctionInfo::function_token_position() const {
}
}
-template <typename LocalIsolate>
-bool SharedFunctionInfo::AreSourcePositionsAvailable(
- LocalIsolate* isolate) const {
+template <typename IsolateT>
+bool SharedFunctionInfo::AreSourcePositionsAvailable(IsolateT* isolate) const {
if (FLAG_enable_lazy_source_positions) {
return !HasBytecodeArray() ||
GetBytecodeArray(isolate).HasSourcePositionTable();
@@ -196,9 +195,9 @@ bool SharedFunctionInfo::AreSourcePositionsAvailable(
return true;
}
-template <typename LocalIsolate>
+template <typename IsolateT>
SharedFunctionInfo::Inlineability SharedFunctionInfo::GetInlineability(
- LocalIsolate* isolate) const {
+ IsolateT* isolate) const {
if (!script().IsScript()) return kHasNoScript;
if (GetIsolate()->is_precise_binary_code_coverage() &&
@@ -235,9 +234,6 @@ BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags2,
has_static_private_methods_or_accessors,
SharedFunctionInfo::HasStaticPrivateMethodsOrAccessorsBit)
-BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags2, may_have_cached_code,
- SharedFunctionInfo::MayHaveCachedCodeBit)
-
BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags, syntax_kind,
SharedFunctionInfo::FunctionSyntaxKindBits)
@@ -453,9 +449,8 @@ bool SharedFunctionInfo::is_compiled() const {
!data.IsUncompiledData();
}
-template <typename LocalIsolate>
-IsCompiledScope SharedFunctionInfo::is_compiled_scope(
- LocalIsolate* isolate) const {
+template <typename IsolateT>
+IsCompiledScope SharedFunctionInfo::is_compiled_scope(IsolateT* isolate) const {
return IsCompiledScope(*this, isolate);
}
@@ -497,10 +492,9 @@ bool SharedFunctionInfo::HasBytecodeArray() const {
data.IsBaselineData();
}
-template <typename LocalIsolate>
-BytecodeArray SharedFunctionInfo::GetBytecodeArray(
- LocalIsolate* isolate) const {
- SharedMutexGuardIfOffThread<LocalIsolate, base::kShared> mutex_guard(
+template <typename IsolateT>
+BytecodeArray SharedFunctionInfo::GetBytecodeArray(IsolateT* isolate) const {
+ SharedMutexGuardIfOffThread<IsolateT, base::kShared> mutex_guard(
GetIsolate()->shared_function_info_access(), isolate);
DCHECK(HasBytecodeArray());
@@ -739,8 +733,8 @@ void SharedFunctionInfo::ClearPreparseData() {
UncompiledDataWithPreparseData::kSize);
STATIC_ASSERT(UncompiledDataWithoutPreparseData::kSize ==
UncompiledData::kHeaderSize);
- data.synchronized_set_map(
- GetReadOnlyRoots().uncompiled_data_without_preparse_data_map());
+ data.set_map(GetReadOnlyRoots().uncompiled_data_without_preparse_data_map(),
+ kReleaseStore);
// Fill the remaining space with filler.
heap->CreateFillerObjectAt(
diff --git a/chromium/v8/src/objects/shared-function-info.cc b/chromium/v8/src/objects/shared-function-info.cc
index 5f5917d64a9..054a314f454 100644
--- a/chromium/v8/src/objects/shared-function-info.cc
+++ b/chromium/v8/src/objects/shared-function-info.cc
@@ -431,12 +431,6 @@ std::ostream& operator<<(std::ostream& os, const SourceCodeOf& v) {
}
}
-MaybeHandle<Code> SharedFunctionInfo::TryGetCachedCode(Isolate* isolate) {
- if (!may_have_cached_code()) return {};
- Handle<SharedFunctionInfo> zis(*this, isolate);
- return isolate->compilation_cache()->LookupCode(zis);
-}
-
void SharedFunctionInfo::DisableOptimization(BailoutReason reason) {
DCHECK_NE(reason, BailoutReason::kNoReason);
@@ -456,9 +450,9 @@ void SharedFunctionInfo::DisableOptimization(BailoutReason reason) {
}
// static
-template <typename LocalIsolate>
+template <typename IsolateT>
void SharedFunctionInfo::InitFromFunctionLiteral(
- LocalIsolate* isolate, Handle<SharedFunctionInfo> shared_info,
+ IsolateT* isolate, Handle<SharedFunctionInfo> shared_info,
FunctionLiteral* lit, bool is_toplevel) {
DCHECK(!shared_info->name_or_scope_info(kAcquireLoad).IsScopeInfo());
diff --git a/chromium/v8/src/objects/shared-function-info.h b/chromium/v8/src/objects/shared-function-info.h
index f9db1acbcf7..3ca8a5758d1 100644
--- a/chromium/v8/src/objects/shared-function-info.h
+++ b/chromium/v8/src/objects/shared-function-info.h
@@ -191,8 +191,8 @@ class SharedFunctionInfo
// Get the abstract code associated with the function, which will either be
// a Code object or a BytecodeArray.
- template <typename LocalIsolate>
- inline AbstractCode abstract_code(LocalIsolate* isolate);
+ template <typename IsolateT>
+ inline AbstractCode abstract_code(IsolateT* isolate);
// Tells whether or not this shared function info has an attached
// BytecodeArray.
@@ -264,9 +264,8 @@ class SharedFunctionInfo
// Returns an IsCompiledScope which reports whether the function is compiled,
// and if compiled, will avoid the function becoming uncompiled while it is
// held.
- template <typename LocalIsolate>
- inline IsCompiledScope is_compiled_scope(LocalIsolate* isolate) const;
-
+ template <typename IsolateT>
+ inline IsCompiledScope is_compiled_scope(IsolateT* isolate) const;
// [internal formal parameter count]: The declared number of parameters.
// For subclass constructors, also includes new.target.
@@ -301,8 +300,8 @@ class SharedFunctionInfo
inline FunctionTemplateInfo get_api_func_data() const;
inline void set_api_func_data(FunctionTemplateInfo data);
inline bool HasBytecodeArray() const;
- template <typename LocalIsolate>
- inline BytecodeArray GetBytecodeArray(LocalIsolate* isolate) const;
+ template <typename IsolateT>
+ inline BytecodeArray GetBytecodeArray(IsolateT* isolate) const;
inline void set_bytecode_array(BytecodeArray bytecode);
inline Code InterpreterTrampoline() const;
@@ -417,15 +416,6 @@ class SharedFunctionInfo
DECL_BOOLEAN_ACCESSORS(class_scope_has_private_brand)
DECL_BOOLEAN_ACCESSORS(has_static_private_methods_or_accessors)
- // True if a Code object associated with this SFI has been inserted into the
- // compilation cache. Note that the cache entry may be removed by aging,
- // hence the 'may'.
- DECL_BOOLEAN_ACCESSORS(may_have_cached_code)
-
- // Returns the cached Code object for this SFI if it exists, an empty handle
- // otherwise.
- MaybeHandle<Code> TryGetCachedCode(Isolate* isolate);
-
// Is this function a top-level function (scripts, evals).
DECL_BOOLEAN_ACCESSORS(is_toplevel)
@@ -557,8 +547,8 @@ class SharedFunctionInfo
kExceedsBytecodeLimit,
kMayContainBreakPoints,
};
- template <typename LocalIsolate>
- Inlineability GetInlineability(LocalIsolate* isolate) const;
+ template <typename IsolateT>
+ Inlineability GetInlineability(IsolateT* isolate) const;
// Source size of this function.
int SourceSize();
@@ -569,8 +559,8 @@ class SharedFunctionInfo
inline bool has_simple_parameters();
// Initialize a SharedFunctionInfo from a parsed function literal.
- template <typename LocalIsolate>
- static void InitFromFunctionLiteral(LocalIsolate* isolate,
+ template <typename IsolateT>
+ static void InitFromFunctionLiteral(IsolateT* isolate,
Handle<SharedFunctionInfo> shared_info,
FunctionLiteral* lit, bool is_toplevel);
@@ -587,8 +577,8 @@ class SharedFunctionInfo
static void EnsureSourcePositionsAvailable(
Isolate* isolate, Handle<SharedFunctionInfo> shared_info);
- template <typename LocalIsolate>
- bool AreSourcePositionsAvailable(LocalIsolate* isolate) const;
+ template <typename IsolateT>
+ bool AreSourcePositionsAvailable(IsolateT* isolate) const;
// Hash based on function literal id and script id.
V8_EXPORT_PRIVATE uint32_t Hash();
diff --git a/chromium/v8/src/objects/shared-function-info.tq b/chromium/v8/src/objects/shared-function-info.tq
index b38598efbb2..3c793efe9b0 100644
--- a/chromium/v8/src/objects/shared-function-info.tq
+++ b/chromium/v8/src/objects/shared-function-info.tq
@@ -49,7 +49,6 @@ bitfield struct SharedFunctionInfoFlags extends uint32 {
bitfield struct SharedFunctionInfoFlags2 extends uint8 {
class_scope_has_private_brand: bool: 1 bit;
has_static_private_methods_or_accessors: bool: 1 bit;
- may_have_cached_code: bool: 1 bit;
}
@export
diff --git a/chromium/v8/src/objects/slots-inl.h b/chromium/v8/src/objects/slots-inl.h
index c0d35c525fb..3672df06353 100644
--- a/chromium/v8/src/objects/slots-inl.h
+++ b/chromium/v8/src/objects/slots-inl.h
@@ -10,6 +10,7 @@
#include "src/common/ptr-compr-inl.h"
#include "src/objects/compressed-slots.h"
#include "src/objects/heap-object.h"
+#include "src/objects/map.h"
#include "src/objects/maybe-object.h"
#include "src/objects/objects.h"
#include "src/objects/slots.h"
@@ -29,12 +30,32 @@ bool FullObjectSlot::contains_value(Address raw_value) const {
return base::AsAtomicPointer::Relaxed_Load(location()) == raw_value;
}
+bool FullObjectSlot::contains_map_value(Address raw_value) const {
+ return load_map().ptr() == raw_value;
+}
+
Object FullObjectSlot::operator*() const { return Object(*location()); }
Object FullObjectSlot::load(PtrComprCageBase cage_base) const { return **this; }
void FullObjectSlot::store(Object value) const { *location() = value.ptr(); }
+void FullObjectSlot::store_map(Map map) const {
+#ifdef V8_MAP_PACKING
+ *location() = MapWord::Pack(map.ptr());
+#else
+ store(map);
+#endif
+}
+
+Map FullObjectSlot::load_map() const {
+#ifdef V8_MAP_PACKING
+ return Map::unchecked_cast(Object(MapWord::Unpack(*location())));
+#else
+ return Map::unchecked_cast(Object(*location()));
+#endif
+}
+
Object FullObjectSlot::Acquire_Load() const {
return Object(base::AsAtomicPointer::Acquire_Load(location()));
}
diff --git a/chromium/v8/src/objects/slots.h b/chromium/v8/src/objects/slots.h
index 69c6a8a80bc..59b44d49b05 100644
--- a/chromium/v8/src/objects/slots.h
+++ b/chromium/v8/src/objects/slots.h
@@ -108,10 +108,14 @@ class FullObjectSlot : public SlotBase<FullObjectSlot, Address> {
// Compares memory representation of a value stored in the slot with given
// raw value.
inline bool contains_value(Address raw_value) const;
+ inline bool contains_map_value(Address raw_value) const;
inline Object operator*() const;
inline Object load(PtrComprCageBase cage_base) const;
inline void store(Object value) const;
+ inline void store_map(Map map) const;
+
+ inline Map load_map() const;
inline Object Acquire_Load() const;
inline Object Acquire_Load(PtrComprCageBase cage_base) const;
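The new store_map/load_map helpers exist because, with V8_MAP_PACKING, the map slot does not hold a plain tagged pointer but a packed map word. A toy sketch of the pack/unpack round trip; the XOR mask here is made up, only the shape of the scheme matters:

```cpp
#include <cstdint>

// Hypothetical packing mask; the real constant lives in the MapWord code.
constexpr std::uintptr_t kMapPackingMask = 0b11;

std::uintptr_t PackMapWord(std::uintptr_t map_ptr) {
  return map_ptr ^ kMapPackingMask;  // what store_map() does before writing
}

std::uintptr_t UnpackMapWord(std::uintptr_t packed) {
  return packed ^ kMapPackingMask;   // what load_map() does after reading
}
```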
diff --git a/chromium/v8/src/objects/smi.h b/chromium/v8/src/objects/smi.h
index 44cd5f7446e..cc99d8f7d08 100644
--- a/chromium/v8/src/objects/smi.h
+++ b/chromium/v8/src/objects/smi.h
@@ -26,7 +26,7 @@ class Smi : public Object {
// in that we want them to be constexprs.
constexpr Smi() : Object() {}
explicit constexpr Smi(Address ptr) : Object(ptr) {
- CONSTEXPR_DCHECK(HAS_SMI_TAG(ptr));
+ DCHECK(HAS_SMI_TAG(ptr));
}
// Returns the integer value.
@@ -43,7 +43,7 @@ class Smi : public Object {
// Convert a value to a Smi object.
static inline constexpr Smi FromInt(int value) {
- CONSTEXPR_DCHECK(Smi::IsValid(value));
+ DCHECK(Smi::IsValid(value));
return Smi(Internals::IntToSmi(value));
}
@@ -69,8 +69,8 @@ class Smi : public Object {
// Returns whether value can be represented in a Smi.
static inline bool constexpr IsValid(intptr_t value) {
- CONSTEXPR_DCHECK(Internals::IsValidSmi(value) ==
- (value >= kMinValue && value <= kMaxValue));
+ DCHECK_EQ(Internals::IsValidSmi(value),
+ value >= kMinValue && value <= kMaxValue);
return Internals::IsValidSmi(value);
}
@@ -87,7 +87,7 @@ class Smi : public Object {
DECL_CAST(Smi)
// Dispatched behavior.
- V8_EXPORT_PRIVATE void SmiPrint(std::ostream& os) const; // NOLINT
+ V8_EXPORT_PRIVATE void SmiPrint(std::ostream& os) const;
DECL_VERIFIER(Smi)
// Since this is a constexpr, "calling" it is just as efficient
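CONSTEXPR_DCHECK is replaced by plain DCHECK throughout this commit; since C++14 an assert-style check is legal inside a constexpr function, so the dedicated wrapper is no longer needed. A self-contained illustration using standard assert:

```cpp
#include <cassert>

constexpr int CheckedDouble(int value) {
  assert(value >= 0);  // allowed in a constexpr function since C++14
  return 2 * value;
}

static_assert(CheckedDouble(21) == 42, "still evaluable at compile time");
```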
diff --git a/chromium/v8/src/objects/source-text-module.cc b/chromium/v8/src/objects/source-text-module.cc
index a905444da5f..755f826a87a 100644
--- a/chromium/v8/src/objects/source-text-module.cc
+++ b/chromium/v8/src/objects/source-text-module.cc
@@ -672,13 +672,13 @@ Handle<JSModuleNamespace> SourceTextModule::GetModuleNamespace(
MaybeHandle<JSObject> SourceTextModule::GetImportMeta(
Isolate* isolate, Handle<SourceTextModule> module) {
- Handle<HeapObject> import_meta(module->import_meta(), isolate);
+ Handle<HeapObject> import_meta(module->import_meta(kAcquireLoad), isolate);
if (import_meta->IsTheHole(isolate)) {
if (!isolate->RunHostInitializeImportMetaObjectCallback(module).ToHandle(
&import_meta)) {
return {};
}
- module->set_import_meta(*import_meta);
+ module->set_import_meta(*import_meta, kReleaseStore);
}
return Handle<JSObject>::cast(import_meta);
}
@@ -1181,7 +1181,7 @@ void SourceTextModule::Reset(Isolate* isolate,
Handle<SourceTextModule> module) {
Factory* factory = isolate->factory();
- DCHECK(module->import_meta().IsTheHole(isolate));
+ DCHECK(module->import_meta(kAcquireLoad).IsTheHole(isolate));
Handle<FixedArray> regular_exports =
factory->NewFixedArray(module->regular_exports().length());
diff --git a/chromium/v8/src/objects/source-text-module.h b/chromium/v8/src/objects/source-text-module.h
index 873a0cd7299..6f2a3cd0f72 100644
--- a/chromium/v8/src/objects/source-text-module.h
+++ b/chromium/v8/src/objects/source-text-module.h
@@ -219,8 +219,8 @@ class SourceTextModuleInfo : public FixedArray {
public:
DECL_CAST(SourceTextModuleInfo)
- template <typename LocalIsolate>
- static Handle<SourceTextModuleInfo> New(LocalIsolate* isolate, Zone* zone,
+ template <typename IsolateT>
+ static Handle<SourceTextModuleInfo> New(IsolateT* isolate, Zone* zone,
SourceTextModuleDescriptor* descr);
inline FixedArray module_requests() const;
@@ -267,9 +267,8 @@ class ModuleRequest
NEVER_READ_ONLY_SPACE
DECL_VERIFIER(ModuleRequest)
- template <typename LocalIsolate>
- static Handle<ModuleRequest> New(LocalIsolate* isolate,
- Handle<String> specifier,
+ template <typename IsolateT>
+ static Handle<ModuleRequest> New(IsolateT* isolate, Handle<String> specifier,
Handle<FixedArray> import_assertions,
int position);
@@ -287,9 +286,9 @@ class SourceTextModuleInfoEntry
DECL_PRINTER(SourceTextModuleInfoEntry)
DECL_VERIFIER(SourceTextModuleInfoEntry)
- template <typename LocalIsolate>
+ template <typename IsolateT>
static Handle<SourceTextModuleInfoEntry> New(
- LocalIsolate* isolate, Handle<PrimitiveHeapObject> export_name,
+ IsolateT* isolate, Handle<PrimitiveHeapObject> export_name,
Handle<PrimitiveHeapObject> local_name,
Handle<PrimitiveHeapObject> import_name, int module_request,
int cell_index, int beg_pos, int end_pos);
diff --git a/chromium/v8/src/objects/source-text-module.tq b/chromium/v8/src/objects/source-text-module.tq
index 5a21807cc5a..a3d565c908f 100644
--- a/chromium/v8/src/objects/source-text-module.tq
+++ b/chromium/v8/src/objects/source-text-module.tq
@@ -29,7 +29,7 @@ extern class SourceTextModule extends Module {
// The value of import.meta inside of this module.
// Lazily initialized on first access. It's the hole before first access and
// a JSObject afterwards.
- import_meta: TheHole|JSObject;
+ @cppAcquireLoad @cppReleaseStore import_meta: TheHole|JSObject;
// The first visited module of a cycle. For modules not in a cycle, this is
// the module itself. It's the hole before the module state transitions to
diff --git a/chromium/v8/src/objects/string-inl.h b/chromium/v8/src/objects/string-inl.h
index 912109b2e0a..8b71536d8f5 100644
--- a/chromium/v8/src/objects/string-inl.h
+++ b/chromium/v8/src/objects/string-inl.h
@@ -92,12 +92,12 @@ class V8_NODISCARD SharedStringAccessGuardIfNeeded {
base::Optional<base::SharedMutexGuard<base::kShared>> mutex_guard;
};
-int String::synchronized_length() const {
+int String::length(AcquireLoadTag) const {
return base::AsAtomic32::Acquire_Load(
reinterpret_cast<const int32_t*>(field_address(kLengthOffset)));
}
-void String::synchronized_set_length(int value) {
+void String::set_length(int value, ReleaseStoreTag) {
base::AsAtomic32::Release_Store(
reinterpret_cast<int32_t*>(field_address(kLengthOffset)), value);
}
@@ -119,7 +119,7 @@ CAST_ACCESSOR(ExternalString)
CAST_ACCESSOR(ExternalTwoByteString)
StringShape::StringShape(const String str)
- : type_(str.synchronized_map().instance_type()) {
+ : type_(str.map(kAcquireLoad).instance_type()) {
set_valid();
DCHECK_EQ(type_ & kIsNotStringMask, kStringTag);
}
@@ -335,8 +335,8 @@ class SequentialStringKey final : public StringTableKey {
chars_(chars),
convert_(convert) {}
- template <typename LocalIsolate>
- bool IsMatch(LocalIsolate* isolate, String s) {
+ template <typename IsolateT>
+ bool IsMatch(IsolateT* isolate, String s) {
return s.IsEqualTo<String::EqualityType::kNoLengthCheck>(chars_, isolate);
}
@@ -827,10 +827,10 @@ void SeqTwoByteString::SeqTwoByteStringSet(int index, uint16_t value) {
// Due to ThinString rewriting, concurrent visitors need to read the length with
// acquire semantics.
inline int SeqOneByteString::AllocatedSize() {
- return SizeFor(synchronized_length());
+ return SizeFor(length(kAcquireLoad));
}
inline int SeqTwoByteString::AllocatedSize() {
- return SizeFor(synchronized_length());
+ return SizeFor(length(kAcquireLoad));
}
void SlicedString::set_parent(String parent, WriteBarrierMode mode) {
@@ -862,7 +862,8 @@ void ExternalString::AllocateExternalPointerEntries(Isolate* isolate) {
}
DEF_GETTER(ExternalString, resource_as_address, Address) {
- return ReadExternalPointerField(kResourceOffset, cage_base,
+ Isolate* isolate = GetIsolateForHeapSandbox(*this);
+ return ReadExternalPointerField(kResourceOffset, isolate,
kExternalStringResourceTag);
}
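The resource_as_address getter above now fetches the isolate via GetIsolateForHeapSandbox because, when the heap sandbox is enabled, external pointer fields store an index into a per-isolate table rather than a raw pointer. A heavily simplified sketch of that indirection (all names hypothetical):

```cpp
#include <cstdint>
#include <vector>

struct IsolateLike {
  std::vector<std::uintptr_t> external_pointer_table;  // sandbox-managed table
};

// With the sandbox, the on-heap field holds an index; the raw address only
// exists inside the isolate's table, which is why the isolate is needed.
std::uintptr_t ReadExternalPointerFieldLike(const IsolateLike& isolate,
                                            std::uint32_t field_value) {
  return isolate.external_pointer_table[field_value];
}
```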
diff --git a/chromium/v8/src/objects/string-table.cc b/chromium/v8/src/objects/string-table.cc
index a5493761162..cd554e02202 100644
--- a/chromium/v8/src/objects/string-table.cc
+++ b/chromium/v8/src/objects/string-table.cc
@@ -69,8 +69,8 @@ int ComputeStringTableCapacityWithShrink(int current_capacity,
return new_capacity;
}
-template <typename LocalIsolate, typename StringTableKey>
-bool KeyIsMatch(LocalIsolate* isolate, StringTableKey* key, String string) {
+template <typename IsolateT, typename StringTableKey>
+bool KeyIsMatch(IsolateT* isolate, StringTableKey* key, String string) {
if (string.hash() != key->hash()) return false;
if (string.length() != key->length()) return false;
return key->IsMatch(isolate, string);
@@ -135,15 +135,15 @@ class StringTable::Data {
int number_of_elements() const { return number_of_elements_; }
int number_of_deleted_elements() const { return number_of_deleted_elements_; }
- template <typename LocalIsolate, typename StringTableKey>
- InternalIndex FindEntry(LocalIsolate* isolate, StringTableKey* key,
+ template <typename IsolateT, typename StringTableKey>
+ InternalIndex FindEntry(IsolateT* isolate, StringTableKey* key,
uint32_t hash) const;
InternalIndex FindInsertionEntry(PtrComprCageBase cage_base,
uint32_t hash) const;
- template <typename LocalIsolate, typename StringTableKey>
- InternalIndex FindEntryOrInsertionEntry(LocalIsolate* isolate,
+ template <typename IsolateT, typename StringTableKey>
+ InternalIndex FindEntryOrInsertionEntry(IsolateT* isolate,
StringTableKey* key,
uint32_t hash) const;
@@ -249,8 +249,8 @@ std::unique_ptr<StringTable::Data> StringTable::Data::Resize(
return new_data;
}
-template <typename LocalIsolate, typename StringTableKey>
-InternalIndex StringTable::Data::FindEntry(LocalIsolate* isolate,
+template <typename IsolateT, typename StringTableKey>
+InternalIndex StringTable::Data::FindEntry(IsolateT* isolate,
StringTableKey* key,
uint32_t hash) const {
uint32_t count = 1;
@@ -281,9 +281,9 @@ InternalIndex StringTable::Data::FindInsertionEntry(PtrComprCageBase cage_base,
}
}
-template <typename LocalIsolate, typename StringTableKey>
+template <typename IsolateT, typename StringTableKey>
InternalIndex StringTable::Data::FindEntryOrInsertionEntry(
- LocalIsolate* isolate, StringTableKey* key, uint32_t hash) const {
+ IsolateT* isolate, StringTableKey* key, uint32_t hash) const {
InternalIndex insertion_entry = InternalIndex::NotFound();
uint32_t count = 1;
// EnsureCapacity will guarantee the hash table is never full.
@@ -428,9 +428,8 @@ Handle<String> StringTable::LookupString(Isolate* isolate,
return result;
}
-template <typename StringTableKey, typename LocalIsolate>
-Handle<String> StringTable::LookupKey(LocalIsolate* isolate,
- StringTableKey* key) {
+template <typename StringTableKey, typename IsolateT>
+Handle<String> StringTable::LookupKey(IsolateT* isolate, StringTableKey* key) {
// String table lookups are allowed to be concurrent, assuming that:
//
// - The Heap access is allowed to be concurrent (using LocalHeap or
diff --git a/chromium/v8/src/objects/string-table.h b/chromium/v8/src/objects/string-table.h
index fe87ce15f2b..fc630f156ca 100644
--- a/chromium/v8/src/objects/string-table.h
+++ b/chromium/v8/src/objects/string-table.h
@@ -63,8 +63,8 @@ class V8_EXPORT_PRIVATE StringTable {
// Find string in the string table, using the given key. If the string is not
// there yet, it is created (by the key) and added. The return value is the
// string found.
- template <typename StringTableKey, typename LocalIsolate>
- Handle<String> LookupKey(LocalIsolate* isolate, StringTableKey* key);
+ template <typename StringTableKey, typename IsolateT>
+ Handle<String> LookupKey(IsolateT* isolate, StringTableKey* key);
// {raw_string} must be a tagged String pointer.
// Returns a tagged pointer: either a Smi if the string is an array index, an
diff --git a/chromium/v8/src/objects/string.cc b/chromium/v8/src/objects/string.cc
index ffa1be3aa34..52bb9679a54 100644
--- a/chromium/v8/src/objects/string.cc
+++ b/chromium/v8/src/objects/string.cc
@@ -127,7 +127,7 @@ void String::MakeThin(Isolate* isolate, String internalized) {
ThinString thin = ThinString::unchecked_cast(*this);
thin.set_actual(internalized);
DCHECK_GE(old_size, ThinString::kSize);
- this->synchronized_set_map(*map);
+ this->set_map(*map, kReleaseStore);
Address thin_end = thin.address() + ThinString::kSize;
int size_delta = old_size - ThinString::kSize;
if (size_delta != 0) {
@@ -200,7 +200,7 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
// We are storing the new map using release store after creating a filler for
// the left-over space to avoid races with the sweeper thread.
- this->synchronized_set_map(new_map);
+ this->set_map(new_map, kReleaseStore);
ExternalTwoByteString self = ExternalTwoByteString::cast(*this);
self.AllocateExternalPointerEntries(isolate);
@@ -277,7 +277,7 @@ bool String::MakeExternal(v8::String::ExternalOneByteStringResource* resource) {
// We are storing the new map using release store after creating a filler for
// the left-over space to avoid races with the sweeper thread.
- this->synchronized_set_map(new_map);
+ this->set_map(new_map, kReleaseStore);
ExternalOneByteString self = ExternalOneByteString::cast(*this);
self.AllocateExternalPointerEntries(isolate);
@@ -374,7 +374,7 @@ void String::StringShortPrint(StringStream* accumulator) {
accumulator->Put('>');
}
-void String::PrintUC16(std::ostream& os, int start, int end) { // NOLINT
+void String::PrintUC16(std::ostream& os, int start, int end) {
if (end < 0) end = length();
StringCharacterStream stream(*this, start);
for (int i = start; i < end && stream.HasMore(); i++) {
@@ -735,8 +735,8 @@ static void CalculateLineEndsImpl(std::vector<int>* line_ends,
}
}
-template <typename LocalIsolate>
-Handle<FixedArray> String::CalculateLineEnds(LocalIsolate* isolate,
+template <typename IsolateT>
+Handle<FixedArray> String::CalculateLineEnds(IsolateT* isolate,
Handle<String> src,
bool include_ending_line) {
src = Flatten(isolate, src);
@@ -1425,7 +1425,7 @@ Handle<String> SeqString::Truncate(Handle<SeqString> string, int new_length) {
ClearRecordedSlots::kNo);
// We are storing the new length using release store after creating a filler
// for the left-over space to avoid races with the sweeper thread.
- string->synchronized_set_length(new_length);
+ string->set_length(new_length, kReleaseStore);
return string;
}
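Several of the string hunks above publish a new map or length with a release store only after the left-over space has been filled, and readers such as StringShape pick it up with an acquire load. A minimal sketch of that publish/observe pairing, using std::atomic in place of the object header:

```cpp
#include <atomic>

std::atomic<int> map_word{0};  // stands in for the map slot
int filler = 0;                // stands in for the trailing filler object

void MutatorThread() {
  filler = 1;                                    // write the filler first
  map_word.store(2, std::memory_order_release);  // then publish the new map
}

void SweeperThread() {
  if (map_word.load(std::memory_order_acquire) == 2) {
    // The acquire load guarantees the filler write above is visible here.
    (void)filler;
  }
}
```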
diff --git a/chromium/v8/src/objects/string.h b/chromium/v8/src/objects/string.h
index b8d47b5551f..b873a069ada 100644
--- a/chromium/v8/src/objects/string.h
+++ b/chromium/v8/src/objects/string.h
@@ -194,9 +194,10 @@ class String : public TorqueGeneratedString<String, Name> {
const byte* AddressOfCharacterAt(int start_index,
const DisallowGarbageCollection& no_gc);
- // Get and set the length of the string using acquire loads and release
- // stores.
- DECL_SYNCHRONIZED_INT_ACCESSORS(length)
+ // Forward declare the non-atomic (set_)length defined in torque.
+ using TorqueGeneratedString::length;
+ using TorqueGeneratedString::set_length;
+ DECL_RELEASE_ACQUIRE_INT_ACCESSORS(length)
// Returns whether this string has only one-byte chars, i.e. all of them can
// be one-byte encoded. This might be the case even if the string is
@@ -409,7 +410,7 @@ class String : public TorqueGeneratedString<String, Name> {
const char* PrefixForDebugPrint() const;
const char* SuffixForDebugPrint() const;
void StringShortPrint(StringStream* accumulator);
- void PrintUC16(std::ostream& os, int start = 0, int end = -1); // NOLINT
+ void PrintUC16(std::ostream& os, int start = 0, int end = -1);
void PrintUC16(StringStream* accumulator, int start, int end);
// Dispatched behavior.
@@ -530,8 +531,8 @@ class String : public TorqueGeneratedString<String, Name> {
Visitor* visitor, String string, int offset,
const SharedStringAccessGuardIfNeeded& access_guard);
- template <typename LocalIsolate>
- static Handle<FixedArray> CalculateLineEnds(LocalIsolate* isolate,
+ template <typename IsolateT>
+ static Handle<FixedArray> CalculateLineEnds(IsolateT* isolate,
Handle<String> string,
bool include_ending_line);
diff --git a/chromium/v8/src/objects/struct-inl.h b/chromium/v8/src/objects/struct-inl.h
index b313bc43f89..b062660fd1b 100644
--- a/chromium/v8/src/objects/struct-inl.h
+++ b/chromium/v8/src/objects/struct-inl.h
@@ -28,13 +28,6 @@ NEVER_READ_ONLY_SPACE_IMPL(AccessorPair)
TQ_OBJECT_CONSTRUCTORS_IMPL(ClassPositions)
-void Struct::InitializeBody(int object_size) {
- Object value = GetReadOnlyRoots().undefined_value();
- for (int offset = kHeaderSize; offset < object_size; offset += kTaggedSize) {
- WRITE_FIELD(*this, offset, value);
- }
-}
-
Object AccessorPair::get(AccessorComponent component) {
return component == ACCESSOR_GETTER ? getter() : setter();
}
diff --git a/chromium/v8/src/objects/struct.h b/chromium/v8/src/objects/struct.h
index fa4fe42b620..ca0dcf4ac10 100644
--- a/chromium/v8/src/objects/struct.h
+++ b/chromium/v8/src/objects/struct.h
@@ -21,7 +21,6 @@ namespace internal {
// identified in the type system.
class Struct : public TorqueGeneratedStruct<Struct, HeapObject> {
public:
- inline void InitializeBody(int object_size);
void BriefPrintDetails(std::ostream& os);
STATIC_ASSERT(kHeaderSize == HeapObject::kHeaderSize);
diff --git a/chromium/v8/src/objects/swiss-name-dictionary-inl.h b/chromium/v8/src/objects/swiss-name-dictionary-inl.h
index 343abfc8cc3..770c32b59f9 100644
--- a/chromium/v8/src/objects/swiss-name-dictionary-inl.h
+++ b/chromium/v8/src/objects/swiss-name-dictionary-inl.h
@@ -89,7 +89,7 @@ constexpr int SwissNameDictionary::CtrlTableSize(int capacity) {
// static
constexpr int SwissNameDictionary::SizeFor(int capacity) {
- CONSTEXPR_DCHECK(IsValidCapacity(capacity));
+ DCHECK(IsValidCapacity(capacity));
return PropertyDetailsTableStartOffset(capacity) + capacity;
}
@@ -98,7 +98,7 @@ constexpr int SwissNameDictionary::SizeFor(int capacity) {
// Similar to Abseil's CapacityToGrowth.
// static
constexpr int SwissNameDictionary::MaxUsableCapacity(int capacity) {
- CONSTEXPR_DCHECK(IsValidCapacity(capacity));
+ DCHECK(IsValidCapacity(capacity));
if (Group::kWidth == 8 && capacity == 4) {
// If the group size is 16 we can fully utilize capacity 4: There will be
@@ -146,9 +146,8 @@ void SwissNameDictionary::SetEntryForEnumerationIndex(int enumeration_index,
entry);
}
-template <typename LocalIsolate>
-InternalIndex SwissNameDictionary::FindEntry(LocalIsolate* isolate,
- Object key) {
+template <typename IsolateT>
+InternalIndex SwissNameDictionary::FindEntry(IsolateT* isolate, Object key) {
Name name = Name::cast(key);
DCHECK(name.IsUniqueName());
uint32_t hash = name.hash();
@@ -212,8 +211,8 @@ InternalIndex SwissNameDictionary::FindEntry(LocalIsolate* isolate,
}
}
-template <typename LocalIsolate>
-InternalIndex SwissNameDictionary::FindEntry(LocalIsolate* isolate,
+template <typename IsolateT>
+InternalIndex SwissNameDictionary::FindEntry(IsolateT* isolate,
Handle<Object> key) {
return FindEntry(isolate, *key);
}
@@ -318,9 +317,9 @@ PropertyDetails SwissNameDictionary::DetailsAt(InternalIndex entry) {
}
// static
-template <typename LocalIsolate>
+template <typename IsolateT>
Handle<SwissNameDictionary> SwissNameDictionary::EnsureGrowable(
- LocalIsolate* isolate, Handle<SwissNameDictionary> table) {
+ IsolateT* isolate, Handle<SwissNameDictionary> table) {
int capacity = table->Capacity();
if (table->UsedCapacity() < MaxUsableCapacity(capacity)) {
@@ -444,7 +443,7 @@ int SwissNameDictionary::GetMetaTableField(ByteArray meta_table,
}
constexpr int SwissNameDictionary::MetaTableSizePerEntryFor(int capacity) {
- CONSTEXPR_DCHECK(IsValidCapacity(capacity));
+ DCHECK(IsValidCapacity(capacity));
// See the STATIC_ASSERTs on |kMax1ByteMetaTableCapacity| and
// |kMax2ByteMetaTableCapacity| in the .cc file for an explanation of these
@@ -459,7 +458,7 @@ constexpr int SwissNameDictionary::MetaTableSizePerEntryFor(int capacity) {
}
constexpr int SwissNameDictionary::MetaTableSizeFor(int capacity) {
- CONSTEXPR_DCHECK(IsValidCapacity(capacity));
+ DCHECK(IsValidCapacity(capacity));
int per_entry_size = MetaTableSizePerEntryFor(capacity);
@@ -488,9 +487,9 @@ bool SwissNameDictionary::ToKey(ReadOnlyRoots roots, InternalIndex entry,
}
// static
-template <typename LocalIsolate>
+template <typename IsolateT>
Handle<SwissNameDictionary> SwissNameDictionary::Add(
- LocalIsolate* isolate, Handle<SwissNameDictionary> original_table,
+ IsolateT* isolate, Handle<SwissNameDictionary> original_table,
Handle<Name> key, Handle<Object> value, PropertyDetails details,
InternalIndex* entry_out) {
DCHECK(original_table->FindEntry(isolate, *key).is_not_found());
@@ -538,9 +537,9 @@ int SwissNameDictionary::AddInternal(Name key, Object value,
return target;
}
-template <typename LocalIsolate>
-void SwissNameDictionary::Initialize(LocalIsolate* isolate,
- ByteArray meta_table, int capacity) {
+template <typename IsolateT>
+void SwissNameDictionary::Initialize(IsolateT* isolate, ByteArray meta_table,
+ int capacity) {
DCHECK(IsValidCapacity(capacity));
DisallowHeapAllocation no_gc;
ReadOnlyRoots roots(isolate);
@@ -564,7 +563,7 @@ void SwissNameDictionary::Initialize(LocalIsolate* isolate,
SwissNameDictionary::IndexIterator::IndexIterator(
Handle<SwissNameDictionary> dict, int start)
: enum_index_{start}, dict_{dict} {
- if (!COMPRESS_POINTERS_BOOL && dict.is_null()) {
+ if (!COMPRESS_POINTERS_IN_ISOLATE_CAGE_BOOL && dict.is_null()) {
used_capacity_ = 0;
} else {
used_capacity_ = dict->UsedCapacity();
@@ -609,7 +608,7 @@ SwissNameDictionary::IndexIterator SwissNameDictionary::IndexIterable::begin() {
}
SwissNameDictionary::IndexIterator SwissNameDictionary::IndexIterable::end() {
- if (!COMPRESS_POINTERS_BOOL && dict_.is_null()) {
+ if (!COMPRESS_POINTERS_IN_ISOLATE_CAGE_BOOL && dict_.is_null()) {
return IndexIterator(dict_, 0);
} else {
DCHECK(!dict_.is_null());
@@ -620,12 +619,12 @@ SwissNameDictionary::IndexIterator SwissNameDictionary::IndexIterable::end() {
SwissNameDictionary::IndexIterable
SwissNameDictionary::IterateEntriesOrdered() {
// If we are supposed to iterate the empty dictionary (which is non-writable)
- // and pointer compression is disabled, we have no simple way to get the
- // isolate, which we would need to create a handle.
+ // and pointer compression with a per-Isolate cage is disabled, we have no
+ // simple way to get the isolate, which we would need to create a handle.
// TODO(emrich): Consider always using roots.empty_swiss_dictionary_handle()
// in the condition once this function gets Isolate as a parameter in order to
// avoid empty dict checks.
- if (!COMPRESS_POINTERS_BOOL && Capacity() == 0)
+ if (!COMPRESS_POINTERS_IN_ISOLATE_CAGE_BOOL && Capacity() == 0)
return IndexIterable(Handle<SwissNameDictionary>::null());
Isolate* isolate;
@@ -661,7 +660,7 @@ constexpr int SwissNameDictionary::MaxCapacity() {
sizeof(uint32_t);
int result = (FixedArray::kMaxSize - const_size) / per_entry_size;
- CONSTEXPR_DCHECK(result <= Smi::kMaxValue);
+ DCHECK_GE(Smi::kMaxValue, result);
return result;
}
diff --git a/chromium/v8/src/objects/swiss-name-dictionary.cc b/chromium/v8/src/objects/swiss-name-dictionary.cc
index 5b567aeaeed..57038ca41d9 100644
--- a/chromium/v8/src/objects/swiss-name-dictionary.cc
+++ b/chromium/v8/src/objects/swiss-name-dictionary.cc
@@ -37,10 +37,9 @@ Handle<SwissNameDictionary> SwissNameDictionary::DeleteEntry(
}
// static
-template <typename LocalIsolate>
+template <typename IsolateT>
Handle<SwissNameDictionary> SwissNameDictionary::Rehash(
- LocalIsolate* isolate, Handle<SwissNameDictionary> table,
- int new_capacity) {
+ IsolateT* isolate, Handle<SwissNameDictionary> table, int new_capacity) {
DCHECK(IsValidCapacity(new_capacity));
DCHECK_LE(table->NumberOfElements(), MaxUsableCapacity(new_capacity));
ReadOnlyRoots roots(isolate);
diff --git a/chromium/v8/src/objects/swiss-name-dictionary.h b/chromium/v8/src/objects/swiss-name-dictionary.h
index 9ab225dd349..afafb83dd28 100644
--- a/chromium/v8/src/objects/swiss-name-dictionary.h
+++ b/chromium/v8/src/objects/swiss-name-dictionary.h
@@ -72,10 +72,10 @@ class V8_EXPORT_PRIVATE SwissNameDictionary : public HeapObject {
public:
using Group = swiss_table::Group;
- template <typename LocalIsolate>
+ template <typename IsolateT>
inline static Handle<SwissNameDictionary> Add(
- LocalIsolate* isolate, Handle<SwissNameDictionary> table,
- Handle<Name> key, Handle<Object> value, PropertyDetails details,
+ IsolateT* isolate, Handle<SwissNameDictionary> table, Handle<Name> key,
+ Handle<Object> value, PropertyDetails details,
InternalIndex* entry_out = nullptr);
static Handle<SwissNameDictionary> Shrink(Isolate* isolate,
@@ -84,16 +84,16 @@ class V8_EXPORT_PRIVATE SwissNameDictionary : public HeapObject {
static Handle<SwissNameDictionary> DeleteEntry(
Isolate* isolate, Handle<SwissNameDictionary> table, InternalIndex entry);
- template <typename LocalIsolate>
- inline InternalIndex FindEntry(LocalIsolate* isolate, Object key);
+ template <typename IsolateT>
+ inline InternalIndex FindEntry(IsolateT* isolate, Object key);
// This is to make the interfaces of NameDictionary::FindEntry and
// OrderedNameDictionary::FindEntry compatible.
// TODO(emrich) clean this up: NameDictionary uses Handle<Object>
// for FindEntry keys due to its Key typedef, but that's also used
// for adding, where we do need handles.
- template <typename LocalIsolate>
- inline InternalIndex FindEntry(LocalIsolate* isolate, Handle<Object> key);
+ template <typename IsolateT>
+ inline InternalIndex FindEntry(IsolateT* isolate, Handle<Object> key);
static inline bool IsKey(ReadOnlyRoots roots, Object key_candidate);
inline bool ToKey(ReadOnlyRoots roots, InternalIndex entry, Object* out_key);
@@ -123,11 +123,11 @@ class V8_EXPORT_PRIVATE SwissNameDictionary : public HeapObject {
// deleted entries (which reside in initialized memory, but are not compared).
bool EqualsForTesting(SwissNameDictionary other);
- template <typename LocalIsolate>
- void Initialize(LocalIsolate* isolate, ByteArray meta_table, int capacity);
+ template <typename IsolateT>
+ void Initialize(IsolateT* isolate, ByteArray meta_table, int capacity);
- template <typename LocalIsolate>
- static Handle<SwissNameDictionary> Rehash(LocalIsolate* isolate,
+ template <typename IsolateT>
+ static Handle<SwissNameDictionary> Rehash(IsolateT* isolate,
Handle<SwissNameDictionary> table,
int new_capacity);
void Rehash(Isolate* isolate);
@@ -262,9 +262,9 @@ class V8_EXPORT_PRIVATE SwissNameDictionary : public HeapObject {
using ctrl_t = swiss_table::ctrl_t;
using Ctrl = swiss_table::Ctrl;
- template <typename LocalIsolate>
+ template <typename IsolateT>
inline static Handle<SwissNameDictionary> EnsureGrowable(
- LocalIsolate* isolate, Handle<SwissNameDictionary> table);
+ IsolateT* isolate, Handle<SwissNameDictionary> table);
// Returns table of byte-encoded PropertyDetails (without enumeration index
// stored in PropertyDetails).
diff --git a/chromium/v8/src/objects/tagged-field-inl.h b/chromium/v8/src/objects/tagged-field-inl.h
index 513f6a02d97..5c3a18982e3 100644
--- a/chromium/v8/src/objects/tagged-field-inl.h
+++ b/chromium/v8/src/objects/tagged-field-inl.h
@@ -56,6 +56,7 @@ Tagged_t TaggedField<T, kFieldOffset>::full_to_tagged(Address value) {
template <typename T, int kFieldOffset>
T TaggedField<T, kFieldOffset>::load(HeapObject host, int offset) {
Tagged_t value = *location(host, offset);
+ DCHECK_NE(kFieldOffset + offset, HeapObject::kMapOffset);
return T(tagged_to_full(host.ptr(), value));
}
@@ -64,6 +65,7 @@ template <typename T, int kFieldOffset>
T TaggedField<T, kFieldOffset>::load(PtrComprCageBase cage_base,
HeapObject host, int offset) {
Tagged_t value = *location(host, offset);
+ DCHECK_NE(kFieldOffset + offset, HeapObject::kMapOffset);
return T(tagged_to_full(cage_base, value));
}
@@ -73,7 +75,9 @@ void TaggedField<T, kFieldOffset>::store(HeapObject host, T value) {
#ifdef V8_ATOMIC_OBJECT_FIELD_WRITES
Relaxed_Store(host, value);
#else
- *location(host) = full_to_tagged(value.ptr());
+ Address ptr = value.ptr();
+ DCHECK_NE(kFieldOffset, HeapObject::kMapOffset);
+ *location(host) = full_to_tagged(ptr);
#endif
}
@@ -83,7 +87,9 @@ void TaggedField<T, kFieldOffset>::store(HeapObject host, int offset, T value) {
#ifdef V8_ATOMIC_OBJECT_FIELD_WRITES
Relaxed_Store(host, offset, value);
#else
- *location(host, offset) = full_to_tagged(value.ptr());
+ Address ptr = value.ptr();
+ DCHECK_NE(kFieldOffset + offset, HeapObject::kMapOffset);
+ *location(host, offset) = full_to_tagged(ptr);
#endif
}
@@ -91,6 +97,7 @@ void TaggedField<T, kFieldOffset>::store(HeapObject host, int offset, T value) {
template <typename T, int kFieldOffset>
T TaggedField<T, kFieldOffset>::Relaxed_Load(HeapObject host, int offset) {
AtomicTagged_t value = AsAtomicTagged::Relaxed_Load(location(host, offset));
+ DCHECK_NE(kFieldOffset + offset, HeapObject::kMapOffset);
return T(tagged_to_full(host.ptr(), value));
}
@@ -99,50 +106,89 @@ template <typename T, int kFieldOffset>
T TaggedField<T, kFieldOffset>::Relaxed_Load(PtrComprCageBase cage_base,
HeapObject host, int offset) {
AtomicTagged_t value = AsAtomicTagged::Relaxed_Load(location(host, offset));
+ DCHECK_NE(kFieldOffset + offset, HeapObject::kMapOffset);
return T(tagged_to_full(cage_base, value));
}
// static
template <typename T, int kFieldOffset>
-void TaggedField<T, kFieldOffset>::Relaxed_Store(HeapObject host, T value) {
+T TaggedField<T, kFieldOffset>::Relaxed_Load_Map_Word(
+ PtrComprCageBase cage_base, HeapObject host) {
+ AtomicTagged_t value = AsAtomicTagged::Relaxed_Load(location(host, 0));
+ return T(tagged_to_full(cage_base, value));
+}
+
+// static
+template <typename T, int kFieldOffset>
+void TaggedField<T, kFieldOffset>::Relaxed_Store_Map_Word(HeapObject host,
+ T value) {
AsAtomicTagged::Relaxed_Store(location(host), full_to_tagged(value.ptr()));
}
// static
template <typename T, int kFieldOffset>
+void TaggedField<T, kFieldOffset>::Relaxed_Store(HeapObject host, T value) {
+ Address ptr = value.ptr();
+ DCHECK_NE(kFieldOffset, HeapObject::kMapOffset);
+ AsAtomicTagged::Relaxed_Store(location(host), full_to_tagged(ptr));
+}
+
+// static
+template <typename T, int kFieldOffset>
void TaggedField<T, kFieldOffset>::Relaxed_Store(HeapObject host, int offset,
T value) {
- AsAtomicTagged::Relaxed_Store(location(host, offset),
- full_to_tagged(value.ptr()));
+ Address ptr = value.ptr();
+ DCHECK_NE(kFieldOffset + offset, HeapObject::kMapOffset);
+ AsAtomicTagged::Relaxed_Store(location(host, offset), full_to_tagged(ptr));
}
// static
template <typename T, int kFieldOffset>
T TaggedField<T, kFieldOffset>::Acquire_Load(HeapObject host, int offset) {
AtomicTagged_t value = AsAtomicTagged::Acquire_Load(location(host, offset));
+ DCHECK_NE(kFieldOffset + offset, HeapObject::kMapOffset);
return T(tagged_to_full(host.ptr(), value));
}
// static
template <typename T, int kFieldOffset>
+T TaggedField<T, kFieldOffset>::Acquire_Load_No_Unpack(
+ PtrComprCageBase cage_base, HeapObject host, int offset) {
+ AtomicTagged_t value = AsAtomicTagged::Acquire_Load(location(host, offset));
+ return T(tagged_to_full(cage_base, value));
+}
+
+template <typename T, int kFieldOffset>
T TaggedField<T, kFieldOffset>::Acquire_Load(PtrComprCageBase cage_base,
HeapObject host, int offset) {
AtomicTagged_t value = AsAtomicTagged::Acquire_Load(location(host, offset));
+ DCHECK_NE(kFieldOffset + offset, HeapObject::kMapOffset);
return T(tagged_to_full(cage_base, value));
}
// static
template <typename T, int kFieldOffset>
void TaggedField<T, kFieldOffset>::Release_Store(HeapObject host, T value) {
- AsAtomicTagged::Release_Store(location(host), full_to_tagged(value.ptr()));
+ Address ptr = value.ptr();
+ DCHECK_NE(kFieldOffset, HeapObject::kMapOffset);
+ AsAtomicTagged::Release_Store(location(host), full_to_tagged(ptr));
+}
+
+// static
+template <typename T, int kFieldOffset>
+void TaggedField<T, kFieldOffset>::Release_Store_Map_Word(HeapObject host,
+ T value) {
+ Address ptr = value.ptr();
+ AsAtomicTagged::Release_Store(location(host), full_to_tagged(ptr));
}
// static
template <typename T, int kFieldOffset>
void TaggedField<T, kFieldOffset>::Release_Store(HeapObject host, int offset,
T value) {
- AsAtomicTagged::Release_Store(location(host, offset),
- full_to_tagged(value.ptr()));
+ Address ptr = value.ptr();
+ DCHECK_NE(kFieldOffset + offset, HeapObject::kMapOffset);
+ AsAtomicTagged::Release_Store(location(host, offset), full_to_tagged(ptr));
}
// static
diff --git a/chromium/v8/src/objects/tagged-field.h b/chromium/v8/src/objects/tagged-field.h
index e3950fa0af3..d9fc0bb1027 100644
--- a/chromium/v8/src/objects/tagged-field.h
+++ b/chromium/v8/src/objects/tagged-field.h
@@ -52,6 +52,8 @@ class TaggedField : public AllStatic {
static inline void Relaxed_Store(HeapObject host, int offset, T value);
static inline T Acquire_Load(HeapObject host, int offset = 0);
+ static inline T Acquire_Load_No_Unpack(PtrComprCageBase cage_base,
+ HeapObject host, int offset = 0);
static inline T Acquire_Load(PtrComprCageBase cage_base, HeapObject host,
int offset = 0);
@@ -61,6 +63,13 @@ class TaggedField : public AllStatic {
static inline Tagged_t Release_CompareAndSwap(HeapObject host, T old,
T value);
+ // Note: Use these *_Map_Word methods only when loading a MapWord from a
+ // MapField.
+ static inline T Relaxed_Load_Map_Word(PtrComprCageBase cage_base,
+ HeapObject host);
+ static inline void Relaxed_Store_Map_Word(HeapObject host, T value);
+ static inline void Release_Store_Map_Word(HeapObject host, T value);
+
private:
static inline Tagged_t* location(HeapObject host, int offset = 0);
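The DCHECK_NE(..., HeapObject::kMapOffset) checks added above funnel every access to offset 0 through the new *_Map_Word helpers, since the map slot may hold a packed value. A compact sketch of that split (hypothetical layout, not V8 code):

```cpp
#include <cassert>
#include <cstdint>

constexpr int kMapOffset = 0;

void StoreTaggedField(std::uintptr_t* object, int offset,
                      std::uintptr_t value) {
  assert(offset != kMapOffset);  // generic path must never touch the map word
  object[offset] = value;
}

void StoreMapWord(std::uintptr_t* object, std::uintptr_t packed_map) {
  object[kMapOffset] = packed_map;  // dedicated path, value already packed
}
```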
diff --git a/chromium/v8/src/objects/tagged-impl.h b/chromium/v8/src/objects/tagged-impl.h
index 9ef8b58d991..e7278a12451 100644
--- a/chromium/v8/src/objects/tagged-impl.h
+++ b/chromium/v8/src/objects/tagged-impl.h
@@ -88,8 +88,7 @@ class TaggedImpl {
// Returns true if this tagged value is a strong pointer to a HeapObject.
constexpr inline bool IsStrong() const {
- CONSTEXPR_DCHECK(kCanBeWeak ||
- (!IsSmi() == HAS_STRONG_HEAP_OBJECT_TAG(ptr_)));
+ DCHECK(kCanBeWeak || (!IsSmi() == HAS_STRONG_HEAP_OBJECT_TAG(ptr_)));
return kCanBeWeak ? HAS_STRONG_HEAP_OBJECT_TAG(ptr_) : !IsSmi();
}
diff --git a/chromium/v8/src/objects/tagged-index.h b/chromium/v8/src/objects/tagged-index.h
index e8cfbc76087..19812877cb7 100644
--- a/chromium/v8/src/objects/tagged-index.h
+++ b/chromium/v8/src/objects/tagged-index.h
@@ -38,7 +38,7 @@ class TaggedIndex : public Object {
// special in that we want them to be constexprs.
constexpr TaggedIndex() : Object() {}
explicit constexpr TaggedIndex(Address ptr) : Object(ptr) {
- CONSTEXPR_DCHECK(HAS_SMI_TAG(ptr));
+ DCHECK(HAS_SMI_TAG(ptr));
}
// Returns the integer value.
@@ -49,7 +49,7 @@ class TaggedIndex : public Object {
// Convert a value to a TaggedIndex object.
static inline TaggedIndex FromIntptr(intptr_t value) {
- CONSTEXPR_DCHECK(TaggedIndex::IsValid(value));
+ DCHECK(TaggedIndex::IsValid(value));
return TaggedIndex((static_cast<Address>(value) << kSmiTagSize) | kSmiTag);
}
diff --git a/chromium/v8/src/objects/templates-inl.h b/chromium/v8/src/objects/templates-inl.h
index d5a08fd88eb..d2d50762463 100644
--- a/chromium/v8/src/objects/templates-inl.h
+++ b/chromium/v8/src/objects/templates-inl.h
@@ -34,34 +34,10 @@ BOOL_ACCESSORS(FunctionTemplateInfo, flag, read_only_prototype,
ReadOnlyPrototypeBit::kShift)
BOOL_ACCESSORS(FunctionTemplateInfo, flag, remove_prototype,
RemovePrototypeBit::kShift)
-BOOL_ACCESSORS(FunctionTemplateInfo, flag, do_not_cache, DoNotCacheBit::kShift)
BOOL_ACCESSORS(FunctionTemplateInfo, flag, accept_any_receiver,
AcceptAnyReceiverBit::kShift)
BOOL_ACCESSORS(FunctionTemplateInfo, flag, published, PublishedBit::kShift)
-// TODO(nicohartmann@, v8:11122): Let Torque generate this accessor.
-RELEASE_ACQUIRE_ACCESSORS(FunctionTemplateInfo, call_code, HeapObject,
- kCallCodeOffset)
-
-// TODO(nicohartmann@, v8:11122): Let Torque generate this accessor.
-HeapObject FunctionTemplateInfo::rare_data(AcquireLoadTag) const {
- PtrComprCageBase cage_base = GetPtrComprCageBase(*this);
- return rare_data(cage_base, kAcquireLoad);
-}
-HeapObject FunctionTemplateInfo::rare_data(PtrComprCageBase cage_base,
- AcquireLoadTag) const {
- HeapObject value =
- TaggedField<HeapObject>::Acquire_Load(cage_base, *this, kRareDataOffset);
- DCHECK(value.IsUndefined() || value.IsFunctionTemplateRareData());
- return value;
-}
-void FunctionTemplateInfo::set_rare_data(HeapObject value, ReleaseStoreTag,
- WriteBarrierMode mode) {
- DCHECK(value.IsUndefined() || value.IsFunctionTemplateRareData());
- RELEASE_WRITE_FIELD(*this, kRareDataOffset, value);
- CONDITIONAL_WRITE_BARRIER(*this, kRareDataOffset, value, mode);
-}
-
// static
FunctionTemplateRareData FunctionTemplateInfo::EnsureFunctionTemplateRareData(
Isolate* isolate, Handle<FunctionTemplateInfo> function_template_info) {
@@ -100,10 +76,15 @@ RARE_ACCESSORS(instance_template, InstanceTemplate, HeapObject, undefined)
RARE_ACCESSORS(instance_call_handler, InstanceCallHandler, HeapObject,
undefined)
RARE_ACCESSORS(access_check_info, AccessCheckInfo, HeapObject, undefined)
-RARE_ACCESSORS(c_function, CFunction, Object, Smi(0))
-RARE_ACCESSORS(c_signature, CSignature, Object, Smi(0))
+RARE_ACCESSORS(c_function_overloads, CFunctionOverloads, FixedArray,
+ GetReadOnlyRoots(cage_base).empty_fixed_array())
#undef RARE_ACCESSORS
+bool TemplateInfo::should_cache() const {
+ return serial_number() != kDoNotCache;
+}
+bool TemplateInfo::is_cached() const { return serial_number() > kUncached; }
+
bool FunctionTemplateInfo::instantiated() {
return shared_function_info().IsSharedFunctionInfo();
}
diff --git a/chromium/v8/src/objects/templates.h b/chromium/v8/src/objects/templates.h
index 966b81167c4..b2401734323 100644
--- a/chromium/v8/src/objects/templates.h
+++ b/chromium/v8/src/objects/templates.h
@@ -12,6 +12,9 @@
#include "src/objects/object-macros.h"
namespace v8 {
+
+class CFunctionInfo;
+
namespace internal {
#include "torque-generated/src/objects/templates-tq.inc"
@@ -27,6 +30,16 @@ class TemplateInfo : public TorqueGeneratedTemplateInfo<TemplateInfo, Struct> {
// instead of caching them.
static const int kSlowTemplateInstantiationsCacheSize = 1 * MB;
+ // If the serial number is set to kDoNotCache, then we should never cache this
+ // TemplateInfo.
+ static const int kDoNotCache = -1;
+ // If the serial number is set to kUncached, it means that this TemplateInfo
+ // has not been cached yet but it can be.
+ static const int kUncached = -2;
+
+ inline bool should_cache() const;
+ inline bool is_cached() const;
+
TQ_OBJECT_CONSTRUCTORS(TemplateInfo)
};
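
    A minimal standalone sketch (assumed names; the real accessors live on
    TemplateInfo) of the three states the serial number encodes after this
    change:

    struct TemplateInfoSketch {
      static const int kDoNotCache = -1;  // never cache instantiations
      static const int kUncached = -2;    // cacheable, but no cache slot yet
      int serial_number = kUncached;
      bool should_cache() const { return serial_number != kDoNotCache; }
      bool is_cached() const { return serial_number > kUncached; }
    };

    Under this encoding a freshly created template starts out as kUncached and
    is only later given a real serial number when it is inserted into the
    instantiation cache; the exact numbering of cached entries is an assumption
    of the sketch, not part of this patch.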
@@ -35,6 +48,7 @@ class FunctionTemplateRareData
: public TorqueGeneratedFunctionTemplateRareData<FunctionTemplateRareData,
Struct> {
public:
+ DECL_VERIFIER(FunctionTemplateRareData)
TQ_OBJECT_CONSTRUCTORS(FunctionTemplateRareData)
};
@@ -83,20 +97,9 @@ class FunctionTemplateInfo
DECL_RARE_ACCESSORS(access_check_info, AccessCheckInfo, HeapObject)
- DECL_RARE_ACCESSORS(c_function, CFunction, Object)
- DECL_RARE_ACCESSORS(c_signature, CSignature, Object)
+ DECL_RARE_ACCESSORS(c_function_overloads, CFunctionOverloads, FixedArray)
#undef DECL_RARE_ACCESSORS
- // TODO(nicohartmann@, v8:11122): Let Torque generate the following accessor.
- DECL_RELEASE_ACQUIRE_ACCESSORS(call_code, HeapObject)
-
- // TODO(nicohartmann@, v8:11122): Let Torque generate the following accessor.
- inline HeapObject rare_data(AcquireLoadTag) const;
- inline HeapObject rare_data(PtrComprCageBase cage_base, AcquireLoadTag) const;
- inline void set_rare_data(
- HeapObject value, ReleaseStoreTag,
- WriteBarrierMode mode = WriteBarrierMode::UPDATE_WRITE_BARRIER);
-
// Begin flag bits ---------------------
DECL_BOOLEAN_ACCESSORS(undetectable)
@@ -111,10 +114,6 @@ class FunctionTemplateInfo
// prototype_provoider_template are instantiated.
DECL_BOOLEAN_ACCESSORS(remove_prototype)
- // If set, do not attach a serial number to this FunctionTemplate and thus do
- // not keep an instance boilerplate around.
- DECL_BOOLEAN_ACCESSORS(do_not_cache)
-
// If not set an access may be performed on calling the associated JSFunction.
DECL_BOOLEAN_ACCESSORS(accept_any_receiver)
@@ -128,8 +127,6 @@ class FunctionTemplateInfo
// Dispatched behavior.
DECL_PRINTER(FunctionTemplateInfo)
- static const int kInvalidSerialNumber = 0;
-
static Handle<SharedFunctionInfo> GetOrCreateSharedFunctionInfo(
Isolate* isolate, Handle<FunctionTemplateInfo> info,
MaybeHandle<Name> maybe_name);
@@ -146,14 +143,26 @@ class FunctionTemplateInfo
inline FunctionTemplateInfo GetParent(Isolate* isolate);
// Returns true if |object| is an instance of this function template.
inline bool IsTemplateFor(JSObject object);
- bool IsTemplateFor(Map map);
+ bool IsTemplateFor(Map map) const;
+ // Returns true if |object| is an API object and is constructed by this
+ // particular function template (skips walking up the chain of inheriting
+ // functions that is done by IsTemplateFor).
+ bool IsLeafTemplateForApiObject(Object object) const;
inline bool instantiated();
inline bool BreakAtEntry();
// Helper function for cached accessors.
- static MaybeHandle<Name> TryGetCachedPropertyName(Isolate* isolate,
- Handle<Object> getter);
+ static base::Optional<Name> TryGetCachedPropertyName(Isolate* isolate,
+ Object getter);
+ // Fast API overloads.
+ int GetCFunctionsCount() const;
+ Address GetCFunction(int index) const;
+ const CFunctionInfo* GetCSignature(int index) const;
+
+ // CFunction data for a set of overloads is stored into a FixedArray, as
+ // [address_0, signature_0, ... address_n-1, signature_n-1].
+ static const int kFunctionOverloadEntrySize = 2;
// Bit position in the flag, from least significant bit position.
DEFINE_TORQUE_GENERATED_FUNCTION_TEMPLATE_INFO_FLAGS()
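
    A short sketch (hypothetical type, mirroring kFunctionOverloadEntrySize ==
    2) of how the flat [address, signature] overload array described above is
    indexed:

    #include <vector>

    struct CFunctionOverloadsSketch {
      std::vector<void*> slots;  // address_0, signature_0, address_1, ...
      static constexpr int kEntrySize = 2;
      int count() const { return static_cast<int>(slots.size()) / kEntrySize; }
      void* address(int i) const { return slots[i * kEntrySize]; }
      void* signature(int i) const { return slots[i * kEntrySize + 1]; }
    };

    GetCFunctionsCount/GetCFunction/GetCSignature on FunctionTemplateInfo are
    expected to apply the same indexing to the stored FixedArray.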
diff --git a/chromium/v8/src/objects/templates.tq b/chromium/v8/src/objects/templates.tq
index e952747ecf7..1dec706d490 100644
--- a/chromium/v8/src/objects/templates.tq
+++ b/chromium/v8/src/objects/templates.tq
@@ -24,8 +24,7 @@ extern class FunctionTemplateRareData extends Struct {
instance_template: ObjectTemplateInfo|Undefined;
instance_call_handler: CallHandlerInfo|Undefined;
access_check_info: AccessCheckInfo|Undefined;
- c_function: Foreign|Zero;
- c_signature: Foreign|Zero;
+ c_function_overloads: FixedArray;
}
bitfield struct FunctionTemplateInfoFlags extends uint31 {
@@ -33,7 +32,6 @@ bitfield struct FunctionTemplateInfoFlags extends uint31 {
needs_access_check: bool: 1 bit;
read_only_prototype: bool: 1 bit;
remove_prototype: bool: 1 bit;
- do_not_cache: bool: 1 bit;
accept_any_receiver: bool: 1 bit;
published: bool: 1 bit;
}
@@ -42,7 +40,7 @@ bitfield struct FunctionTemplateInfoFlags extends uint31 {
extern class FunctionTemplateInfo extends TemplateInfo {
// Handler invoked when calling an instance of this FunctionTemplateInfo.
// Either CallHandlerInfo or Undefined.
- call_code: CallHandlerInfo|Undefined;
+ @cppAcquireLoad @cppReleaseStore call_code: CallHandlerInfo|Undefined;
class_name: String|Undefined;
// If the signature is a FunctionTemplateInfo it is used to check whether the
// receiver calling the associated JSFunction is a compatible receiver, i.e.
@@ -52,6 +50,8 @@ extern class FunctionTemplateInfo extends TemplateInfo {
// If any of the setters declared by DECL_RARE_ACCESSORS are used then a
// FunctionTemplateRareData will be stored here. Until then this contains
// undefined.
+ @cppAcquireLoad
+ @cppReleaseStore
rare_data: FunctionTemplateRareData|Undefined;
shared_function_info: SharedFunctionInfo|Undefined;
// Internal field to store a flag bitfield.
diff --git a/chromium/v8/src/objects/torque-defined-classes.h b/chromium/v8/src/objects/torque-defined-classes.h
index aeea4e1c536..4273d347117 100644
--- a/chromium/v8/src/objects/torque-defined-classes.h
+++ b/chromium/v8/src/objects/torque-defined-classes.h
@@ -7,6 +7,7 @@
#include "src/objects/descriptor-array.h"
#include "src/objects/fixed-array.h"
#include "src/objects/heap-object.h"
+#include "src/objects/megadom-handler.h"
#include "src/objects/objects.h"
// Has to be the last include (doesn't have include guards):
diff --git a/chromium/v8/src/objects/transitions.cc b/chromium/v8/src/objects/transitions.cc
index ac908030a2d..a8bf5bbde0c 100644
--- a/chromium/v8/src/objects/transitions.cc
+++ b/chromium/v8/src/objects/transitions.cc
@@ -510,7 +510,7 @@ void TransitionsAccessor::EnsureHasFullTransitionArray() {
}
void TransitionsAccessor::TraverseTransitionTreeInternal(
- TraverseCallback callback, void* data, DisallowGarbageCollection* no_gc) {
+ TraverseCallback callback, DisallowGarbageCollection* no_gc) {
switch (encoding()) {
case kPrototypeInfo:
case kUninitialized:
@@ -520,7 +520,7 @@ void TransitionsAccessor::TraverseTransitionTreeInternal(
Map simple_target =
Map::cast(raw_transitions_->GetHeapObjectAssumeWeak());
TransitionsAccessor(isolate_, simple_target, no_gc)
- .TraverseTransitionTreeInternal(callback, data, no_gc);
+ .TraverseTransitionTreeInternal(callback, no_gc);
break;
}
case kFullTransitionArray: {
@@ -533,7 +533,7 @@ void TransitionsAccessor::TraverseTransitionTreeInternal(
HeapObject heap_object;
if (target->GetHeapObjectIfWeak(&heap_object)) {
TransitionsAccessor(isolate_, Map::cast(heap_object), no_gc)
- .TraverseTransitionTreeInternal(callback, data, no_gc);
+ .TraverseTransitionTreeInternal(callback, no_gc);
} else {
DCHECK(target->IsCleared());
}
@@ -541,12 +541,12 @@ void TransitionsAccessor::TraverseTransitionTreeInternal(
}
for (int i = 0; i < transitions().number_of_transitions(); ++i) {
TransitionsAccessor(isolate_, transitions().GetTarget(i), no_gc)
- .TraverseTransitionTreeInternal(callback, data, no_gc);
+ .TraverseTransitionTreeInternal(callback, no_gc);
}
break;
}
}
- callback(map_, data);
+ callback(map_);
}
#ifdef DEBUG
diff --git a/chromium/v8/src/objects/transitions.h b/chromium/v8/src/objects/transitions.h
index 237cfcd0efa..473827fd405 100644
--- a/chromium/v8/src/objects/transitions.h
+++ b/chromium/v8/src/objects/transitions.h
@@ -100,13 +100,13 @@ class V8_EXPORT_PRIVATE TransitionsAccessor {
PropertyAttributes* out_integrity_level = nullptr);
// ===== ITERATION =====
- using TraverseCallback = void (*)(Map map, void* data);
+ using TraverseCallback = std::function<void(Map)>;
// Traverse the transition tree in postorder.
- void TraverseTransitionTree(TraverseCallback callback, void* data) {
+ void TraverseTransitionTree(TraverseCallback callback) {
// Make sure that we do not allocate in the callback.
DisallowGarbageCollection no_gc;
- TraverseTransitionTreeInternal(callback, data, &no_gc);
+ TraverseTransitionTreeInternal(callback, &no_gc);
}
// ===== PROTOTYPE TRANSITIONS =====
@@ -192,7 +192,7 @@ class V8_EXPORT_PRIVATE TransitionsAccessor {
void SetPrototypeTransitions(Handle<WeakFixedArray> proto_transitions);
WeakFixedArray GetPrototypeTransitions();
- void TraverseTransitionTreeInternal(TraverseCallback callback, void* data,
+ void TraverseTransitionTreeInternal(TraverseCallback callback,
DisallowGarbageCollection* no_gc);
Isolate* isolate_;
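
    A brief usage sketch of the new std::function-based callback (the
    `accessor` instance below is an assumed TransitionsAccessor): state that
    previously had to travel through the removed void* data parameter can now
    simply be captured by a lambda.

    std::vector<Map> reachable_maps;
    accessor.TraverseTransitionTree(
        [&reachable_maps](Map map) { reachable_maps.push_back(map); });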
diff --git a/chromium/v8/src/objects/value-serializer.cc b/chromium/v8/src/objects/value-serializer.cc
index 43d946943b0..9984f097a42 100644
--- a/chromium/v8/src/objects/value-serializer.cc
+++ b/chromium/v8/src/objects/value-serializer.cc
@@ -22,6 +22,7 @@
#include "src/objects/js-array-inl.h"
#include "src/objects/js-collection-inl.h"
#include "src/objects/js-regexp-inl.h"
+#include "src/objects/map-updater.h"
#include "src/objects/objects-inl.h"
#include "src/objects/objects.h"
#include "src/objects/oddball-inl.h"
@@ -226,6 +227,8 @@ enum class ErrorTag : uint8_t {
kUriErrorPrototype = 'U',
// Followed by message: string.
kMessage = 'm',
+ // Followed by a JS object: cause.
+ kCause = 'c',
// Followed by stack: string.
kStack = 's',
// The end of this error information.
@@ -299,6 +302,9 @@ void ValueSerializer::WriteZigZag(T value) {
(value >> (8 * sizeof(T) - 1)));
}
+template EXPORT_TEMPLATE_DEFINE(
+ V8_EXPORT_PRIVATE) void ValueSerializer::WriteZigZag(int32_t value);
+
void ValueSerializer::WriteDouble(double value) {
// Warning: this uses host endianness.
WriteRawBytes(&value, sizeof(value));
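
    For reference, a minimal standalone sketch of the ZigZag transform used by
    WriteZigZag/ReadZigZag above. It maps values of small magnitude, of either
    sign, onto small unsigned integers (0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, ...)
    so they varint-encode compactly:

    #include <cstdint>

    uint32_t ZigZagEncode32(int32_t v) {
      return (static_cast<uint32_t>(v) << 1) ^ static_cast<uint32_t>(v >> 31);
    }
    int32_t ZigZagDecode32(uint32_t u) {
      return static_cast<int32_t>(u >> 1) ^ -static_cast<int32_t>(u & 1);
    }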
@@ -934,6 +940,9 @@ Maybe<bool> ValueSerializer::WriteJSError(Handle<JSObject> error) {
Maybe<bool> message_found = JSReceiver::GetOwnPropertyDescriptor(
isolate_, error, isolate_->factory()->message_string(), &message_desc);
MAYBE_RETURN(message_found, Nothing<bool>());
+ PropertyDescriptor cause_desc;
+ Maybe<bool> cause_found = JSReceiver::GetOwnPropertyDescriptor(
+ isolate_, error, isolate_->factory()->cause_string(), &cause_desc);
WriteTag(SerializationTag::kError);
@@ -973,6 +982,15 @@ Maybe<bool> ValueSerializer::WriteJSError(Handle<JSObject> error) {
WriteString(message);
}
+ if (cause_found.FromJust() &&
+ PropertyDescriptor::IsDataDescriptor(&cause_desc)) {
+ Handle<Object> cause = cause_desc.value();
+ WriteVarint(static_cast<uint8_t>(ErrorTag::kCause));
+ if (!WriteObject(cause).FromMaybe(false)) {
+ return Nothing<bool>();
+ }
+ }
+
if (!Object::GetProperty(isolate_, error, isolate_->factory()->stack_string())
.ToHandle(&stack)) {
return Nothing<bool>();
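
    A sketch of the resulting error payload layout (tag bytes from the ErrorTag
    enum; which optional pieces appear depends on the error object being
    written):

    // SerializationTag::kError
    //   prototype tag(s), e.g. 'E' / 'U' / ...   error type
    //   'm' <message string>                     if a message property exists
    //   'c' <serialized cause value>             new: the error's cause
    //   's' <stack string>                       stack, when available
    //   ErrorTag::kEnd                           terminates the error data

    On the read side, ReadJSError turns a kCause entry into an options object
    carrying a `cause` property, which is then passed to ErrorUtils::Construct.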
@@ -1200,6 +1218,9 @@ Maybe<T> ValueDeserializer::ReadZigZag() {
-static_cast<T>(unsigned_value & 1)));
}
+template EXPORT_TEMPLATE_DEFINE(
+ V8_EXPORT_PRIVATE) Maybe<int32_t> ValueDeserializer::ReadZigZag();
+
Maybe<double> ValueDeserializer::ReadDouble() {
// Warning: this uses host endianness.
if (sizeof(double) > static_cast<unsigned>(end_ - position_))
@@ -1527,7 +1548,7 @@ MaybeHandle<JSArray> ValueDeserializer::ReadSparseJSArray() {
HandleScope scope(isolate_);
Handle<JSArray> array =
isolate_->factory()->NewJSArray(0, TERMINAL_FAST_ELEMENTS_KIND);
- JSArray::SetLength(array, length);
+ MAYBE_RETURN(JSArray::SetLength(array, length), MaybeHandle<JSArray>());
AddObjectWithID(id, array);
uint32_t num_properties;
@@ -1874,6 +1895,7 @@ MaybeHandle<JSArrayBufferView> ValueDeserializer::ReadJSArrayBufferView(
MaybeHandle<Object> ValueDeserializer::ReadJSError() {
Handle<Object> message = isolate_->factory()->undefined_value();
+ Handle<Object> options = isolate_->factory()->undefined_value();
Handle<Object> stack = isolate_->factory()->undefined_value();
Handle<Object> no_caller;
auto constructor = isolate_->error_function();
@@ -1911,6 +1933,20 @@ MaybeHandle<Object> ValueDeserializer::ReadJSError() {
message = message_string;
break;
}
+ case ErrorTag::kCause: {
+ Handle<Object> cause;
+ if (!ReadObject().ToHandle(&cause)) {
+ return MaybeHandle<JSObject>();
+ }
+ options = isolate_->factory()->NewJSObject(isolate_->object_function());
+ if (JSObject::DefinePropertyOrElementIgnoreAttributes(
+ Handle<JSObject>::cast(options),
+ isolate_->factory()->cause_string(), cause, DONT_ENUM)
+ .is_null()) {
+ return MaybeHandle<JSObject>();
+ }
+ break;
+ }
case ErrorTag::kStack: {
Handle<String> stack_string;
if (!ReadString().ToHandle(&stack_string)) {
@@ -1929,7 +1965,7 @@ MaybeHandle<Object> ValueDeserializer::ReadJSError() {
Handle<Object> error;
if (!ErrorUtils::Construct(isolate_, constructor, constructor, message,
- SKIP_NONE, no_caller,
+ options, SKIP_NONE, no_caller,
ErrorUtils::StackTraceCollection::kNone)
.ToHandle(&error)) {
return MaybeHandle<Object>();
@@ -2101,9 +2137,9 @@ Maybe<uint32_t> ValueDeserializer::ReadJSObjectProperties(
.NowContains(value)) {
Handle<FieldType> value_type =
value->OptimalType(isolate_, expected_representation);
- Map::GeneralizeField(isolate_, target, descriptor,
- details.constness(), expected_representation,
- value_type);
+ MapUpdater::GeneralizeField(isolate_, target, descriptor,
+ details.constness(),
+ expected_representation, value_type);
}
DCHECK(target->instance_descriptors(isolate_)
.GetFieldType(descriptor)
@@ -2285,7 +2321,7 @@ ValueDeserializer::ReadObjectUsingEntireBufferForLegacyFormat() {
Handle<JSArray> js_array =
isolate_->factory()->NewJSArray(0, TERMINAL_FAST_ELEMENTS_KIND);
- JSArray::SetLength(js_array, length);
+ MAYBE_RETURN_NULL(JSArray::SetLength(js_array, length));
size_t begin_properties =
stack.size() - 2 * static_cast<size_t>(num_properties);
if (num_properties &&
diff --git a/chromium/v8/src/objects/visitors.h b/chromium/v8/src/objects/visitors.h
index 1111bf25129..947db3fa510 100644
--- a/chromium/v8/src/objects/visitors.h
+++ b/chromium/v8/src/objects/visitors.h
@@ -39,6 +39,8 @@ class CodeDataContainer;
V(kReadOnlyObjectCache, "(Read-only object cache)") \
V(kWeakCollections, "(Weak collections)") \
V(kWrapperTracing, "(Wrapper tracing)") \
+ V(kWriteBarrier, "(Write barrier)") \
+ V(kRetainMaps, "(Retain maps)") \
V(kUnknown, "(Unknown)")
class VisitorSynchronization : public AllStatic {
@@ -162,6 +164,9 @@ class ObjectVisitor {
// Visits the relocation info using the given iterator.
virtual void VisitRelocInfo(RelocIterator* it);
+
+ // Visits the object's map pointer, decoding as necessary
+ virtual void VisitMapPointer(HeapObject host) { UNREACHABLE(); }
};
} // namespace internal
diff --git a/chromium/v8/src/parsing/literal-buffer.cc b/chromium/v8/src/parsing/literal-buffer.cc
index 42a41cb15d6..a3e665a5c3c 100644
--- a/chromium/v8/src/parsing/literal-buffer.cc
+++ b/chromium/v8/src/parsing/literal-buffer.cc
@@ -12,8 +12,8 @@
namespace v8 {
namespace internal {
-template <typename LocalIsolate>
-Handle<String> LiteralBuffer::Internalize(LocalIsolate* isolate) const {
+template <typename IsolateT>
+Handle<String> LiteralBuffer::Internalize(IsolateT* isolate) const {
if (is_one_byte()) {
return isolate->factory()->InternalizeString(one_byte_literal());
}
diff --git a/chromium/v8/src/parsing/literal-buffer.h b/chromium/v8/src/parsing/literal-buffer.h
index 60c70a5b468..3a3457082c0 100644
--- a/chromium/v8/src/parsing/literal-buffer.h
+++ b/chromium/v8/src/parsing/literal-buffer.h
@@ -66,8 +66,8 @@ class LiteralBuffer final {
is_one_byte_ = true;
}
- template <typename LocalIsolate>
- Handle<String> Internalize(LocalIsolate* isolate) const;
+ template <typename IsolateT>
+ Handle<String> Internalize(IsolateT* isolate) const;
private:
static const int kInitialCapacity = 16;
diff --git a/chromium/v8/src/parsing/parse-info.cc b/chromium/v8/src/parsing/parse-info.cc
index 69d18ef2b2b..6f469383a30 100644
--- a/chromium/v8/src/parsing/parse-info.cc
+++ b/chromium/v8/src/parsing/parse-info.cc
@@ -234,9 +234,9 @@ ParseInfo::~ParseInfo() = default;
DeclarationScope* ParseInfo::scope() const { return literal()->scope(); }
-template <typename LocalIsolate>
+template <typename IsolateT>
Handle<Script> ParseInfo::CreateScript(
- LocalIsolate* isolate, Handle<String> source,
+ IsolateT* isolate, Handle<String> source,
MaybeHandle<FixedArray> maybe_wrapped_arguments,
ScriptOriginOptions origin_options, NativesFlag natives) {
// Create a script object describing the script to be compiled.
diff --git a/chromium/v8/src/parsing/parse-info.h b/chromium/v8/src/parsing/parse-info.h
index 2068847efbc..d203ebed854 100644
--- a/chromium/v8/src/parsing/parse-info.h
+++ b/chromium/v8/src/parsing/parse-info.h
@@ -212,9 +212,9 @@ class V8_EXPORT_PRIVATE ParseInfo {
~ParseInfo();
- template <typename LocalIsolate>
+ template <typename IsolateT>
EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
- Handle<Script> CreateScript(LocalIsolate* isolate, Handle<String> source,
+ Handle<Script> CreateScript(IsolateT* isolate, Handle<String> source,
MaybeHandle<FixedArray> maybe_wrapped_arguments,
ScriptOriginOptions origin_options,
NativesFlag natives = NOT_NATIVES_CODE);
diff --git a/chromium/v8/src/parsing/parser-base.h b/chromium/v8/src/parsing/parser-base.h
index db0966803b1..d30c3f1b1fc 100644
--- a/chromium/v8/src/parsing/parser-base.h
+++ b/chromium/v8/src/parsing/parser-base.h
@@ -1060,14 +1060,14 @@ class ParserBase {
bool is_resumable() const {
return IsResumableFunction(function_state_->kind());
}
- bool is_class_static_block() const {
- return function_state_->kind() ==
- FunctionKind::kClassStaticInitializerFunction;
- }
bool is_await_allowed() const {
return is_async_function() || (flags().allow_harmony_top_level_await() &&
IsModule(function_state_->kind()));
}
+ bool is_await_as_identifier_disallowed() {
+ return flags().is_module() ||
+ IsAwaitAsIdentifierDisallowed(function_state_->kind());
+ }
const PendingCompilationErrorHandler* pending_error_handler() const {
return pending_error_handler_;
}
@@ -1652,8 +1652,7 @@ ParserBase<Impl>::ParseAndClassifyIdentifier(Token::Value next) {
}
if (!Token::IsValidIdentifier(next, language_mode(), is_generator(),
- flags().is_module() || is_async_function() ||
- is_class_static_block())) {
+ is_await_as_identifier_disallowed())) {
ReportUnexpectedToken(next);
return impl()->EmptyIdentifierString();
}
@@ -1677,7 +1676,8 @@ typename ParserBase<Impl>::IdentifierT ParserBase<Impl>::ParseIdentifier(
if (!Token::IsValidIdentifier(
next, language_mode(), IsGeneratorFunction(function_kind),
- flags().is_module() || IsAsyncFunction(function_kind))) {
+ flags().is_module() ||
+ IsAwaitAsIdentifierDisallowed(function_kind))) {
ReportUnexpectedToken(next);
return impl()->EmptyIdentifierString();
}
@@ -2570,9 +2570,8 @@ ParserBase<Impl>::ParseObjectPropertyDefinition(ParsePropertyInfo* prop_info,
// IdentifierReference Initializer?
DCHECK_EQ(function_flags, ParseFunctionFlag::kIsNormal);
- if (!Token::IsValidIdentifier(
- name_token, language_mode(), is_generator(),
- flags().is_module() || is_async_function())) {
+ if (!Token::IsValidIdentifier(name_token, language_mode(), is_generator(),
+ is_await_as_identifier_disallowed())) {
ReportUnexpectedToken(Next());
return impl()->NullLiteralProperty();
}
@@ -4427,6 +4426,12 @@ bool ParserBase<Impl>::IsNextLetKeyword() {
case Token::ASYNC:
return true;
case Token::FUTURE_STRICT_RESERVED_WORD:
+ case Token::ESCAPED_STRICT_RESERVED_WORD:
+ // The early error rule for future reserved keywords
+ // (ES#sec-identifiers-static-semantics-early-errors) uses the static
+ // semantics StringValue of IdentifierName, which normalizes escape
+ // sequences. So, both escaped and unescaped future reserved keywords are
+ // allowed as identifiers in sloppy mode.
return is_sloppy(language_mode());
default:
return false;
@@ -4437,12 +4442,11 @@ template <typename Impl>
typename ParserBase<Impl>::ExpressionT
ParserBase<Impl>::ParseArrowFunctionLiteral(
const FormalParametersT& formal_parameters) {
- const RuntimeCallCounterId counters[2] = {
- RuntimeCallCounterId::kParseArrowFunctionLiteral,
- RuntimeCallCounterId::kPreParseArrowFunctionLiteral};
- RuntimeCallTimerScope runtime_timer(runtime_call_stats_,
- counters[Impl::IsPreParser()],
- RuntimeCallStats::kThreadSpecific);
+ RCS_SCOPE(runtime_call_stats_,
+ Impl::IsPreParser()
+ ? RuntimeCallCounterId::kPreParseArrowFunctionLiteral
+ : RuntimeCallCounterId::kParseArrowFunctionLiteral,
+ RuntimeCallStats::kThreadSpecific);
base::ElapsedTimer timer;
if (V8_UNLIKELY(FLAG_log_function_events)) timer.Start();
@@ -4631,7 +4635,7 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseClassLiteral(
ClassInfo class_info(this);
class_info.is_anonymous = is_anonymous;
- scope()->set_start_position(end_position());
+ scope()->set_start_position(class_token_pos);
if (Check(Token::EXTENDS)) {
ClassScope::HeritageParsingScope heritage(class_scope);
FuncNameInferrerState fni_state(&fni_);
@@ -5553,6 +5557,14 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseReturnStatement() {
case MODULE_SCOPE:
impl()->ReportMessageAt(loc, MessageTemplate::kIllegalReturn);
return impl()->NullStatement();
+ case BLOCK_SCOPE:
+ // Class static blocks disallow return. They are their own var scopes and
+ // have a varblock scope.
+ if (function_state_->kind() == kClassStaticInitializerFunction) {
+ impl()->ReportMessageAt(loc, MessageTemplate::kIllegalReturn);
+ return impl()->NullStatement();
+ }
+ break;
default:
break;
}
@@ -5987,7 +5999,8 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseForStatement(
expression = ParseExpressionCoverGrammar();
// `for (async of` is disallowed but `for (async.x of` is allowed, so
// check if the token is ASYNC after parsing the expression.
- bool expression_is_async = scanner()->current_token() == Token::ASYNC;
+ bool expression_is_async = scanner()->current_token() == Token::ASYNC &&
+ !scanner()->literal_contains_escapes();
// Initializer is reference followed by in/of.
lhs_end_pos = end_position();
is_for_each = CheckInOrOf(&for_info.mode);
diff --git a/chromium/v8/src/parsing/parser.cc b/chromium/v8/src/parsing/parser.cc
index 92e11f6b9cc..0671fc5f6b8 100644
--- a/chromium/v8/src/parsing/parser.cc
+++ b/chromium/v8/src/parsing/parser.cc
@@ -522,10 +522,9 @@ void Parser::ParseProgram(Isolate* isolate, Handle<Script> script,
// It's OK to use the Isolate & counters here, since this function is only
// called in the main thread.
DCHECK(parsing_on_main_thread_);
- RuntimeCallTimerScope runtime_timer(
- runtime_call_stats_, flags().is_eval()
- ? RuntimeCallCounterId::kParseEval
- : RuntimeCallCounterId::kParseProgram);
+ RCS_SCOPE(runtime_call_stats_, flags().is_eval()
+ ? RuntimeCallCounterId::kParseEval
+ : RuntimeCallCounterId::kParseProgram);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.ParseProgram");
base::ElapsedTimer timer;
if (V8_UNLIKELY(FLAG_log_function_events)) timer.Start();
@@ -704,9 +703,8 @@ void Parser::PostProcessParseResult(Isolate* isolate, ParseInfo* info,
if (isolate) info->ast_value_factory()->Internalize(isolate);
{
- RuntimeCallTimerScope runtimeTimer(info->runtime_call_stats(),
- RuntimeCallCounterId::kCompileAnalyse,
- RuntimeCallStats::kThreadSpecific);
+ RCS_SCOPE(info->runtime_call_stats(), RuntimeCallCounterId::kCompileAnalyse,
+ RuntimeCallStats::kThreadSpecific);
if (!Rewriter::Rewrite(info) || !DeclarationScope::Analyze(info)) {
// Null out the literal to indicate that something failed.
info->set_literal(nullptr);
@@ -819,8 +817,7 @@ void Parser::ParseFunction(Isolate* isolate, ParseInfo* info,
// It's OK to use the Isolate & counters here, since this function is only
// called in the main thread.
DCHECK(parsing_on_main_thread_);
- RuntimeCallTimerScope runtime_timer(runtime_call_stats_,
- RuntimeCallCounterId::kParseFunction);
+ RCS_SCOPE(runtime_call_stats_, RuntimeCallCounterId::kParseFunction);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.ParseFunction");
base::ElapsedTimer timer;
if (V8_UNLIKELY(FLAG_log_function_events)) timer.Start();
@@ -833,14 +830,23 @@ void Parser::ParseFunction(Isolate* isolate, ParseInfo* info,
Scope::DeserializationMode::kIncludingVariables);
DCHECK_EQ(factory()->zone(), info->zone());
+ Handle<Script> script = handle(Script::cast(shared_info->script()), isolate);
if (shared_info->is_wrapped()) {
- maybe_wrapped_arguments_ = handle(
- Script::cast(shared_info->script()).wrapped_arguments(), isolate);
+ maybe_wrapped_arguments_ = handle(script->wrapped_arguments(), isolate);
}
int start_position = shared_info->StartPosition();
int end_position = shared_info->EndPosition();
int function_literal_id = shared_info->function_literal_id();
+ if V8_UNLIKELY (script->type() == Script::TYPE_WEB_SNAPSHOT) {
+ // Function literal IDs for inner functions haven't been allocated when
+ // deserializing. Put the inner function SFIs at the end of the list;
+ // they'll be deduplicated later (if the corresponding SFIs exist already)
+ // in Script::FindSharedFunctionInfo. (-1 here because function_literal_id
+ // is the parent's id. The inner function will get ids starting from
+ // function_literal_id + 1.)
+ function_literal_id = script->shared_function_info_count() - 1;
+ }
// Initialize parser state.
Handle<String> name(shared_info->Name(), isolate);
@@ -865,9 +871,10 @@ void Parser::ParseFunction(Isolate* isolate, ParseInfo* info,
if (result != nullptr) {
Handle<String> inferred_name(shared_info->inferred_name(), isolate);
result->set_inferred_name(inferred_name);
+ // Fix the function_literal_id in case we changed it earlier.
+ result->set_function_literal_id(shared_info->function_literal_id());
}
PostProcessParseResult(isolate, info, result);
-
if (V8_UNLIKELY(FLAG_log_function_events) && result != nullptr) {
double ms = timer.Elapsed().InMillisecondsF();
// We should already be internalized by now, so the debug name will be
@@ -971,6 +978,10 @@ FunctionLiteral* Parser::DoParseFunction(Isolate* isolate, ParseInfo* info,
if (p->initializer() != nullptr) {
reindexer.Reindex(p->initializer());
}
+ if (reindexer.HasStackOverflow()) {
+ set_stack_overflow();
+ return nullptr;
+ }
}
ResetFunctionLiteralId();
SkipFunctionLiterals(function_literal_id - 1);
@@ -2559,9 +2570,8 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
const bool is_lazy_top_level_function = is_lazy && is_top_level;
const bool is_lazy_inner_function = is_lazy && !is_top_level;
- RuntimeCallTimerScope runtime_timer(
- runtime_call_stats_, RuntimeCallCounterId::kParseFunctionLiteral,
- RuntimeCallStats::kThreadSpecific);
+ RCS_SCOPE(runtime_call_stats_, RuntimeCallCounterId::kParseFunctionLiteral,
+ RuntimeCallStats::kThreadSpecific);
base::ElapsedTimer timer;
if (V8_UNLIKELY(FLAG_log_function_events)) timer.Start();
@@ -2652,14 +2662,14 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
reinterpret_cast<const char*>(function_name->raw_data()),
function_name->byte_length(), function_name->is_one_byte());
}
- if (V8_UNLIKELY(TracingFlags::is_runtime_stats_enabled()) &&
- did_preparse_successfully) {
- if (runtime_call_stats_) {
- runtime_call_stats_->CorrectCurrentCounterId(
- RuntimeCallCounterId::kPreParseWithVariableResolution,
- RuntimeCallStats::kThreadSpecific);
- }
+#ifdef V8_RUNTIME_CALL_STATS
+ if (did_preparse_successfully && runtime_call_stats_ &&
+ V8_UNLIKELY(TracingFlags::is_runtime_stats_enabled())) {
+ runtime_call_stats_->CorrectCurrentCounterId(
+ RuntimeCallCounterId::kPreParseWithVariableResolution,
+ RuntimeCallStats::kThreadSpecific);
}
+#endif // V8_RUNTIME_CALL_STATS
// Validate function name. We can do this only after parsing the function,
// since the function can declare itself strict.
@@ -3224,9 +3234,8 @@ void Parser::InsertSloppyBlockFunctionVarBindings(DeclarationScope* scope) {
// ----------------------------------------------------------------------------
// Parser support
-template <typename LocalIsolate>
-void Parser::HandleSourceURLComments(LocalIsolate* isolate,
- Handle<Script> script) {
+template <typename IsolateT>
+void Parser::HandleSourceURLComments(IsolateT* isolate, Handle<Script> script) {
Handle<String> source_url = scanner_.SourceUrl(isolate);
if (!source_url.is_null()) {
script->set_source_url(*source_url);
@@ -3264,8 +3273,7 @@ void Parser::UpdateStatistics(Isolate* isolate, Handle<Script> script) {
void Parser::ParseOnBackground(ParseInfo* info, int start_position,
int end_position, int function_literal_id) {
- RuntimeCallTimerScope runtimeTimer(
- runtime_call_stats_, RuntimeCallCounterId::kParseBackgroundProgram);
+ RCS_SCOPE(runtime_call_stats_, RuntimeCallCounterId::kParseBackgroundProgram);
parsing_on_main_thread_ = false;
DCHECK_NULL(info->literal());
diff --git a/chromium/v8/src/parsing/parser.h b/chromium/v8/src/parsing/parser.h
index 4ede3035036..07efef277e1 100644
--- a/chromium/v8/src/parsing/parser.h
+++ b/chromium/v8/src/parsing/parser.h
@@ -161,8 +161,8 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
// Move statistics to Isolate
void UpdateStatistics(Isolate* isolate, Handle<Script> script);
- template <typename LocalIsolate>
- void HandleSourceURLComments(LocalIsolate* isolate, Handle<Script> script);
+ template <typename IsolateT>
+ void HandleSourceURLComments(IsolateT* isolate, Handle<Script> script);
private:
friend class ParserBase<Parser>;
@@ -654,15 +654,6 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
}
}
- // A shortcut for performing a ToString operation
- V8_INLINE Expression* ToString(Expression* expr) {
- if (expr->IsStringLiteral()) return expr;
- ScopedPtrList<Expression> args(pointer_buffer());
- args.Add(expr);
- return factory()->NewCallRuntime(Runtime::kInlineToString, args,
- expr->position());
- }
-
// Returns true if we have a binary expression between two numeric
// literals. In that case, *x will be changed to an expression which is the
// computed value.
diff --git a/chromium/v8/src/parsing/pending-compilation-error-handler.cc b/chromium/v8/src/parsing/pending-compilation-error-handler.cc
index dccd6dba77e..2ab5b3eb1ad 100644
--- a/chromium/v8/src/parsing/pending-compilation-error-handler.cc
+++ b/chromium/v8/src/parsing/pending-compilation-error-handler.cc
@@ -31,9 +31,9 @@ void PendingCompilationErrorHandler::MessageDetails::SetString(
arg_handle_ = isolate->heap()->NewPersistentHandle(string);
}
-template <typename LocalIsolate>
+template <typename IsolateT>
void PendingCompilationErrorHandler::MessageDetails::Prepare(
- LocalIsolate* isolate) {
+ IsolateT* isolate) {
switch (type_) {
case kAstRawString:
return SetString(arg_->string(), isolate);
@@ -101,8 +101,8 @@ void PendingCompilationErrorHandler::ReportWarningAt(int start_position,
MessageDetails(start_position, end_position, message, arg));
}
-template <typename LocalIsolate>
-void PendingCompilationErrorHandler::PrepareWarnings(LocalIsolate* isolate) {
+template <typename IsolateT>
+void PendingCompilationErrorHandler::PrepareWarnings(IsolateT* isolate) {
DCHECK(!has_pending_error());
for (MessageDetails& warning : warning_messages_) {
@@ -128,9 +128,9 @@ void PendingCompilationErrorHandler::ReportWarnings(
}
}
-template <typename LocalIsolate>
+template <typename IsolateT>
void PendingCompilationErrorHandler::PrepareErrors(
- LocalIsolate* isolate, AstValueFactory* ast_value_factory) {
+ IsolateT* isolate, AstValueFactory* ast_value_factory) {
if (stack_overflow()) return;
DCHECK(has_pending_error());
diff --git a/chromium/v8/src/parsing/pending-compilation-error-handler.h b/chromium/v8/src/parsing/pending-compilation-error-handler.h
index 5efb5a43a48..31e765d5145 100644
--- a/chromium/v8/src/parsing/pending-compilation-error-handler.h
+++ b/chromium/v8/src/parsing/pending-compilation-error-handler.h
@@ -53,15 +53,15 @@ class PendingCompilationErrorHandler {
bool has_pending_warnings() const { return !warning_messages_.empty(); }
// Handle errors detected during parsing.
- template <typename LocalIsolate>
+ template <typename IsolateT>
EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
- void PrepareErrors(LocalIsolate* isolate, AstValueFactory* ast_value_factory);
+ void PrepareErrors(IsolateT* isolate, AstValueFactory* ast_value_factory);
V8_EXPORT_PRIVATE void ReportErrors(Isolate* isolate,
Handle<Script> script) const;
// Handle warnings detected during compilation.
- template <typename LocalIsolate>
- void PrepareWarnings(LocalIsolate* isolate);
+ template <typename IsolateT>
+ void PrepareWarnings(IsolateT* isolate);
void ReportWarnings(Isolate* isolate, Handle<Script> script) const;
V8_EXPORT_PRIVATE Handle<String> FormatErrorMessageForTest(Isolate* isolate);
@@ -106,8 +106,8 @@ class PendingCompilationErrorHandler {
MessageLocation GetLocation(Handle<Script> script) const;
MessageTemplate message() const { return message_; }
- template <typename LocalIsolate>
- void Prepare(LocalIsolate* isolate);
+ template <typename IsolateT>
+ void Prepare(IsolateT* isolate);
private:
enum Type { kNone, kAstRawString, kConstCharString, kMainThreadHandle };
diff --git a/chromium/v8/src/parsing/preparser.cc b/chromium/v8/src/parsing/preparser.cc
index 2764e00a8bf..4547e602ee3 100644
--- a/chromium/v8/src/parsing/preparser.cc
+++ b/chromium/v8/src/parsing/preparser.cc
@@ -272,10 +272,9 @@ PreParser::Expression PreParser::ParseFunctionLiteral(
DCHECK_NE(FunctionSyntaxKind::kWrapped, function_syntax_kind);
// Function ::
// '(' FormalParameterList? ')' '{' FunctionBody '}'
- RuntimeCallTimerScope runtime_timer(
- runtime_call_stats_,
- RuntimeCallCounterId::kPreParseWithVariableResolution,
- RuntimeCallStats::kThreadSpecific);
+ RCS_SCOPE(runtime_call_stats_,
+ RuntimeCallCounterId::kPreParseWithVariableResolution,
+ RuntimeCallStats::kThreadSpecific);
base::ElapsedTimer timer;
if (V8_UNLIKELY(FLAG_log_function_events)) timer.Start();
diff --git a/chromium/v8/src/parsing/rewriter.cc b/chromium/v8/src/parsing/rewriter.cc
index dee07632803..35f82e25ab4 100644
--- a/chromium/v8/src/parsing/rewriter.cc
+++ b/chromium/v8/src/parsing/rewriter.cc
@@ -382,10 +382,9 @@ DECLARATION_NODE_LIST(DEF_VISIT)
// Assumes code has been parsed. Mutates the AST, so the AST should not
// continue to be used in the case of failure.
bool Rewriter::Rewrite(ParseInfo* info) {
- RuntimeCallTimerScope runtimeTimer(
- info->runtime_call_stats(),
- RuntimeCallCounterId::kCompileRewriteReturnResult,
- RuntimeCallStats::kThreadSpecific);
+ RCS_SCOPE(info->runtime_call_stats(),
+ RuntimeCallCounterId::kCompileRewriteReturnResult,
+ RuntimeCallStats::kThreadSpecific);
FunctionLiteral* function = info->literal();
DCHECK_NOT_NULL(function);
diff --git a/chromium/v8/src/parsing/scanner-character-streams.cc b/chromium/v8/src/parsing/scanner-character-streams.cc
index 434b83676c0..a5fec69e84c 100644
--- a/chromium/v8/src/parsing/scanner-character-streams.cc
+++ b/chromium/v8/src/parsing/scanner-character-streams.cc
@@ -215,8 +215,7 @@ class ChunkedStream {
const uint8_t* data = nullptr;
size_t length;
{
- RuntimeCallTimerScope scope(stats,
- RuntimeCallCounterId::kGetMoreDataCallback);
+ RCS_SCOPE(stats, RuntimeCallCounterId::kGetMoreDataCallback);
length = source_->GetMoreData(&data);
}
ProcessChunk(data, position, length);
@@ -721,8 +720,7 @@ void Utf8ExternalStreamingStream::FillBufferFromCurrentChunk() {
}
bool Utf8ExternalStreamingStream::FetchChunk() {
- RuntimeCallTimerScope scope(runtime_call_stats(),
- RuntimeCallCounterId::kGetMoreDataCallback);
+ RCS_SCOPE(runtime_call_stats(), RuntimeCallCounterId::kGetMoreDataCallback);
DCHECK_EQ(current_.chunk_no, chunks_.size());
DCHECK(chunks_.empty() || chunks_.back().length != 0);
diff --git a/chromium/v8/src/parsing/scanner.cc b/chromium/v8/src/parsing/scanner.cc
index c5960787629..8ee64a16966 100644
--- a/chromium/v8/src/parsing/scanner.cc
+++ b/chromium/v8/src/parsing/scanner.cc
@@ -586,8 +586,8 @@ Token::Value Scanner::ScanTemplateSpan() {
return result;
}
-template <typename LocalIsolate>
-Handle<String> Scanner::SourceUrl(LocalIsolate* isolate) const {
+template <typename IsolateT>
+Handle<String> Scanner::SourceUrl(IsolateT* isolate) const {
Handle<String> tmp;
if (source_url_.length() > 0) {
tmp = source_url_.Internalize(isolate);
@@ -598,8 +598,8 @@ Handle<String> Scanner::SourceUrl(LocalIsolate* isolate) const {
template Handle<String> Scanner::SourceUrl(Isolate* isolate) const;
template Handle<String> Scanner::SourceUrl(LocalIsolate* isolate) const;
-template <typename LocalIsolate>
-Handle<String> Scanner::SourceMappingUrl(LocalIsolate* isolate) const {
+template <typename IsolateT>
+Handle<String> Scanner::SourceMappingUrl(IsolateT* isolate) const {
Handle<String> tmp;
if (source_mapping_url_.length() > 0) {
tmp = source_mapping_url_.Internalize(isolate);
diff --git a/chromium/v8/src/parsing/scanner.h b/chromium/v8/src/parsing/scanner.h
index 3df234ce54a..d93f581366f 100644
--- a/chromium/v8/src/parsing/scanner.h
+++ b/chromium/v8/src/parsing/scanner.h
@@ -407,10 +407,10 @@ class V8_EXPORT_PRIVATE Scanner {
return ScanTemplateSpan();
}
- template <typename LocalIsolate>
- Handle<String> SourceUrl(LocalIsolate* isolate) const;
- template <typename LocalIsolate>
- Handle<String> SourceMappingUrl(LocalIsolate* isolate) const;
+ template <typename IsolateT>
+ Handle<String> SourceUrl(IsolateT* isolate) const;
+ template <typename IsolateT>
+ Handle<String> SourceMappingUrl(IsolateT* isolate) const;
bool FoundHtmlComment() const { return found_html_comment_; }
diff --git a/chromium/v8/src/profiler/cpu-profiler.cc b/chromium/v8/src/profiler/cpu-profiler.cc
index eba513b39d5..ba9022be1b0 100644
--- a/chromium/v8/src/profiler/cpu-profiler.cc
+++ b/chromium/v8/src/profiler/cpu-profiler.cc
@@ -104,10 +104,11 @@ ProfilingScope::~ProfilingScope() {
ProfilerEventsProcessor::ProfilerEventsProcessor(
Isolate* isolate, Symbolizer* symbolizer,
- ProfilerCodeObserver* code_observer)
+ ProfilerCodeObserver* code_observer, CpuProfilesCollection* profiles)
: Thread(Thread::Options("v8:ProfEvntProc", kProfilerStackSize)),
symbolizer_(symbolizer),
code_observer_(code_observer),
+ profiles_(profiles),
last_code_event_id_(0),
last_processed_code_event_id_(0),
isolate_(isolate) {
@@ -119,9 +120,8 @@ SamplingEventsProcessor::SamplingEventsProcessor(
Isolate* isolate, Symbolizer* symbolizer,
ProfilerCodeObserver* code_observer, CpuProfilesCollection* profiles,
base::TimeDelta period, bool use_precise_sampling)
- : ProfilerEventsProcessor(isolate, symbolizer, code_observer),
+ : ProfilerEventsProcessor(isolate, symbolizer, code_observer, profiles),
sampler_(new CpuSampler(isolate, this)),
- profiles_(profiles),
period_(period),
use_precise_sampling_(use_precise_sampling) {
sampler_->Start();
@@ -188,7 +188,14 @@ void ProfilerEventsProcessor::StopSynchronously() {
bool ProfilerEventsProcessor::ProcessCodeEvent() {
CodeEventsContainer record;
if (events_buffer_.Dequeue(&record)) {
- code_observer_->CodeEventHandlerInternal(record);
+ if (record.generic.type == CodeEventRecord::NATIVE_CONTEXT_MOVE) {
+ NativeContextMoveEventRecord& nc_record =
+ record.NativeContextMoveEventRecord_;
+ profiles_->UpdateNativeContextAddressForCurrentProfiles(
+ nc_record.from_address, nc_record.to_address);
+ } else {
+ code_observer_->CodeEventHandlerInternal(record);
+ }
last_processed_code_event_id_ = record.generic.order;
return true;
}
@@ -202,6 +209,7 @@ void ProfilerEventsProcessor::CodeEventHandler(
case CodeEventRecord::CODE_MOVE:
case CodeEventRecord::CODE_DISABLE_OPT:
case CodeEventRecord::CODE_DELETE:
+ case CodeEventRecord::NATIVE_CONTEXT_MOVE:
Enqueue(evt_rec);
break;
case CodeEventRecord::CODE_DEOPT: {
@@ -224,7 +232,8 @@ void SamplingEventsProcessor::SymbolizeAndAddToProfiles(
symbolizer_->SymbolizeTickSample(record->sample);
profiles_->AddPathToCurrentProfiles(
record->sample.timestamp, symbolized.stack_trace, symbolized.src_line,
- record->sample.update_stats, record->sample.sampling_interval);
+ record->sample.update_stats, record->sample.sampling_interval,
+ reinterpret_cast<Address>(record->sample.context));
}
ProfilerEventsProcessor::SampleProcessingResult
@@ -371,6 +380,7 @@ void ProfilerCodeObserver::CodeEventHandlerInternal(
}
void ProfilerCodeObserver::CreateEntriesForRuntimeCallStats() {
+#ifdef V8_RUNTIME_CALL_STATS
RuntimeCallStats* rcs = isolate_->counters()->runtime_call_stats();
for (int i = 0; i < RuntimeCallStats::kNumberOfCounters; ++i) {
RuntimeCallCounter* counter = rcs->GetCounter(i);
@@ -379,6 +389,7 @@ void ProfilerCodeObserver::CreateEntriesForRuntimeCallStats() {
"native V8Runtime");
code_map_.AddCode(reinterpret_cast<Address>(counter), entry, 1);
}
+#endif // V8_RUNTIME_CALL_STATS
}
void ProfilerCodeObserver::LogBuiltins() {
diff --git a/chromium/v8/src/profiler/cpu-profiler.h b/chromium/v8/src/profiler/cpu-profiler.h
index d605a8c3d3d..ced37e4ade5 100644
--- a/chromium/v8/src/profiler/cpu-profiler.h
+++ b/chromium/v8/src/profiler/cpu-profiler.h
@@ -37,10 +37,14 @@ class Symbolizer;
V(REPORT_BUILTIN, ReportBuiltinEventRecord) \
V(CODE_DELETE, CodeDeleteEventRecord)
+#define VM_EVENTS_TYPE_LIST(V) \
+ CODE_EVENTS_TYPE_LIST(V) \
+ V(NATIVE_CONTEXT_MOVE, NativeContextMoveEventRecord)
+
class CodeEventRecord {
public:
#define DECLARE_TYPE(type, ignore) type,
- enum Type { NONE = 0, CODE_EVENTS_TYPE_LIST(DECLARE_TYPE) };
+ enum Type { NONE = 0, VM_EVENTS_TYPE_LIST(DECLARE_TYPE) };
#undef DECLARE_TYPE
Type type;
@@ -99,6 +103,13 @@ class ReportBuiltinEventRecord : public CodeEventRecord {
V8_INLINE void UpdateCodeMap(CodeMap* code_map);
};
+// Signals that a native context's address has changed.
+class NativeContextMoveEventRecord : public CodeEventRecord {
+ public:
+ Address from_address;
+ Address to_address;
+};
+
// A record type for sending samples from the main thread/signal handler to the
// profiling thread.
class TickSampleEventRecord {
@@ -130,7 +141,7 @@ class CodeEventsContainer {
union {
CodeEventRecord generic;
#define DECLARE_CLASS(ignore, type) type type##_;
- CODE_EVENTS_TYPE_LIST(DECLARE_CLASS)
+ VM_EVENTS_TYPE_LIST(DECLARE_CLASS)
#undef DECLARE_CLASS
};
};
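
    A standalone sketch (illustrative names only) of the X-macro step this
    relies on: VM_EVENTS_TYPE_LIST wraps CODE_EVENTS_TYPE_LIST and appends the
    native-context-move entry, so a single list extension updates both the Type
    enum and the CodeEventsContainer union.

    #define SKETCH_CODE_EVENTS(V) V(CODE_CREATION, CodeCreateRecordSketch)
    #define SKETCH_VM_EVENTS(V) \
      SKETCH_CODE_EVENTS(V)     \
      V(NATIVE_CONTEXT_MOVE, NativeContextMoveRecordSketch)

    #define DECLARE_TYPE_SKETCH(type, ignore) type,
    enum SketchEventType { SKETCH_NONE = 0, SKETCH_VM_EVENTS(DECLARE_TYPE_SKETCH) };
    #undef DECLARE_TYPE_SKETCH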
@@ -174,7 +185,8 @@ class V8_EXPORT_PRIVATE ProfilerEventsProcessor : public base::Thread,
protected:
ProfilerEventsProcessor(Isolate* isolate, Symbolizer* symbolizer,
- ProfilerCodeObserver* code_observer);
+ ProfilerCodeObserver* code_observer,
+ CpuProfilesCollection* profiles);
// Called from events processing thread (Run() method.)
bool ProcessCodeEvent();
@@ -188,6 +200,7 @@ class V8_EXPORT_PRIVATE ProfilerEventsProcessor : public base::Thread,
Symbolizer* symbolizer_;
ProfilerCodeObserver* code_observer_;
+ CpuProfilesCollection* profiles_;
std::atomic_bool running_{true};
base::ConditionVariable running_cond_;
base::Mutex running_mutex_;
@@ -238,7 +251,6 @@ class V8_EXPORT_PRIVATE SamplingEventsProcessor
SamplingCircularQueue<TickSampleEventRecord,
kTickSampleQueueLength> ticks_buffer_;
std::unique_ptr<sampler::Sampler> sampler_;
- CpuProfilesCollection* profiles_;
base::TimeDelta period_; // Samples & code events processing period.
const bool use_precise_sampling_; // Whether or not busy-waiting is used for
// low sampling intervals on Windows.
diff --git a/chromium/v8/src/profiler/heap-profiler.cc b/chromium/v8/src/profiler/heap-profiler.cc
index 8a7ed34d46e..dbe48876d2e 100644
--- a/chromium/v8/src/profiler/heap-profiler.cc
+++ b/chromium/v8/src/profiler/heap-profiler.cc
@@ -81,9 +81,10 @@ v8::EmbedderGraph::Node::Detachedness HeapProfiler::GetDetachedness(
HeapSnapshot* HeapProfiler::TakeSnapshot(
v8::ActivityControl* control,
v8::HeapProfiler::ObjectNameResolver* resolver,
- bool treat_global_objects_as_roots) {
+ bool treat_global_objects_as_roots, bool capture_numeric_value) {
is_taking_snapshot_ = true;
- HeapSnapshot* result = new HeapSnapshot(this, treat_global_objects_as_roots);
+ HeapSnapshot* result = new HeapSnapshot(this, treat_global_objects_as_roots,
+ capture_numeric_value);
{
HeapSnapshotGenerator generator(result, control, resolver, heap());
if (!generator.GenerateSnapshot()) {
diff --git a/chromium/v8/src/profiler/heap-profiler.h b/chromium/v8/src/profiler/heap-profiler.h
index ebf737523cc..fc867e66324 100644
--- a/chromium/v8/src/profiler/heap-profiler.h
+++ b/chromium/v8/src/profiler/heap-profiler.h
@@ -33,7 +33,8 @@ class HeapProfiler : public HeapObjectAllocationTracker {
HeapSnapshot* TakeSnapshot(v8::ActivityControl* control,
v8::HeapProfiler::ObjectNameResolver* resolver,
- bool treat_global_objects_as_roots);
+ bool treat_global_objects_as_roots,
+ bool capture_numeric_value);
bool StartSamplingHeapProfiler(uint64_t sample_interval, int stack_depth,
v8::HeapProfiler::SamplingFlags);
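
    A hedged usage sketch of the embedder-facing side (the public
    v8::HeapProfiler::TakeHeapSnapshot signature with the new trailing
    parameter is assumed for this revision, not shown in this patch):

    v8::HeapProfiler* profiler = isolate->GetHeapProfiler();
    const v8::HeapSnapshot* snapshot = profiler->TakeHeapSnapshot(
        /*control=*/nullptr, /*global_object_name_resolver=*/nullptr,
        /*treat_global_objects_as_roots=*/true, /*capture_numeric_value=*/true);

    With capture_numeric_value enabled, Smi and HeapNumber values show up in
    the snapshot as entries with a textual "value" edge (see the
    heap-snapshot-generator changes below).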
diff --git a/chromium/v8/src/profiler/heap-snapshot-generator.cc b/chromium/v8/src/profiler/heap-snapshot-generator.cc
index 9cc26fa3e20..da4e57fad9a 100644
--- a/chromium/v8/src/profiler/heap-snapshot-generator.cc
+++ b/chromium/v8/src/profiler/heap-snapshot-generator.cc
@@ -183,9 +183,11 @@ const char* HeapEntry::TypeAsString() const {
}
}
-HeapSnapshot::HeapSnapshot(HeapProfiler* profiler, bool global_objects_as_roots)
+HeapSnapshot::HeapSnapshot(HeapProfiler* profiler, bool global_objects_as_roots,
+ bool capture_numeric_value)
: profiler_(profiler),
- treat_global_objects_as_roots_(global_objects_as_roots) {
+ treat_global_objects_as_roots_(global_objects_as_roots),
+ capture_numeric_value_(capture_numeric_value) {
// It is very important to keep objects that form a heap snapshot
// as small as possible. Check assumptions about data structure sizes.
STATIC_ASSERT(kSystemPointerSize != 4 || sizeof(HeapGraphEdge) == 12);
@@ -387,8 +389,7 @@ SnapshotObjectId HeapObjectsMap::FindOrAddEntry(Address addr,
return entry_info.id;
}
entry->value = reinterpret_cast<void*>(entries_.size());
- SnapshotObjectId id = next_id_;
- next_id_ += kObjectIdStep;
+ SnapshotObjectId id = get_next_id();
entries_.push_back(EntryInfo(id, addr, size, accessed));
DCHECK(static_cast<uint32_t>(entries_.size()) > entries_map_.occupancy());
return id;
@@ -553,6 +554,16 @@ HeapEntry* V8HeapExplorer::AllocateEntry(HeapThing ptr) {
return AddEntry(HeapObject::cast(Object(reinterpret_cast<Address>(ptr))));
}
+HeapEntry* V8HeapExplorer::AllocateEntry(Smi smi) {
+ SnapshotObjectId id = heap_object_map_->get_next_id();
+ HeapEntry* entry =
+ snapshot_->AddEntry(HeapEntry::kHeapNumber, "smi number", id, 0, 0);
+ // XXX: Smis do not appear in CombinedHeapObjectIterator, so we need to
+ // extract the references here
+ ExtractNumberReference(entry, smi);
+ return entry;
+}
+
void V8HeapExplorer::ExtractLocation(HeapEntry* entry, HeapObject object) {
if (object.IsJSFunction()) {
JSFunction func = JSFunction::cast(object);
@@ -638,7 +649,7 @@ HeapEntry* V8HeapExplorer::AddEntry(HeapObject object) {
object.IsByteArray()) {
return AddEntry(object, HeapEntry::kArray, "");
} else if (object.IsHeapNumber()) {
- return AddEntry(object, HeapEntry::kHeapNumber, "number");
+ return AddEntry(object, HeapEntry::kHeapNumber, "heap number");
}
return AddEntry(object, HeapEntry::kHidden, GetSystemEntryName(object));
}
@@ -721,6 +732,13 @@ class IndexedReferencesExtractor : public ObjectVisitor {
ObjectSlot end) override {
VisitPointers(host, MaybeObjectSlot(start), MaybeObjectSlot(end));
}
+ void VisitMapPointer(HeapObject object) override {
+ if (generator_->visited_fields_[0]) {
+ generator_->visited_fields_[0] = false;
+ } else {
+ VisitHeapObjectImpl(object.map(), 0);
+ }
+ }
void VisitPointers(HeapObject host, MaybeObjectSlot start,
MaybeObjectSlot end) override {
// [start,end) must be a sub-region of [parent_start_, parent_end), i.e.
@@ -830,6 +848,10 @@ void V8HeapExplorer::ExtractReferences(HeapEntry* entry, HeapObject obj) {
ExtractEphemeronHashTableReferences(entry, EphemeronHashTable::cast(obj));
} else if (obj.IsFixedArray()) {
ExtractFixedArrayReferences(entry, FixedArray::cast(obj));
+ } else if (obj.IsHeapNumber()) {
+ if (snapshot_->capture_numeric_value()) {
+ ExtractNumberReference(entry, obj);
+ }
}
}
@@ -867,7 +889,7 @@ void V8HeapExplorer::ExtractJSObjectReferences(HeapEntry* entry,
} else if (obj.IsJSFunction()) {
JSFunction js_fun = JSFunction::cast(js_obj);
if (js_fun.has_prototype_slot()) {
- Object proto_or_map = js_fun.prototype_or_initial_map();
+ Object proto_or_map = js_fun.prototype_or_initial_map(kAcquireLoad);
if (!proto_or_map.IsTheHole(isolate)) {
if (!proto_or_map.IsMap()) {
SetPropertyReference(entry, roots.prototype_string(), proto_or_map,
@@ -1246,6 +1268,11 @@ class JSArrayBufferDataEntryAllocator : public HeapEntriesAllocator {
HeapEntry::kNative, "system / JSArrayBufferData",
size_);
}
+ HeapEntry* AllocateEntry(Smi smi) override {
+ DCHECK(false);
+ return nullptr;
+ }
+
private:
size_t size_;
V8HeapExplorer* explorer_;
@@ -1291,6 +1318,30 @@ void V8HeapExplorer::ExtractFixedArrayReferences(HeapEntry* entry,
}
}
+void V8HeapExplorer::ExtractNumberReference(HeapEntry* entry, Object number) {
+ DCHECK(number.IsNumber());
+
+ // Must be large enough to fit any double, int, or size_t.
+ char arr[32];
+ Vector<char> buffer(arr, arraysize(arr));
+
+ const char* string;
+ if (number.IsSmi()) {
+ int int_value = Smi::ToInt(number);
+ string = IntToCString(int_value, buffer);
+ } else {
+ double double_value = HeapNumber::cast(number).value();
+ string = DoubleToCString(double_value, buffer);
+ }
+
+ const char* name = names_->GetCopy(string);
+
+ SnapshotObjectId id = heap_object_map_->get_next_id();
+ HeapEntry* child_entry =
+ snapshot_->AddEntry(HeapEntry::kString, name, id, 0, 0);
+ entry->SetNamedReference(HeapGraphEdge::kInternal, "value", child_entry);
+}
+
void V8HeapExplorer::ExtractFeedbackVectorReferences(
HeapEntry* entry, FeedbackVector feedback_vector) {
MaybeObject code = feedback_vector.maybe_optimized_code();
@@ -1345,8 +1396,10 @@ void V8HeapExplorer::ExtractPropertyReferences(JSObject js_obj,
PropertyDetails details = descs.GetDetails(i);
switch (details.location()) {
case kField: {
- Representation r = details.representation();
- if (r.IsSmi() || r.IsDouble()) break;
+ if (!snapshot_->capture_numeric_value()) {
+ Representation r = details.representation();
+ if (r.IsSmi() || r.IsDouble()) break;
+ }
Name k = descs.GetKey(i);
FieldIndex field_index = FieldIndex::ForDescriptor(js_obj.map(), i);
@@ -1476,9 +1529,15 @@ String V8HeapExplorer::GetConstructorName(JSObject object) {
}
HeapEntry* V8HeapExplorer::GetEntry(Object obj) {
- return obj.IsHeapObject() ? generator_->FindOrAddEntry(
- reinterpret_cast<void*>(obj.ptr()), this)
- : nullptr;
+ if (obj.IsHeapObject()) {
+ return generator_->FindOrAddEntry(reinterpret_cast<void*>(obj.ptr()), this);
+ }
+
+ DCHECK(obj.IsSmi());
+ if (!snapshot_->capture_numeric_value()) {
+ return nullptr;
+ }
+ return generator_->FindOrAddEntry(Smi::cast(obj), this);
}
class RootsReferencesExtractor : public RootVisitor {
@@ -1500,6 +1559,7 @@ class RootsReferencesExtractor : public RootVisitor {
void VisitRootPointers(Root root, const char* description,
FullObjectSlot start, FullObjectSlot end) override {
for (FullObjectSlot p = start; p < end; ++p) {
+ DCHECK(!MapWord::IsPacked(p.Relaxed_Load().ptr()));
VisitRootPointer(root, description, p);
}
}
@@ -1649,23 +1709,25 @@ void V8HeapExplorer::SetElementReference(HeapEntry* parent_entry, int index,
void V8HeapExplorer::SetInternalReference(HeapEntry* parent_entry,
const char* reference_name,
Object child_obj, int field_offset) {
- HeapEntry* child_entry = GetEntry(child_obj);
- if (child_entry == nullptr) return;
- if (IsEssentialObject(child_obj)) {
- parent_entry->SetNamedReference(HeapGraphEdge::kInternal, reference_name,
- child_entry);
+ if (!IsEssentialObject(child_obj)) {
+ return;
}
+ HeapEntry* child_entry = GetEntry(child_obj);
+ DCHECK_NOT_NULL(child_entry);
+ parent_entry->SetNamedReference(HeapGraphEdge::kInternal, reference_name,
+ child_entry);
MarkVisitedField(field_offset);
}
void V8HeapExplorer::SetInternalReference(HeapEntry* parent_entry, int index,
Object child_obj, int field_offset) {
- HeapEntry* child_entry = GetEntry(child_obj);
- if (child_entry == nullptr) return;
- if (IsEssentialObject(child_obj)) {
- parent_entry->SetNamedReference(HeapGraphEdge::kInternal,
- names_->GetName(index), child_entry);
+ if (!IsEssentialObject(child_obj)) {
+ return;
}
+ HeapEntry* child_entry = GetEntry(child_obj);
+ DCHECK_NOT_NULL(child_entry);
+ parent_entry->SetNamedReference(HeapGraphEdge::kInternal,
+ names_->GetName(index), child_entry);
MarkVisitedField(field_offset);
}
@@ -1673,9 +1735,13 @@ void V8HeapExplorer::SetHiddenReference(HeapObject parent_obj,
HeapEntry* parent_entry, int index,
Object child_obj, int field_offset) {
DCHECK_EQ(parent_entry, GetEntry(parent_obj));
+ DCHECK(!MapWord::IsPacked(child_obj.ptr()));
+ if (!IsEssentialObject(child_obj)) {
+ return;
+ }
HeapEntry* child_entry = GetEntry(child_obj);
- if (child_entry != nullptr && IsEssentialObject(child_obj) &&
- IsEssentialHiddenReference(parent_obj, field_offset)) {
+ DCHECK_NOT_NULL(child_entry);
+ if (IsEssentialHiddenReference(parent_obj, field_offset)) {
parent_entry->SetIndexedReference(HeapGraphEdge::kHidden, index,
child_entry);
}
@@ -1684,23 +1750,25 @@ void V8HeapExplorer::SetHiddenReference(HeapObject parent_obj,
void V8HeapExplorer::SetWeakReference(HeapEntry* parent_entry,
const char* reference_name,
Object child_obj, int field_offset) {
- HeapEntry* child_entry = GetEntry(child_obj);
- if (child_entry == nullptr) return;
- if (IsEssentialObject(child_obj)) {
- parent_entry->SetNamedReference(HeapGraphEdge::kWeak, reference_name,
- child_entry);
+ if (!IsEssentialObject(child_obj)) {
+ return;
}
+ HeapEntry* child_entry = GetEntry(child_obj);
+ DCHECK_NOT_NULL(child_entry);
+ parent_entry->SetNamedReference(HeapGraphEdge::kWeak, reference_name,
+ child_entry);
MarkVisitedField(field_offset);
}
void V8HeapExplorer::SetWeakReference(HeapEntry* parent_entry, int index,
Object child_obj, int field_offset) {
- HeapEntry* child_entry = GetEntry(child_obj);
- if (child_entry == nullptr) return;
- if (IsEssentialObject(child_obj)) {
- parent_entry->SetNamedReference(
- HeapGraphEdge::kWeak, names_->GetFormatted("%d", index), child_entry);
+ if (!IsEssentialObject(child_obj)) {
+ return;
}
+ HeapEntry* child_entry = GetEntry(child_obj);
+ DCHECK_NOT_NULL(child_entry);
+ parent_entry->SetNamedReference(
+ HeapGraphEdge::kWeak, names_->GetFormatted("%d", index), child_entry);
MarkVisitedField(field_offset);
}
@@ -1758,6 +1826,13 @@ void V8HeapExplorer::SetGcRootsReference(Root root) {
void V8HeapExplorer::SetGcSubrootReference(Root root, const char* description,
bool is_weak, Object child_obj) {
+ if (child_obj.IsSmi()) {
+ // TODO(arenevier): if we handle smis here, the snapshot gets 2 to 3 times
+ // slower on large heaps. According to perf, the bulk of the extra work
+ // happens in the TemplateHashMapImpl::Probe method, when trying to get
+ // names->GetFormatted("%d / %s", index, description)
+ return;
+ }
HeapEntry* child_entry = GetEntry(child_obj);
if (child_entry == nullptr) return;
const char* name = GetStrongGcSubrootName(child_obj);
@@ -1834,6 +1909,7 @@ class GlobalObjectsEnumerator : public RootVisitor {
void VisitRootPointersImpl(Root root, const char* description, TSlot start,
TSlot end) {
for (TSlot p = start; p < end; ++p) {
+ DCHECK(!MapWord::IsPacked(p.Relaxed_Load(isolate_).ptr()));
Object o = p.load(isolate_);
if (!o.IsNativeContext(isolate_)) continue;
JSObject proxy = Context::cast(o).global_proxy();
@@ -1934,6 +2010,7 @@ class EmbedderGraphEntriesAllocator : public HeapEntriesAllocator {
names_(snapshot_->profiler()->names()),
heap_object_map_(snapshot_->profiler()->heap_object_map()) {}
HeapEntry* AllocateEntry(HeapThing ptr) override;
+ HeapEntry* AllocateEntry(Smi smi) override;
private:
HeapSnapshot* snapshot_;
@@ -1984,6 +2061,11 @@ HeapEntry* EmbedderGraphEntriesAllocator::AllocateEntry(HeapThing ptr) {
return heap_entry;
}
+HeapEntry* EmbedderGraphEntriesAllocator::AllocateEntry(Smi smi) {
+ DCHECK(false);
+ return nullptr;
+}
+
NativeObjectsExplorer::NativeObjectsExplorer(
HeapSnapshot* snapshot, SnapshottingProgressReportingInterface* progress)
: isolate_(
diff --git a/chromium/v8/src/profiler/heap-snapshot-generator.h b/chromium/v8/src/profiler/heap-snapshot-generator.h
index 12fd9450a9b..2ab13a99bf3 100644
--- a/chromium/v8/src/profiler/heap-snapshot-generator.h
+++ b/chromium/v8/src/profiler/heap-snapshot-generator.h
@@ -188,7 +188,8 @@ class HeapEntry {
// HeapSnapshotGenerator fills in a HeapSnapshot.
class HeapSnapshot {
public:
- explicit HeapSnapshot(HeapProfiler* profiler, bool global_objects_as_roots);
+ explicit HeapSnapshot(HeapProfiler* profiler, bool global_objects_as_roots,
+ bool capture_numeric_value);
HeapSnapshot(const HeapSnapshot&) = delete;
HeapSnapshot& operator=(const HeapSnapshot&) = delete;
void Delete();
@@ -213,6 +214,7 @@ class HeapSnapshot {
bool treat_global_objects_as_roots() const {
return treat_global_objects_as_roots_;
}
+ bool capture_numeric_value() const { return capture_numeric_value_; }
void AddLocation(HeapEntry* entry, int scriptId, int line, int col);
HeapEntry* AddEntry(HeapEntry::Type type,
@@ -245,6 +247,7 @@ class HeapSnapshot {
std::vector<SourceLocation> locations_;
SnapshotObjectId max_snapshot_js_object_id_ = -1;
bool treat_global_objects_as_roots_;
+ bool capture_numeric_value_;
};
@@ -277,6 +280,10 @@ class HeapObjectsMap {
SnapshotObjectId last_assigned_id() const {
return next_id_ - kObjectIdStep;
}
+ SnapshotObjectId get_next_id() {
+ next_id_ += kObjectIdStep;
+ return next_id_ - kObjectIdStep;
+ }
void StopHeapObjectsTracking();
SnapshotObjectId PushHeapObjectsStats(OutputStream* stream,
@@ -322,6 +329,7 @@ class HeapEntriesAllocator {
public:
virtual ~HeapEntriesAllocator() = default;
virtual HeapEntry* AllocateEntry(HeapThing ptr) = 0;
+ virtual HeapEntry* AllocateEntry(Smi smi) = 0;
};
class SnapshottingProgressReportingInterface {
@@ -342,6 +350,7 @@ class V8_EXPORT_PRIVATE V8HeapExplorer : public HeapEntriesAllocator {
V8HeapExplorer& operator=(const V8HeapExplorer&) = delete;
HeapEntry* AllocateEntry(HeapThing ptr) override;
+ HeapEntry* AllocateEntry(Smi smi) override;
int EstimateObjectsCount();
bool IterateAndExtractReferences(HeapSnapshotGenerator* generator);
void CollectGlobalObjectsTags();
@@ -397,6 +406,7 @@ class V8_EXPORT_PRIVATE V8HeapExplorer : public HeapEntriesAllocator {
void ExtractJSGeneratorObjectReferences(HeapEntry* entry,
JSGeneratorObject generator);
void ExtractFixedArrayReferences(HeapEntry* entry, FixedArray array);
+ void ExtractNumberReference(HeapEntry* entry, Object number);
void ExtractFeedbackVectorReferences(HeapEntry* entry,
FeedbackVector feedback_vector);
void ExtractDescriptorArrayReferences(HeapEntry* entry,
@@ -501,6 +511,9 @@ class HeapSnapshotGenerator : public SnapshottingProgressReportingInterface {
// The HeapEntriesMap instance is used to track a mapping between
// real heap objects and their representations in heap snapshots.
using HeapEntriesMap = std::unordered_map<HeapThing, HeapEntry*>;
+  // The SmiEntriesMap instance is used to track a mapping between smis and
+ // their representations in heap snapshots.
+ using SmiEntriesMap = std::unordered_map<int, HeapEntry*>;
HeapSnapshotGenerator(HeapSnapshot* snapshot,
v8::ActivityControl* control,
@@ -515,16 +528,31 @@ class HeapSnapshotGenerator : public SnapshottingProgressReportingInterface {
return it != entries_map_.end() ? it->second : nullptr;
}
+ HeapEntry* FindEntry(Smi smi) {
+ auto it = smis_map_.find(smi.value());
+ return it != smis_map_.end() ? it->second : nullptr;
+ }
+
HeapEntry* AddEntry(HeapThing ptr, HeapEntriesAllocator* allocator) {
return entries_map_.emplace(ptr, allocator->AllocateEntry(ptr))
.first->second;
}
+ HeapEntry* AddEntry(Smi smi, HeapEntriesAllocator* allocator) {
+ return smis_map_.emplace(smi.value(), allocator->AllocateEntry(smi))
+ .first->second;
+ }
+
HeapEntry* FindOrAddEntry(HeapThing ptr, HeapEntriesAllocator* allocator) {
HeapEntry* entry = FindEntry(ptr);
return entry != nullptr ? entry : AddEntry(ptr, allocator);
}
+ HeapEntry* FindOrAddEntry(Smi smi, HeapEntriesAllocator* allocator) {
+ HeapEntry* entry = FindEntry(smi);
+ return entry != nullptr ? entry : AddEntry(smi, allocator);
+ }
+
private:
bool FillReferences();
void ProgressStep() override;
@@ -537,6 +565,7 @@ class HeapSnapshotGenerator : public SnapshottingProgressReportingInterface {
NativeObjectsExplorer dom_explorer_;
// Mapping from HeapThing pointers to HeapEntry indices.
HeapEntriesMap entries_map_;
+ SmiEntriesMap smis_map_;
// Used during snapshot generation.
int progress_counter_;
int progress_total_;
diff --git a/chromium/v8/src/profiler/profile-generator.cc b/chromium/v8/src/profiler/profile-generator.cc
index 375079de3e8..93075d4f7c2 100644
--- a/chromium/v8/src/profiler/profile-generator.cc
+++ b/chromium/v8/src/profiler/profile-generator.cc
@@ -533,6 +533,12 @@ void ProfileTree::TraverseDepthFirst(Callback* callback) {
}
}
+void ContextFilter::OnMoveEvent(Address from_address, Address to_address) {
+ if (native_context_address() != from_address) return;
+
+ set_native_context_address(to_address);
+}
+
using v8::tracing::TracedValue;
std::atomic<uint32_t> CpuProfile::last_id_;
@@ -557,6 +563,13 @@ CpuProfile::CpuProfile(CpuProfiler* profiler, const char* title,
value->SetDouble("startTime", start_time_.since_origin().InMicroseconds());
TRACE_EVENT_SAMPLE_WITH_ID1(TRACE_DISABLED_BY_DEFAULT("v8.cpu_profiler"),
"Profile", id_, "data", std::move(value));
+
+ DisallowHeapAllocation no_gc;
+ if (options_.has_filter_context()) {
+ i::Address raw_filter_context =
+ reinterpret_cast<i::Address>(options_.raw_filter_context());
+ context_filter_.set_native_context_address(raw_filter_context);
+ }
}
bool CpuProfile::CheckSubsample(base::TimeDelta source_sampling_interval) {
@@ -706,6 +719,8 @@ void CpuProfile::StreamPendingTraceEvents() {
void CpuProfile::FinishProfile() {
end_time_ = base::TimeTicks::HighResolutionNow();
+ // Stop tracking context movements after profiling stops.
+ context_filter_.set_native_context_address(kNullAddress);
StreamPendingTraceEvents();
auto value = TracedValue::Create();
// The endTime timestamp is not converted to Perfetto's clock domain and will
@@ -942,14 +957,26 @@ base::TimeDelta CpuProfilesCollection::GetCommonSamplingInterval() const {
void CpuProfilesCollection::AddPathToCurrentProfiles(
base::TimeTicks timestamp, const ProfileStackTrace& path, int src_line,
- bool update_stats, base::TimeDelta sampling_interval) {
+ bool update_stats, base::TimeDelta sampling_interval,
+ Address native_context_address) {
   // As starting / stopping profiles is rare relative to this
// method, we don't bother minimizing the duration of lock holding,
// e.g. copying contents of the list to a local vector.
current_profiles_semaphore_.Wait();
for (const std::unique_ptr<CpuProfile>& profile : current_profiles_) {
- profile->AddPath(timestamp, path, src_line, update_stats,
- sampling_interval);
+ if (profile->context_filter().Accept(native_context_address)) {
+ profile->AddPath(timestamp, path, src_line, update_stats,
+ sampling_interval);
+ }
+ }
+ current_profiles_semaphore_.Signal();
+}
+
+void CpuProfilesCollection::UpdateNativeContextAddressForCurrentProfiles(
+ Address from, Address to) {
+ current_profiles_semaphore_.Wait();
+ for (const std::unique_ptr<CpuProfile>& profile : current_profiles_) {
+ profile->context_filter().OnMoveEvent(from, to);
}
current_profiles_semaphore_.Signal();
}
diff --git a/chromium/v8/src/profiler/profile-generator.h b/chromium/v8/src/profiler/profile-generator.h
index 551dfdf5917..c4bffa945a0 100644
--- a/chromium/v8/src/profiler/profile-generator.h
+++ b/chromium/v8/src/profiler/profile-generator.h
@@ -237,6 +237,31 @@ struct CodeEntryAndLineNumber {
using ProfileStackTrace = std::vector<CodeEntryAndLineNumber>;
+// Filters stack frames from sources other than a target native context.
+class ContextFilter {
+ public:
+ explicit ContextFilter(Address native_context_address = kNullAddress)
+ : native_context_address_(native_context_address) {}
+
+ // Invoked when a native context has changed address.
+ void OnMoveEvent(Address from_address, Address to_address);
+
+ bool Accept(Address native_context_address) const {
+ if (native_context_address_ == kNullAddress) return true;
+ return (native_context_address & ~kHeapObjectTag) ==
+ native_context_address_;
+ }
+
+ // Update the context's tracked address based on VM-thread events.
+ void set_native_context_address(Address address) {
+ native_context_address_ = address;
+ }
+ Address native_context_address() const { return native_context_address_; }
+
+ private:
+ Address native_context_address_;
+};
+
class ProfileTree;
class V8_EXPORT_PRIVATE ProfileNode {
@@ -386,6 +411,7 @@ class CpuProfile {
base::TimeTicks start_time() const { return start_time_; }
base::TimeTicks end_time() const { return end_time_; }
CpuProfiler* cpu_profiler() const { return profiler_; }
+ ContextFilter& context_filter() { return context_filter_; }
void UpdateTicksScale();
@@ -397,6 +423,7 @@ class CpuProfile {
const char* title_;
const CpuProfilingOptions options_;
std::unique_ptr<DiscardedSamplesDelegate> delegate_;
+ ContextFilter context_filter_;
base::TimeTicks start_time_;
base::TimeTicks end_time_;
std::deque<SampleInfo> samples_;
@@ -486,7 +513,11 @@ class V8_EXPORT_PRIVATE CpuProfilesCollection {
void AddPathToCurrentProfiles(base::TimeTicks timestamp,
const ProfileStackTrace& path, int src_line,
bool update_stats,
- base::TimeDelta sampling_interval);
+ base::TimeDelta sampling_interval,
+ Address native_context_address = kNullAddress);
+
+  // Called from the profile generator thread.
+ void UpdateNativeContextAddressForCurrentProfiles(Address from, Address to);
// Limits the number of profiles that can be simultaneously collected.
static const int kMaxSimultaneousProfiles = 100;
diff --git a/chromium/v8/src/profiler/profiler-listener.cc b/chromium/v8/src/profiler/profiler-listener.cc
index 8b253fb4729..a2cfb8b07b2 100644
--- a/chromium/v8/src/profiler/profiler-listener.cc
+++ b/chromium/v8/src/profiler/profiler-listener.cc
@@ -302,6 +302,13 @@ void ProfilerListener::CodeMoveEvent(AbstractCode from, AbstractCode to) {
DispatchCodeEvent(evt_rec);
}
+void ProfilerListener::NativeContextMoveEvent(Address from, Address to) {
+ CodeEventsContainer evt_rec(CodeEventRecord::NATIVE_CONTEXT_MOVE);
+ evt_rec.NativeContextMoveEventRecord_.from_address = from;
+ evt_rec.NativeContextMoveEventRecord_.to_address = to;
+ DispatchCodeEvent(evt_rec);
+}
+
void ProfilerListener::CodeDisableOptEvent(Handle<AbstractCode> code,
Handle<SharedFunctionInfo> shared) {
CodeEventsContainer evt_rec(CodeEventRecord::CODE_DISABLE_OPT);
diff --git a/chromium/v8/src/profiler/profiler-listener.h b/chromium/v8/src/profiler/profiler-listener.h
index 49e7db32baa..50a9b818936 100644
--- a/chromium/v8/src/profiler/profiler-listener.h
+++ b/chromium/v8/src/profiler/profiler-listener.h
@@ -59,6 +59,7 @@ class V8_EXPORT_PRIVATE ProfilerListener : public CodeEventListener,
Handle<String> source) override;
void CodeMoveEvent(AbstractCode from, AbstractCode to) override;
void SharedFunctionInfoMoveEvent(Address from, Address to) override {}
+ void NativeContextMoveEvent(Address from, Address to) override;
void CodeMovingGCEvent() override {}
void CodeDisableOptEvent(Handle<AbstractCode> code,
Handle<SharedFunctionInfo> shared) override;
diff --git a/chromium/v8/src/profiler/sampling-heap-profiler.cc b/chromium/v8/src/profiler/sampling-heap-profiler.cc
index aef0170bb48..4bef9793abf 100644
--- a/chromium/v8/src/profiler/sampling-heap-profiler.cc
+++ b/chromium/v8/src/profiler/sampling-heap-profiler.cc
@@ -75,7 +75,7 @@ void SamplingHeapProfiler::SampleObject(Address soon_object, size_t size) {
DisallowGarbageCollection no_gc;
// Check if the area is iterable by confirming that it starts with a map.
- DCHECK((*ObjectSlot(soon_object)).IsMap());
+ DCHECK(HeapObject::FromAddress(soon_object).map().IsMap());
HandleScope scope(isolate_);
HeapObject heap_object = HeapObject::FromAddress(soon_object);
diff --git a/chromium/v8/src/profiler/tick-sample.cc b/chromium/v8/src/profiler/tick-sample.cc
index 638aa5545a1..253b80d19e2 100644
--- a/chromium/v8/src/profiler/tick-sample.cc
+++ b/chromium/v8/src/profiler/tick-sample.cc
@@ -7,14 +7,14 @@
#include <cinttypes>
#include "include/v8-profiler.h"
+#include "src/base/sanitizer/asan.h"
+#include "src/base/sanitizer/msan.h"
#include "src/execution/frames-inl.h"
#include "src/execution/simulator.h"
#include "src/execution/vm-state-inl.h"
#include "src/heap/heap-inl.h" // For Heap::code_range.
#include "src/logging/counters.h"
#include "src/profiler/profiler-stats.h"
-#include "src/sanitizer/asan.h"
-#include "src/sanitizer/msan.h"
namespace v8 {
namespace internal {
@@ -177,6 +177,7 @@ DISABLE_ASAN void TickSample::Init(Isolate* v8_isolate,
pc = regs.pc;
frames_count = static_cast<unsigned>(info.frames_count);
has_external_callback = info.external_callback_entry != nullptr;
+ context = info.context;
if (has_external_callback) {
external_callback_entry = info.external_callback_entry;
} else if (frames_count) {
@@ -209,6 +210,7 @@ bool TickSample::GetStackSample(Isolate* v8_isolate, RegisterState* regs,
sample_info->frames_count = 0;
sample_info->vm_state = isolate->current_vm_state();
sample_info->external_callback_entry = nullptr;
+ sample_info->context = nullptr;
if (sample_info->vm_state == GC) return true;
i::Address js_entry_sp = isolate->js_entry_sp();
@@ -232,7 +234,7 @@ bool TickSample::GetStackSample(Isolate* v8_isolate, RegisterState* regs,
// TODO(petermarshall): Code range is always null on ia32 so this check for
// IsNoFrameRegion will never actually run there.
if (regs->pc &&
- isolate->heap()->memory_allocator()->code_range().contains(
+ isolate->heap()->code_region().contains(
reinterpret_cast<i::Address>(regs->pc)) &&
IsNoFrameRegion(reinterpret_cast<i::Address>(regs->pc))) {
// The frame is not setup, so it'd be hard to iterate the stack. Bailout.
@@ -278,6 +280,13 @@ bool TickSample::GetStackSample(Isolate* v8_isolate, RegisterState* regs,
reinterpret_cast<i::Address>(regs->lr),
js_entry_sp);
+ Context top_context = isolate->context();
+ if (top_context.ptr() != i::Context::kNoContext &&
+ top_context.ptr() != i::Context::kInvalidContext) {
+ NativeContext top_native_context = top_context.native_context();
+ sample_info->context = reinterpret_cast<void*>(top_native_context.ptr());
+ }
+
if (it.done()) return true;
size_t i = 0;
@@ -287,15 +296,18 @@ bool TickSample::GetStackSample(Isolate* v8_isolate, RegisterState* regs,
frames[i] = reinterpret_cast<void*>(isolate->c_function());
i++;
}
-
+#ifdef V8_RUNTIME_CALL_STATS
i::RuntimeCallTimer* timer =
isolate->counters()->runtime_call_stats()->current_timer();
+#endif // V8_RUNTIME_CALL_STATS
for (; !it.done() && i < frames_limit; it.Advance()) {
+#ifdef V8_RUNTIME_CALL_STATS
while (timer && reinterpret_cast<i::Address>(timer) < it.frame()->fp() &&
i < frames_limit) {
frames[i++] = reinterpret_cast<void*>(timer->counter());
timer = timer->parent();
}
+#endif // V8_RUNTIME_CALL_STATS
if (i == frames_limit) break;
if (it.frame()->is_interpreted()) {
diff --git a/chromium/v8/src/profiler/tick-sample.h b/chromium/v8/src/profiler/tick-sample.h
index 777c3d192dc..1bfcb7d0971 100644
--- a/chromium/v8/src/profiler/tick-sample.h
+++ b/chromium/v8/src/profiler/tick-sample.h
@@ -90,6 +90,7 @@ struct V8_EXPORT TickSample {
static const unsigned kMaxFramesCountLog2 = 8;
static const unsigned kMaxFramesCount = (1 << kMaxFramesCountLog2) - 1;
void* stack[kMaxFramesCount]; // Call stack.
+ void* context = nullptr; // Address of the incumbent native context.
unsigned frames_count : kMaxFramesCountLog2; // Number of captured frames.
bool has_external_callback : 1;
bool update_stats : 1; // Whether the sample should update aggregated stats.
diff --git a/chromium/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc b/chromium/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
index 0441fe29763..5e24500a261 100644
--- a/chromium/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
+++ b/chromium/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
@@ -395,7 +395,7 @@ void RegExpMacroAssemblerARM64::CheckNotBackReferenceIgnoreCase(
__ Sub(current_input_offset().X(), current_input_offset().X(),
Operand(capture_length, SXTW));
}
- if (masm_->emit_debug_code()) {
+ if (FLAG_debug_code) {
__ Cmp(current_input_offset().X(), Operand(current_input_offset(), SXTW));
__ Ccmp(current_input_offset(), 0, NoFlag, eq);
// The current input offset should be <= 0, and fit in a W register.
@@ -528,7 +528,7 @@ void RegExpMacroAssemblerARM64::CheckNotBackReference(int start_reg,
Operand(capture_length, SXTW));
}
- if (masm_->emit_debug_code()) {
+ if (FLAG_debug_code) {
__ Cmp(current_input_offset().X(), Operand(current_input_offset(), SXTW));
__ Ccmp(current_input_offset(), 0, NoFlag, eq);
// The current input offset should be <= 0, and fit in a W register.
@@ -817,7 +817,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
// Find negative length (offset of start relative to end).
__ Sub(x10, input_start(), input_end());
- if (masm_->emit_debug_code()) {
+ if (FLAG_debug_code) {
// Check that the size of the input string chars is in range.
__ Neg(x11, x10);
__ Cmp(x11, SeqTwoByteString::kMaxCharsSize);
@@ -882,7 +882,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM64::GetCode(Handle<String> source) {
// Get string length.
__ Sub(x10, input_end(), input_start());
- if (masm_->emit_debug_code()) {
+ if (FLAG_debug_code) {
// Check that the size of the input string chars is in range.
__ Cmp(x10, SeqTwoByteString::kMaxCharsSize);
__ Check(ls, AbortReason::kInputStringTooLong);
@@ -1167,7 +1167,7 @@ void RegExpMacroAssemblerARM64::PushBacktrack(Label* label) {
} else {
__ Adr(x10, label, MacroAssembler::kAdrFar);
__ Sub(x10, x10, code_pointer());
- if (masm_->emit_debug_code()) {
+ if (FLAG_debug_code) {
__ Cmp(x10, kWRegMask);
// The code offset has to fit in a W register.
__ Check(ls, AbortReason::kOffsetOutOfRange);
@@ -1322,7 +1322,7 @@ void RegExpMacroAssemblerARM64::ClearRegisters(int reg_from, int reg_to) {
void RegExpMacroAssemblerARM64::WriteStackPointerToRegister(int reg) {
__ Ldr(x10, MemOperand(frame_pointer(), kStackBase));
__ Sub(x10, backtrack_stackpointer(), x10);
- if (masm_->emit_debug_code()) {
+ if (FLAG_debug_code) {
__ Cmp(x10, Operand(w10, SXTW));
// The stack offset needs to fit in a W register.
__ Check(eq, AbortReason::kOffsetOutOfRange);
@@ -1629,7 +1629,7 @@ void RegExpMacroAssemblerARM64::LoadCurrentCharacterUnchecked(int cp_offset,
}
if (cp_offset != 0) {
- if (masm_->emit_debug_code()) {
+ if (FLAG_debug_code) {
__ Mov(x10, cp_offset * char_size());
__ Add(x10, x10, Operand(current_input_offset(), SXTW));
__ Cmp(x10, Operand(w10, SXTW));
diff --git a/chromium/v8/src/regexp/ppc/OWNERS b/chromium/v8/src/regexp/ppc/OWNERS
deleted file mode 100644
index 02c2cd757c9..00000000000
--- a/chromium/v8/src/regexp/ppc/OWNERS
+++ /dev/null
@@ -1,5 +0,0 @@
-junyan@redhat.com
-joransiu@ca.ibm.com
-midawson@redhat.com
-mfarazma@redhat.com
-vasili.skurydzin@ibm.com
diff --git a/chromium/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc b/chromium/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
index c0d69297f99..77a09526372 100644
--- a/chromium/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
+++ b/chromium/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
@@ -162,7 +162,7 @@ void RegExpMacroAssemblerPPC::AdvanceRegister(int reg, int by) {
DCHECK_LE(0, reg);
DCHECK_GT(num_registers_, reg);
if (by != 0) {
- __ LoadP(r3, register_location(reg), r0);
+ __ LoadU64(r3, register_location(reg), r0);
__ mov(r0, Operand(by));
__ add(r3, r3, r0);
__ StoreP(r3, register_location(reg), r0);
@@ -174,7 +174,7 @@ void RegExpMacroAssemblerPPC::Backtrack() {
CheckPreemption();
if (has_backtrack_limit()) {
Label next;
- __ LoadP(r3, MemOperand(frame_pointer(), kBacktrackCount), r0);
+ __ LoadU64(r3, MemOperand(frame_pointer(), kBacktrackCount), r0);
__ addi(r3, r3, Operand(1));
__ StoreP(r3, MemOperand(frame_pointer(), kBacktrackCount), r0);
__ mov(r0, Operand(backtrack_limit()));
@@ -213,7 +213,7 @@ void RegExpMacroAssemblerPPC::CheckCharacterGT(uc16 limit, Label* on_greater) {
}
void RegExpMacroAssemblerPPC::CheckAtStart(int cp_offset, Label* on_at_start) {
- __ LoadP(r4, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ LoadU64(r4, MemOperand(frame_pointer(), kStringStartMinusOne));
__ addi(r3, current_input_offset(),
Operand(-char_size() + cp_offset * char_size()));
__ cmp(r3, r4);
@@ -222,7 +222,7 @@ void RegExpMacroAssemblerPPC::CheckAtStart(int cp_offset, Label* on_at_start) {
void RegExpMacroAssemblerPPC::CheckNotAtStart(int cp_offset,
Label* on_not_at_start) {
- __ LoadP(r4, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ LoadU64(r4, MemOperand(frame_pointer(), kStringStartMinusOne));
__ addi(r3, current_input_offset(),
Operand(-char_size() + cp_offset * char_size()));
__ cmp(r3, r4);
@@ -238,7 +238,7 @@ void RegExpMacroAssemblerPPC::CheckCharacterLT(uc16 limit, Label* on_less) {
void RegExpMacroAssemblerPPC::CheckGreedyLoop(Label* on_equal) {
Label backtrack_non_equal;
- __ LoadP(r3, MemOperand(backtrack_stackpointer(), 0));
+ __ LoadU64(r3, MemOperand(backtrack_stackpointer(), 0));
__ cmp(current_input_offset(), r3);
__ bne(&backtrack_non_equal);
__ addi(backtrack_stackpointer(), backtrack_stackpointer(),
@@ -251,8 +251,9 @@ void RegExpMacroAssemblerPPC::CheckGreedyLoop(Label* on_equal) {
void RegExpMacroAssemblerPPC::CheckNotBackReferenceIgnoreCase(
int start_reg, bool read_backward, bool unicode, Label* on_no_match) {
Label fallthrough;
- __ LoadP(r3, register_location(start_reg), r0); // Index of start of capture
- __ LoadP(r4, register_location(start_reg + 1), r0); // Index of end
+ __ LoadU64(r3, register_location(start_reg),
+ r0); // Index of start of capture
+ __ LoadU64(r4, register_location(start_reg + 1), r0); // Index of end
__ sub(r4, r4, r3, LeaveOE, SetRC); // Length of capture.
// At this point, the capture registers are either both set or both cleared.
@@ -262,7 +263,7 @@ void RegExpMacroAssemblerPPC::CheckNotBackReferenceIgnoreCase(
// Check that there are enough characters left in the input.
if (read_backward) {
- __ LoadP(r6, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ LoadU64(r6, MemOperand(frame_pointer(), kStringStartMinusOne));
__ add(r6, r6, r4);
__ cmp(current_input_offset(), r6);
BranchOrBacktrack(le, on_no_match);
@@ -325,9 +326,10 @@ void RegExpMacroAssemblerPPC::CheckNotBackReferenceIgnoreCase(
// Compute new value of character position after the matched part.
__ sub(current_input_offset(), r5, end_of_input_address());
if (read_backward) {
- __ LoadP(r3, register_location(start_reg)); // Index of start of capture
- __ LoadP(r4,
- register_location(start_reg + 1)); // Index of end of capture
+ __ LoadU64(r3,
+ register_location(start_reg)); // Index of start of capture
+ __ LoadU64(r4,
+ register_location(start_reg + 1)); // Index of end of capture
__ add(current_input_offset(), current_input_offset(), r3);
__ sub(current_input_offset(), current_input_offset(), r4);
}
@@ -391,8 +393,8 @@ void RegExpMacroAssemblerPPC::CheckNotBackReference(int start_reg,
Label fallthrough;
// Find length of back-referenced capture.
- __ LoadP(r3, register_location(start_reg), r0);
- __ LoadP(r4, register_location(start_reg + 1), r0);
+ __ LoadU64(r3, register_location(start_reg), r0);
+ __ LoadU64(r4, register_location(start_reg + 1), r0);
__ sub(r4, r4, r3, LeaveOE, SetRC); // Length to check.
// At this point, the capture registers are either both set or both cleared.
@@ -402,7 +404,7 @@ void RegExpMacroAssemblerPPC::CheckNotBackReference(int start_reg,
// Check that there are enough characters left in the input.
if (read_backward) {
- __ LoadP(r6, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ LoadU64(r6, MemOperand(frame_pointer(), kStringStartMinusOne));
__ add(r6, r6, r4);
__ cmp(current_input_offset(), r6);
BranchOrBacktrack(le, on_no_match);
@@ -442,8 +444,9 @@ void RegExpMacroAssemblerPPC::CheckNotBackReference(int start_reg,
// Move current character position to position after match.
__ sub(current_input_offset(), r5, end_of_input_address());
if (read_backward) {
- __ LoadP(r3, register_location(start_reg)); // Index of start of capture
- __ LoadP(r4, register_location(start_reg + 1)); // Index of end of capture
+ __ LoadU64(r3, register_location(start_reg)); // Index of start of capture
+ __ LoadU64(r4,
+ register_location(start_reg + 1)); // Index of end of capture
__ add(current_input_offset(), current_input_offset(), r3);
__ sub(current_input_offset(), current_input_offset(), r4);
}
@@ -715,7 +718,7 @@ Handle<HeapObject> RegExpMacroAssemblerPPC::GetCode(Handle<String> source) {
ExternalReference stack_limit =
ExternalReference::address_of_jslimit(isolate());
__ mov(r3, Operand(stack_limit));
- __ LoadP(r3, MemOperand(r3));
+ __ LoadU64(r3, MemOperand(r3));
__ sub(r3, sp, r3, LeaveOE, SetRC);
// Handle it if the stack pointer is already below the stack limit.
__ ble(&stack_limit_hit, cr0);
@@ -739,14 +742,14 @@ Handle<HeapObject> RegExpMacroAssemblerPPC::GetCode(Handle<String> source) {
// Allocate space on stack for registers.
__ Add(sp, sp, -num_registers_ * kSystemPointerSize, r0);
// Load string end.
- __ LoadP(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
+ __ LoadU64(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
// Load input start.
- __ LoadP(r3, MemOperand(frame_pointer(), kInputStart));
+ __ LoadU64(r3, MemOperand(frame_pointer(), kInputStart));
// Find negative length (offset of start relative to end).
__ sub(current_input_offset(), r3, end_of_input_address());
// Set r3 to address of char before start of the input string
// (effectively string position -1).
- __ LoadP(r4, MemOperand(frame_pointer(), kStartIndex));
+ __ LoadU64(r4, MemOperand(frame_pointer(), kStartIndex));
__ subi(r3, current_input_offset(), Operand(char_size()));
if (mode_ == UC16) {
__ ShiftLeftImm(r0, r4, Operand(1));
@@ -795,8 +798,8 @@ Handle<HeapObject> RegExpMacroAssemblerPPC::GetCode(Handle<String> source) {
}
// Initialize backtrack stack pointer.
- __ LoadP(backtrack_stackpointer(),
- MemOperand(frame_pointer(), kStackHighEnd));
+ __ LoadU64(backtrack_stackpointer(),
+ MemOperand(frame_pointer(), kStackHighEnd));
__ b(&start_label_);
@@ -806,9 +809,9 @@ Handle<HeapObject> RegExpMacroAssemblerPPC::GetCode(Handle<String> source) {
__ bind(&success_label_);
if (num_saved_registers_ > 0) {
// copy captures to output
- __ LoadP(r4, MemOperand(frame_pointer(), kInputStart));
- __ LoadP(r3, MemOperand(frame_pointer(), kRegisterOutput));
- __ LoadP(r5, MemOperand(frame_pointer(), kStartIndex));
+ __ LoadU64(r4, MemOperand(frame_pointer(), kInputStart));
+ __ LoadU64(r3, MemOperand(frame_pointer(), kRegisterOutput));
+ __ LoadU64(r5, MemOperand(frame_pointer(), kStartIndex));
__ sub(r4, end_of_input_address(), r4);
// r4 is length of input in bytes.
if (mode_ == UC16) {
@@ -823,8 +826,8 @@ Handle<HeapObject> RegExpMacroAssemblerPPC::GetCode(Handle<String> source) {
// unroll the loop once to add an operation between a load of a register
// and the following use of that register.
for (int i = 0; i < num_saved_registers_; i += 2) {
- __ LoadP(r5, register_location(i), r0);
- __ LoadP(r6, register_location(i + 1), r0);
+ __ LoadU64(r5, register_location(i), r0);
+ __ LoadU64(r6, register_location(i + 1), r0);
if (i == 0 && global_with_zero_length_check()) {
// Keep capture start in r25 for the zero-length check later.
__ mr(r25, r5);
@@ -847,9 +850,9 @@ Handle<HeapObject> RegExpMacroAssemblerPPC::GetCode(Handle<String> source) {
if (global()) {
// Restart matching if the regular expression is flagged as global.
- __ LoadP(r3, MemOperand(frame_pointer(), kSuccessfulCaptures));
- __ LoadP(r4, MemOperand(frame_pointer(), kNumOutputRegisters));
- __ LoadP(r5, MemOperand(frame_pointer(), kRegisterOutput));
+ __ LoadU64(r3, MemOperand(frame_pointer(), kSuccessfulCaptures));
+ __ LoadU64(r4, MemOperand(frame_pointer(), kNumOutputRegisters));
+ __ LoadU64(r5, MemOperand(frame_pointer(), kRegisterOutput));
// Increment success counter.
__ addi(r3, r3, Operand(1));
__ StoreP(r3, MemOperand(frame_pointer(), kSuccessfulCaptures));
@@ -866,7 +869,7 @@ Handle<HeapObject> RegExpMacroAssemblerPPC::GetCode(Handle<String> source) {
__ StoreP(r5, MemOperand(frame_pointer(), kRegisterOutput));
// Prepare r3 to initialize registers with its value in the next run.
- __ LoadP(r3, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ LoadU64(r3, MemOperand(frame_pointer(), kStringStartMinusOne));
if (global_with_zero_length_check()) {
// Special case for zero-length matches.
@@ -894,7 +897,7 @@ Handle<HeapObject> RegExpMacroAssemblerPPC::GetCode(Handle<String> source) {
// Exit and return r3
__ bind(&exit_label_);
if (global()) {
- __ LoadP(r3, MemOperand(frame_pointer(), kSuccessfulCaptures));
+ __ LoadU64(r3, MemOperand(frame_pointer(), kSuccessfulCaptures));
}
__ bind(&return_r3);
@@ -925,7 +928,8 @@ Handle<HeapObject> RegExpMacroAssemblerPPC::GetCode(Handle<String> source) {
__ bne(&return_r3);
// String might have moved: Reload end of string from frame.
- __ LoadP(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
+ __ LoadU64(end_of_input_address(),
+ MemOperand(frame_pointer(), kInputEnd));
SafeReturn();
}
@@ -984,7 +988,7 @@ void RegExpMacroAssemblerPPC::GoTo(Label* to) { BranchOrBacktrack(al, to); }
void RegExpMacroAssemblerPPC::IfRegisterGE(int reg, int comparand,
Label* if_ge) {
- __ LoadP(r3, register_location(reg), r0);
+ __ LoadU64(r3, register_location(reg), r0);
__ Cmpi(r3, Operand(comparand), r0);
BranchOrBacktrack(ge, if_ge);
}
@@ -992,14 +996,14 @@ void RegExpMacroAssemblerPPC::IfRegisterGE(int reg, int comparand,
void RegExpMacroAssemblerPPC::IfRegisterLT(int reg, int comparand,
Label* if_lt) {
- __ LoadP(r3, register_location(reg), r0);
+ __ LoadU64(r3, register_location(reg), r0);
__ Cmpi(r3, Operand(comparand), r0);
BranchOrBacktrack(lt, if_lt);
}
void RegExpMacroAssemblerPPC::IfRegisterEqPos(int reg, Label* if_eq) {
- __ LoadP(r3, register_location(reg), r0);
+ __ LoadU64(r3, register_location(reg), r0);
__ cmp(r3, current_input_offset());
BranchOrBacktrack(eq, if_eq);
}
@@ -1036,20 +1040,20 @@ void RegExpMacroAssemblerPPC::PushCurrentPosition() {
void RegExpMacroAssemblerPPC::PushRegister(int register_index,
StackCheckFlag check_stack_limit) {
- __ LoadP(r3, register_location(register_index), r0);
+ __ LoadU64(r3, register_location(register_index), r0);
Push(r3);
if (check_stack_limit) CheckStackLimit();
}
void RegExpMacroAssemblerPPC::ReadCurrentPositionFromRegister(int reg) {
- __ LoadP(current_input_offset(), register_location(reg), r0);
+ __ LoadU64(current_input_offset(), register_location(reg), r0);
}
void RegExpMacroAssemblerPPC::ReadStackPointerFromRegister(int reg) {
- __ LoadP(backtrack_stackpointer(), register_location(reg), r0);
- __ LoadP(r3, MemOperand(frame_pointer(), kStackHighEnd));
+ __ LoadU64(backtrack_stackpointer(), register_location(reg), r0);
+ __ LoadU64(r3, MemOperand(frame_pointer(), kStackHighEnd));
__ add(backtrack_stackpointer(), backtrack_stackpointer(), r3);
}
@@ -1094,7 +1098,7 @@ void RegExpMacroAssemblerPPC::WriteCurrentPositionToRegister(int reg,
void RegExpMacroAssemblerPPC::ClearRegisters(int reg_from, int reg_to) {
DCHECK(reg_from <= reg_to);
- __ LoadP(r3, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ LoadU64(r3, MemOperand(frame_pointer(), kStringStartMinusOne));
for (int reg = reg_from; reg <= reg_to; reg++) {
__ StoreP(r3, register_location(reg), r0);
}
@@ -1102,7 +1106,7 @@ void RegExpMacroAssemblerPPC::ClearRegisters(int reg_from, int reg_to) {
void RegExpMacroAssemblerPPC::WriteStackPointerToRegister(int reg) {
- __ LoadP(r4, MemOperand(frame_pointer(), kStackHighEnd));
+ __ LoadU64(r4, MemOperand(frame_pointer(), kStackHighEnd));
__ sub(r3, backtrack_stackpointer(), r4);
__ StoreP(r3, register_location(reg), r0);
}
@@ -1160,7 +1164,7 @@ void RegExpMacroAssemblerPPC::CallCheckStackGuardState(Register scratch) {
// Restore the stack pointer
stack_space = kNumRequiredStackFrameSlots + stack_passed_arguments;
if (frame_alignment > kSystemPointerSize) {
- __ LoadP(sp, MemOperand(sp, stack_space * kSystemPointerSize));
+ __ LoadU64(sp, MemOperand(sp, stack_space * kSystemPointerSize));
} else {
__ addi(sp, sp, Operand(stack_space * kSystemPointerSize));
}
@@ -1213,7 +1217,7 @@ void RegExpMacroAssemblerPPC::CheckPosition(int cp_offset,
__ Cmpi(current_input_offset(), Operand(-cp_offset * char_size()), r0);
BranchOrBacktrack(ge, on_outside_input);
} else {
- __ LoadP(r4, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ LoadU64(r4, MemOperand(frame_pointer(), kStringStartMinusOne));
__ addi(r3, current_input_offset(), Operand(cp_offset * char_size()));
__ cmp(r3, r4);
BranchOrBacktrack(le, on_outside_input);
@@ -1271,7 +1275,7 @@ void RegExpMacroAssemblerPPC::Push(Register source) {
void RegExpMacroAssemblerPPC::Pop(Register target) {
DCHECK(target != backtrack_stackpointer());
- __ LoadP(target, MemOperand(backtrack_stackpointer()));
+ __ LoadU64(target, MemOperand(backtrack_stackpointer()));
__ addi(backtrack_stackpointer(), backtrack_stackpointer(),
Operand(kSystemPointerSize));
}
@@ -1282,7 +1286,7 @@ void RegExpMacroAssemblerPPC::CheckPreemption() {
ExternalReference stack_limit =
ExternalReference::address_of_jslimit(isolate());
__ mov(r3, Operand(stack_limit));
- __ LoadP(r3, MemOperand(r3));
+ __ LoadU64(r3, MemOperand(r3));
__ cmpl(sp, r3);
SafeCall(&check_preempt_label_, le);
}
@@ -1292,7 +1296,7 @@ void RegExpMacroAssemblerPPC::CheckStackLimit() {
ExternalReference stack_limit =
ExternalReference::address_of_regexp_stack_limit_address(isolate());
__ mov(r3, Operand(stack_limit));
- __ LoadP(r3, MemOperand(r3));
+ __ LoadU64(r3, MemOperand(r3));
__ cmpl(backtrack_stackpointer(), r3);
SafeCall(&stack_overflow_label_, le);
}
diff --git a/chromium/v8/src/regexp/regexp-ast.cc b/chromium/v8/src/regexp/regexp-ast.cc
index dd9e719c948..aaca95b64c9 100644
--- a/chromium/v8/src/regexp/regexp-ast.cc
+++ b/chromium/v8/src/regexp/regexp-ast.cc
@@ -293,14 +293,12 @@ void* RegExpUnparser::VisitEmpty(RegExpEmpty* that, void* data) {
return nullptr;
}
-
-std::ostream& RegExpTree::Print(std::ostream& os, Zone* zone) { // NOLINT
+std::ostream& RegExpTree::Print(std::ostream& os, Zone* zone) {
RegExpUnparser unparser(os, zone);
Accept(&unparser, nullptr);
return os;
}
-
RegExpDisjunction::RegExpDisjunction(ZoneList<RegExpTree*>* alternatives)
: alternatives_(alternatives) {
DCHECK_LT(1, alternatives->length());
diff --git a/chromium/v8/src/regexp/regexp-ast.h b/chromium/v8/src/regexp/regexp-ast.h
index c038c640f49..89f523bf2e8 100644
--- a/chromium/v8/src/regexp/regexp-ast.h
+++ b/chromium/v8/src/regexp/regexp-ast.h
@@ -214,8 +214,7 @@ class RegExpTree : public ZoneObject {
// expression.
virtual Interval CaptureRegisters() { return Interval::Empty(); }
virtual void AppendToText(RegExpText* text, Zone* zone);
- V8_EXPORT_PRIVATE std::ostream& Print(std::ostream& os,
- Zone* zone); // NOLINT
+ V8_EXPORT_PRIVATE std::ostream& Print(std::ostream& os, Zone* zone);
#define MAKE_ASTYPE(Name) \
virtual RegExp##Name* As##Name(); \
virtual bool Is##Name();
diff --git a/chromium/v8/src/regexp/regexp-bytecodes.h b/chromium/v8/src/regexp/regexp-bytecodes.h
index e3248d7b837..088fd93b231 100644
--- a/chromium/v8/src/regexp/regexp-bytecodes.h
+++ b/chromium/v8/src/regexp/regexp-bytecodes.h
@@ -231,7 +231,7 @@ static constexpr int kRegExpBytecodeLengths[] = {
};
inline constexpr int RegExpBytecodeLength(int bytecode) {
- CONSTEXPR_DCHECK(base::IsInRange(bytecode, 0, kRegExpBytecodeCount - 1));
+ DCHECK(base::IsInRange(bytecode, 0, kRegExpBytecodeCount - 1));
return kRegExpBytecodeLengths[bytecode];
}
@@ -242,7 +242,7 @@ static constexpr const char* const kRegExpBytecodeNames[] = {
};
inline constexpr const char* RegExpBytecodeName(int bytecode) {
- CONSTEXPR_DCHECK(base::IsInRange(bytecode, 0, kRegExpBytecodeCount - 1));
+ DCHECK(base::IsInRange(bytecode, 0, kRegExpBytecodeCount - 1));
return kRegExpBytecodeNames[bytecode];
}
diff --git a/chromium/v8/src/regexp/regexp-compiler.cc b/chromium/v8/src/regexp/regexp-compiler.cc
index c743ee25631..36661f45f2c 100644
--- a/chromium/v8/src/regexp/regexp-compiler.cc
+++ b/chromium/v8/src/regexp/regexp-compiler.cc
@@ -1435,9 +1435,11 @@ EatsAtLeastInfo LoopChoiceNode::EatsAtLeastFromLoopEntry() {
DCHECK_EQ(alternatives_->length(), 2); // There's just loop and continue.
if (read_backward()) {
- // Can't do anything special for a backward loop, so return the basic values
- // that we got during analysis.
- return *eats_at_least_info();
+ // The eats_at_least value is not used if reading backward. The
+ // EatsAtLeastPropagator should've zeroed it as well.
+ DCHECK_EQ(eats_at_least_info()->eats_at_least_from_possibly_start, 0);
+ DCHECK_EQ(eats_at_least_info()->eats_at_least_from_not_start, 0);
+ return {};
}
// Figure out how much the loop body itself eats, not including anything in
@@ -3531,14 +3533,23 @@ class EatsAtLeastPropagator : public AllStatic {
}
static void VisitAction(ActionNode* that) {
- // POSITIVE_SUBMATCH_SUCCESS rewinds input, so we must not consider
- // successor nodes for eats_at_least. SET_REGISTER_FOR_LOOP indicates a loop
- // entry point, which means the loop body will run at least the minimum
- // number of times before the continuation case can run. Otherwise the
- // current node eats at least as much as its successor.
+ // - BEGIN_SUBMATCH and POSITIVE_SUBMATCH_SUCCESS wrap lookarounds.
+ // Lookarounds rewind input, so their eats_at_least value must not
+ // propagate to surroundings.
+ // TODO(jgruber): Instead of resetting EAL to 0 at lookaround boundaries,
+    // the analysis should skip over the lookaround and look at whatever
+ // follows the lookaround. A simple solution would be to store a pointer to
+ // the associated POSITIVE_SUBMATCH_SUCCESS node in the BEGIN_SUBMATCH
+ // node, and use that during analysis.
+ // - SET_REGISTER_FOR_LOOP indicates a loop entry point, which means the
+ // loop body will run at least the minimum number of times before the
+ // continuation case can run. Otherwise the current node eats at least as
+ // much as its successor.
switch (that->action_type()) {
+ case ActionNode::BEGIN_SUBMATCH:
case ActionNode::POSITIVE_SUBMATCH_SUCCESS:
- break; // Was already initialized to zero.
+ DCHECK(that->eats_at_least_info()->IsZero());
+ break;
case ActionNode::SET_REGISTER_FOR_LOOP:
that->set_eats_at_least_info(
that->on_success()->EatsAtLeastFromLoopEntry());
@@ -3560,7 +3571,10 @@ class EatsAtLeastPropagator : public AllStatic {
}
static void VisitLoopChoiceContinueNode(LoopChoiceNode* that) {
- that->set_eats_at_least_info(*that->continue_node()->eats_at_least_info());
+ if (!that->read_backward()) {
+ that->set_eats_at_least_info(
+ *that->continue_node()->eats_at_least_info());
+ }
}
static void VisitLoopChoiceLoopNode(LoopChoiceNode* that) {}
diff --git a/chromium/v8/src/regexp/regexp-dotprinter.cc b/chromium/v8/src/regexp/regexp-dotprinter.cc
index 4412960b3dc..b708e8d7355 100644
--- a/chromium/v8/src/regexp/regexp-dotprinter.cc
+++ b/chromium/v8/src/regexp/regexp-dotprinter.cc
@@ -60,8 +60,7 @@ void DotPrinterImpl::PrintOnFailure(RegExpNode* from, RegExpNode* on_failure) {
class AttributePrinter {
public:
- explicit AttributePrinter(std::ostream& os) // NOLINT
- : os_(os), first_(true) {}
+ explicit AttributePrinter(std::ostream& os) : os_(os), first_(true) {}
void PrintSeparator() {
if (first_) {
first_ = false;
diff --git a/chromium/v8/src/regexp/regexp-macro-assembler.cc b/chromium/v8/src/regexp/regexp-macro-assembler.cc
index db45e21ca04..36ef75bc422 100644
--- a/chromium/v8/src/regexp/regexp-macro-assembler.cc
+++ b/chromium/v8/src/regexp/regexp-macro-assembler.cc
@@ -309,10 +309,9 @@ int NativeRegExpMacroAssembler::Execute(
RegExp::CallOrigin call_origin = RegExp::CallOrigin::kFromRuntime;
using RegexpMatcherSig = int(
- Address input_string, int start_offset, // NOLINT(readability/casting)
- const byte* input_start, const byte* input_end, int* output,
- int output_size, Address stack_base, int call_origin, Isolate* isolate,
- Address regexp);
+ Address input_string, int start_offset, const byte* input_start,
+ const byte* input_end, int* output, int output_size, Address stack_base,
+ int call_origin, Isolate* isolate, Address regexp);
auto fn = GeneratedCode<RegexpMatcherSig>::FromCode(code);
int result =
diff --git a/chromium/v8/src/regexp/regexp-nodes.h b/chromium/v8/src/regexp/regexp-nodes.h
index 9b863ef2396..5a7df3df6bf 100644
--- a/chromium/v8/src/regexp/regexp-nodes.h
+++ b/chromium/v8/src/regexp/regexp-nodes.h
@@ -109,6 +109,11 @@ struct EatsAtLeastInfo final {
}
}
+ bool IsZero() const {
+ return eats_at_least_from_possibly_start == 0 &&
+ eats_at_least_from_not_start == 0;
+ }
+
// Any successful match starting from the current node will consume at least
// this many characters. This does not necessarily mean that there is a
// possible match with exactly this many characters, but we generally try to
diff --git a/chromium/v8/src/regexp/x64/regexp-macro-assembler-x64.cc b/chromium/v8/src/regexp/x64/regexp-macro-assembler-x64.cc
index 79574ca9932..a02cccb96a4 100644
--- a/chromium/v8/src/regexp/x64/regexp-macro-assembler-x64.cc
+++ b/chromium/v8/src/regexp/x64/regexp-macro-assembler-x64.cc
@@ -356,8 +356,7 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
// Isolate.
__ LoadAddress(arg_reg_4, ExternalReference::isolate_address(isolate()));
- { // NOLINT: Can't find a way to open this scope without confusing the
- // linter.
+ {
AllowExternalCallThatCantCauseGC scope(&masm_);
ExternalReference compare =
unicode ? ExternalReference::re_case_insensitive_compare_unicode(
@@ -672,7 +671,7 @@ bool RegExpMacroAssemblerX64::CheckSpecialCharacterClass(uc16 type,
void RegExpMacroAssemblerX64::Fail() {
STATIC_ASSERT(FAILURE == 0); // Return value for failure is zero.
if (!global()) {
- __ Set(rax, FAILURE);
+ __ Move(rax, FAILURE);
}
__ jmp(&exit_label_);
}
@@ -750,7 +749,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ j(above_equal, &stack_ok);
// Exit with OutOfMemory exception. There is not enough space on the stack
// for our working registers.
- __ Set(rax, EXCEPTION);
+ __ Move(rax, EXCEPTION);
__ jmp(&return_rax);
__ bind(&stack_limit_hit);
@@ -790,7 +789,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
// Load newline if index is at start, previous character otherwise.
__ cmpl(Operand(rbp, kStartIndex), Immediate(0));
__ j(not_equal, &load_char_start_regexp, Label::kNear);
- __ Set(current_character(), '\n');
+ __ Move(current_character(), '\n');
__ jmp(&start_regexp, Label::kNear);
// Global regexp restarts matching here.
@@ -805,7 +804,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
// Fill in stack push order, to avoid accessing across an unwritten
// page (a problem on Windows).
if (num_saved_registers_ > 8) {
- __ Set(rcx, kRegisterZero);
+ __ Move(rcx, kRegisterZero);
Label init_loop;
__ bind(&init_loop);
__ movq(Operand(rbp, rcx, times_1, 0), rax);
@@ -1002,13 +1001,13 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
// If any of the code above needed to exit with an exception.
__ bind(&exit_with_exception);
// Exit with Result EXCEPTION(-1) to signal thrown exception.
- __ Set(rax, EXCEPTION);
+ __ Move(rax, EXCEPTION);
__ jmp(&return_rax);
}
if (fallback_label_.is_linked()) {
__ bind(&fallback_label_);
- __ Set(rax, FALLBACK_TO_EXPERIMENTAL);
+ __ Move(rax, FALLBACK_TO_EXPERIMENTAL);
__ jmp(&return_rax);
}
diff --git a/chromium/v8/src/roots/roots.cc b/chromium/v8/src/roots/roots.cc
index 3fdecfe0bf1..e79fdb31099 100644
--- a/chromium/v8/src/roots/roots.cc
+++ b/chromium/v8/src/roots/roots.cc
@@ -17,6 +17,10 @@ const char* RootsTable::root_names_[RootsTable::kEntriesCount] = {
#undef ROOT_NAME
};
+MapWord ReadOnlyRoots::one_pointer_filler_map_word() {
+ return MapWord::FromMap(one_pointer_filler_map());
+}
+
void ReadOnlyRoots::Iterate(RootVisitor* visitor) {
visitor->VisitRootPointers(Root::kReadOnlyRootList, nullptr,
FullObjectSlot(read_only_roots_),
diff --git a/chromium/v8/src/roots/roots.h b/chromium/v8/src/roots/roots.h
index 64758f5efaf..cfa0f897576 100644
--- a/chromium/v8/src/roots/roots.h
+++ b/chromium/v8/src/roots/roots.h
@@ -88,6 +88,7 @@ class Symbol;
V(Map, fixed_double_array_map, FixedDoubleArrayMap) \
V(Map, global_dictionary_map, GlobalDictionaryMap) \
V(Map, many_closures_cell_map, ManyClosuresCellMap) \
+ V(Map, mega_dom_handler_map, MegaDomHandlerMap) \
V(Map, module_info_map, ModuleInfoMap) \
V(Map, name_dictionary_map, NameDictionaryMap) \
V(Map, no_closures_cell_map, NoClosuresCellMap) \
@@ -110,6 +111,9 @@ class Symbol;
V(Map, source_text_module_map, SourceTextModuleMap) \
V(Map, swiss_name_dictionary_map, SwissNameDictionaryMap) \
V(Map, synthetic_module_map, SyntheticModuleMap) \
+ IF_WASM(V, Map, wasm_exported_function_data_map, \
+ WasmExportedFunctionDataMap) \
+ IF_WASM(V, Map, wasm_js_function_data_map, WasmJSFunctionDataMap) \
IF_WASM(V, Map, wasm_type_info_map, WasmTypeInfoMap) \
V(Map, weak_fixed_array_map, WeakFixedArrayMap) \
V(Map, weak_array_list_map, WeakArrayListMap) \
@@ -210,6 +214,7 @@ class Symbol;
/* Protectors */ \
V(PropertyCell, array_constructor_protector, ArrayConstructorProtector) \
V(PropertyCell, no_elements_protector, NoElementsProtector) \
+ V(PropertyCell, mega_dom_protector, MegaDOMProtector) \
V(PropertyCell, is_concat_spreadable_protector, IsConcatSpreadableProtector) \
V(PropertyCell, array_species_protector, ArraySpeciesProtector) \
V(PropertyCell, typed_array_species_protector, TypedArraySpeciesProtector) \
@@ -536,6 +541,10 @@ class ReadOnlyRoots {
V8_INLINE explicit ReadOnlyRoots(Isolate* isolate);
V8_INLINE explicit ReadOnlyRoots(LocalIsolate* isolate);
+ // For `v8_enable_map_packing=true`, this will return a packed (also untagged)
+ // map-word instead of a tagged heap pointer.
+ MapWord one_pointer_filler_map_word();
+
#define ROOT_ACCESSOR(Type, name, CamelName) \
V8_INLINE class Type name() const; \
V8_INLINE class Type unchecked_##name() const; \
diff --git a/chromium/v8/src/runtime/runtime-array.cc b/chromium/v8/src/runtime/runtime-array.cc
index 623064fd8aa..bdfb666ab7c 100644
--- a/chromium/v8/src/runtime/runtime-array.cc
+++ b/chromium/v8/src/runtime/runtime-array.cc
@@ -28,7 +28,14 @@ RUNTIME_FUNCTION(Runtime_TransitionElementsKind) {
CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
CONVERT_ARG_HANDLE_CHECKED(Map, to_map, 1);
ElementsKind to_kind = to_map->elements_kind();
- ElementsAccessor::ForKind(to_kind)->TransitionElementsKind(object, to_map);
+ if (ElementsAccessor::ForKind(to_kind)
+ ->TransitionElementsKind(object, to_map)
+ .IsNothing()) {
+ // TODO(victorgomes): EffectControlLinearizer::LowerTransitionElementsKind
+ // does not handle exceptions.
+ FATAL("Fatal JavaScript invalid array size");
+ UNREACHABLE();
+ }
return *object;
}
@@ -180,7 +187,11 @@ RUNTIME_FUNCTION(Runtime_GrowArrayElements) {
uint32_t capacity = static_cast<uint32_t>(object->elements().length());
if (index >= capacity) {
- if (!object->GetElementsAccessor()->GrowCapacity(object, index)) {
+ bool has_grown;
+ MAYBE_ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, has_grown,
+ object->GetElementsAccessor()->GrowCapacity(object, index));
+ if (!has_grown) {
return Smi::zero();
}
}
diff --git a/chromium/v8/src/runtime/runtime-classes.cc b/chromium/v8/src/runtime/runtime-classes.cc
index 120f2441a79..8fb37cec32a 100644
--- a/chromium/v8/src/runtime/runtime-classes.cc
+++ b/chromium/v8/src/runtime/runtime-classes.cc
@@ -188,7 +188,6 @@ Object GetMethodWithSharedName(
template <typename Dictionary>
Handle<Dictionary> ShallowCopyDictionaryTemplate(
Isolate* isolate, Handle<Dictionary> dictionary_template) {
- Handle<Map> dictionary_map(dictionary_template->map(), isolate);
Handle<Dictionary> dictionary =
Dictionary::ShallowCopy(isolate, dictionary_template);
// Clone all AccessorPairs in the dictionary.
@@ -375,7 +374,7 @@ bool AddDescriptorsByTemplate(
}
// Atomically commit the changes.
- receiver->synchronized_set_map(*map);
+ receiver->set_map(*map, kReleaseStore);
if (elements_dictionary->NumberOfElements() > 0) {
receiver->set_elements(*elements_dictionary);
}
@@ -469,7 +468,7 @@ bool AddDescriptorsByTemplate(
}
// Atomically commit the changes.
- receiver->synchronized_set_map(*map);
+ receiver->set_map(*map, kReleaseStore);
receiver->set_raw_properties_or_hash(*properties_dictionary);
if (elements_dictionary->NumberOfElements() > 0) {
receiver->set_elements(*elements_dictionary);
@@ -498,7 +497,7 @@ bool InitClassPrototype(Isolate* isolate,
map = Map::CopyDropDescriptors(isolate, map);
map->set_is_prototype_map(true);
Map::SetPrototype(isolate, map, prototype_parent);
- constructor->set_prototype_or_initial_map(*prototype);
+ constructor->set_prototype_or_initial_map(*prototype, kReleaseStore);
map->SetConstructor(*constructor);
Handle<FixedArray> computed_properties(
class_boilerplate->instance_computed_properties(), isolate);
diff --git a/chromium/v8/src/runtime/runtime-compiler.cc b/chromium/v8/src/runtime/runtime-compiler.cc
index 090a9261c44..1586e35e254 100644
--- a/chromium/v8/src/runtime/runtime-compiler.cc
+++ b/chromium/v8/src/runtime/runtime-compiler.cc
@@ -29,14 +29,6 @@ namespace internal {
namespace {
-// Returns false iff an exception was thrown.
-bool MaybeSpawnNativeContextIndependentCompilationJob(
- Isolate* isolate, Handle<JSFunction> function, ConcurrencyMode mode) {
- if (!FLAG_turbo_nci) return true; // Nothing to do.
- return Compiler::CompileOptimized(isolate, function, mode,
- CodeKind::NATIVE_CONTEXT_INDEPENDENT);
-}
-
Object CompileOptimized(Isolate* isolate, Handle<JSFunction> function,
ConcurrencyMode mode) {
StackLimitCheck check(isolate);
@@ -50,34 +42,12 @@ Object CompileOptimized(Isolate* isolate, Handle<JSFunction> function,
return ReadOnlyRoots(isolate).exception();
}
- // Possibly compile for NCI caching.
- if (!MaybeSpawnNativeContextIndependentCompilationJob(isolate, function,
- mode)) {
- return ReadOnlyRoots(isolate).exception();
- }
-
// As a post-condition of CompileOptimized, the function *must* be compiled,
// i.e. the installed Code object must not be the CompileLazy builtin.
DCHECK(function->is_compiled());
return function->code();
}
-void TryInstallNCICode(Isolate* isolate, Handle<JSFunction> function,
- Handle<SharedFunctionInfo> sfi,
- IsCompiledScope* is_compiled_scope) {
- // This function should only be called if there's a possibility that cached
- // code exists.
- DCHECK(sfi->may_have_cached_code());
- DCHECK_EQ(function->shared(), *sfi);
-
- Handle<Code> code;
- if (sfi->TryGetCachedCode(isolate).ToHandle(&code)) {
- function->set_code(*code, kReleaseStore);
- JSFunction::EnsureFeedbackVector(function, is_compiled_scope);
- if (FLAG_trace_turbo_nci) CompilationCacheCode::TraceHit(sfi, code);
- }
-}
-
} // namespace
RUNTIME_FUNCTION(Runtime_CompileLazy) {
@@ -104,9 +74,6 @@ RUNTIME_FUNCTION(Runtime_CompileLazy) {
&is_compiled_scope)) {
return ReadOnlyRoots(isolate).exception();
}
- if (sfi->may_have_cached_code()) {
- TryInstallNCICode(isolate, function, sfi, &is_compiled_scope);
- }
DCHECK(function->is_compiled());
return function->code();
}
@@ -127,18 +94,6 @@ RUNTIME_FUNCTION(Runtime_InstallBaselineCode) {
return baseline_code;
}
-RUNTIME_FUNCTION(Runtime_TryInstallNCICode) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
- DCHECK(function->is_compiled());
- Handle<SharedFunctionInfo> sfi(function->shared(), isolate);
- IsCompiledScope is_compiled_scope(*sfi, isolate);
- TryInstallNCICode(isolate, function, sfi, &is_compiled_scope);
- DCHECK(function->is_compiled());
- return function->code();
-}
-
RUNTIME_FUNCTION(Runtime_CompileOptimized_Concurrent) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
@@ -276,6 +231,14 @@ RUNTIME_FUNCTION(Runtime_ObserveNode) {
return *obj;
}
+RUNTIME_FUNCTION(Runtime_VerifyType) {
+ // %VerifyType has no effect in the interpreter.
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, obj, 0);
+ return *obj;
+}
+
static bool IsSuitableForOnStackReplacement(Isolate* isolate,
Handle<JSFunction> function) {
// Keep track of whether we've succeeded in optimizing.
@@ -356,16 +319,7 @@ RUNTIME_FUNCTION(Runtime_CompileForOnStackReplacement) {
PrintF(scope.file(), " at OSR bytecode offset %d]\n", osr_offset.ToInt());
}
maybe_result =
- Compiler::GetOptimizedCodeForOSR(function, osr_offset, frame);
-
- // Possibly compile for NCI caching.
- if (!MaybeSpawnNativeContextIndependentCompilationJob(
- isolate, function,
- isolate->concurrent_recompilation_enabled()
- ? ConcurrencyMode::kConcurrent
- : ConcurrencyMode::kNotConcurrent)) {
- return Object();
- }
+ Compiler::GetOptimizedCodeForOSR(isolate, function, osr_offset, frame);
}
// Check whether we ended up with usable optimized code.
diff --git a/chromium/v8/src/runtime/runtime-debug.cc b/chromium/v8/src/runtime/runtime-debug.cc
index 4ffcaf23978..90ae087d209 100644
--- a/chromium/v8/src/runtime/runtime-debug.cc
+++ b/chromium/v8/src/runtime/runtime-debug.cc
@@ -61,13 +61,6 @@ RUNTIME_FUNCTION_RETURN_PAIR(Runtime_DebugBreakOnBytecode) {
handle(it.frame()->function(), isolate));
}
- // If we are dropping frames, there is no need to get a return value or
- // bytecode, since we will be restarting execution at a different frame.
- if (isolate->debug()->will_restart()) {
- return MakePair(ReadOnlyRoots(isolate).undefined_value(),
- Smi::FromInt(static_cast<uint8_t>(Bytecode::kIllegal)));
- }
-
// Return the handler from the original bytecode array.
DCHECK(it.frame()->is_interpreted());
InterpretedFrame* interpreted_frame =
@@ -157,11 +150,11 @@ RUNTIME_FUNCTION(Runtime_ScheduleBreak) {
return ReadOnlyRoots(isolate).undefined_value();
}
+namespace {
+
template <class IteratorType>
-static MaybeHandle<JSArray> GetIteratorInternalProperties(
- Isolate* isolate, Handle<IteratorType> object) {
- Factory* factory = isolate->factory();
- Handle<IteratorType> iterator = Handle<IteratorType>::cast(object);
+static Handle<ArrayList> AddIteratorInternalProperties(
+ Isolate* isolate, Handle<ArrayList> result, Handle<IteratorType> iterator) {
const char* kind = nullptr;
switch (iterator->map().instance_type()) {
case JS_MAP_KEY_ITERATOR_TYPE:
@@ -179,62 +172,60 @@ static MaybeHandle<JSArray> GetIteratorInternalProperties(
UNREACHABLE();
}
- Handle<FixedArray> result = factory->NewFixedArray(2 * 3);
- Handle<String> has_more =
- factory->NewStringFromAsciiChecked("[[IteratorHasMore]]");
- result->set(0, *has_more);
- result->set(1, isolate->heap()->ToBoolean(iterator->HasMore()));
-
- Handle<String> index =
- factory->NewStringFromAsciiChecked("[[IteratorIndex]]");
- result->set(2, *index);
- result->set(3, iterator->index());
-
- Handle<String> iterator_kind =
- factory->NewStringFromAsciiChecked("[[IteratorKind]]");
- result->set(4, *iterator_kind);
- Handle<String> kind_str = factory->NewStringFromAsciiChecked(kind);
- result->set(5, *kind_str);
- return factory->NewJSArrayWithElements(result);
+ result = ArrayList::Add(
+ isolate, result,
+ isolate->factory()->NewStringFromAsciiChecked("[[IteratorHasMore]]"),
+ isolate->factory()->ToBoolean(iterator->HasMore()));
+ result = ArrayList::Add(
+ isolate, result,
+ isolate->factory()->NewStringFromAsciiChecked("[[IteratorIndex]]"),
+ handle(iterator->index(), isolate));
+ result = ArrayList::Add(
+ isolate, result,
+ isolate->factory()->NewStringFromAsciiChecked("[[IteratorKind]]"),
+ isolate->factory()->NewStringFromAsciiChecked(kind));
+ return result;
}
+} // namespace
MaybeHandle<JSArray> Runtime::GetInternalProperties(Isolate* isolate,
Handle<Object> object) {
- Factory* factory = isolate->factory();
+ auto result = ArrayList::New(isolate, 8 * 2);
+ if (object->IsJSObject()) {
+ PrototypeIterator iter(isolate, Handle<JSObject>::cast(object));
+ Handle<Object> prototype = PrototypeIterator::GetCurrent(iter);
+ if (!prototype->IsNull(isolate)) {
+ result = ArrayList::Add(
+ isolate, result,
+ isolate->factory()->NewStringFromStaticChars("[[Prototype]]"),
+ prototype);
+ }
+ }
if (object->IsJSBoundFunction()) {
Handle<JSBoundFunction> function = Handle<JSBoundFunction>::cast(object);
- Handle<FixedArray> result = factory->NewFixedArray(2 * 3);
- Handle<String> target =
- factory->NewStringFromAsciiChecked("[[TargetFunction]]");
- result->set(0, *target);
- result->set(1, function->bound_target_function());
-
- Handle<String> bound_this =
- factory->NewStringFromAsciiChecked("[[BoundThis]]");
- result->set(2, *bound_this);
- result->set(3, function->bound_this());
-
- Handle<String> bound_args =
- factory->NewStringFromAsciiChecked("[[BoundArgs]]");
- result->set(4, *bound_args);
- Handle<FixedArray> bound_arguments =
- factory->CopyFixedArray(handle(function->bound_arguments(), isolate));
- Handle<JSArray> arguments_array =
- factory->NewJSArrayWithElements(bound_arguments);
- result->set(5, *arguments_array);
- return factory->NewJSArrayWithElements(result);
- }
- if (object->IsJSMapIterator()) {
+ result = ArrayList::Add(
+ isolate, result,
+ isolate->factory()->NewStringFromAsciiChecked("[[TargetFunction]]"),
+ handle(function->bound_target_function(), isolate));
+ result = ArrayList::Add(
+ isolate, result,
+ isolate->factory()->NewStringFromAsciiChecked("[[BoundThis]]"),
+ handle(function->bound_this(), isolate));
+ result = ArrayList::Add(
+ isolate, result,
+ isolate->factory()->NewStringFromAsciiChecked("[[BoundArgs]]"),
+ isolate->factory()->NewJSArrayWithElements(
+ isolate->factory()->CopyFixedArray(
+ handle(function->bound_arguments(), isolate))));
+ } else if (object->IsJSMapIterator()) {
Handle<JSMapIterator> iterator = Handle<JSMapIterator>::cast(object);
- return GetIteratorInternalProperties(isolate, iterator);
- }
- if (object->IsJSSetIterator()) {
+ result = AddIteratorInternalProperties(isolate, result, iterator);
+ } else if (object->IsJSSetIterator()) {
Handle<JSSetIterator> iterator = Handle<JSSetIterator>::cast(object);
- return GetIteratorInternalProperties(isolate, iterator);
- }
- if (object->IsJSGeneratorObject()) {
+ result = AddIteratorInternalProperties(isolate, result, iterator);
+ } else if (object->IsJSGeneratorObject()) {
Handle<JSGeneratorObject> generator =
Handle<JSGeneratorObject>::cast(object);
@@ -247,163 +238,131 @@ MaybeHandle<JSArray> Runtime::GetInternalProperties(Isolate* isolate,
DCHECK(generator->is_suspended());
}
- Handle<FixedArray> result = factory->NewFixedArray(2 * 3);
- Handle<String> generator_status =
- factory->NewStringFromAsciiChecked("[[GeneratorState]]");
- result->set(0, *generator_status);
- Handle<String> status_str = factory->NewStringFromAsciiChecked(status);
- result->set(1, *status_str);
-
- Handle<String> function =
- factory->NewStringFromAsciiChecked("[[GeneratorFunction]]");
- result->set(2, *function);
- result->set(3, generator->function());
-
- Handle<String> receiver =
- factory->NewStringFromAsciiChecked("[[GeneratorReceiver]]");
- result->set(4, *receiver);
- result->set(5, generator->receiver());
- return factory->NewJSArrayWithElements(result);
- }
- if (object->IsJSPromise()) {
+ result = ArrayList::Add(
+ isolate, result,
+ isolate->factory()->NewStringFromAsciiChecked("[[GeneratorState]]"),
+ isolate->factory()->NewStringFromAsciiChecked(status));
+ result = ArrayList::Add(
+ isolate, result,
+ isolate->factory()->NewStringFromAsciiChecked("[[GeneratorFunction]]"),
+ handle(generator->function(), isolate));
+ result = ArrayList::Add(
+ isolate, result,
+ isolate->factory()->NewStringFromAsciiChecked("[[GeneratorReceiver]]"),
+ handle(generator->receiver(), isolate));
+ } else if (object->IsJSPromise()) {
Handle<JSPromise> promise = Handle<JSPromise>::cast(object);
- const char* status = JSPromise::Status(promise->status());
- Handle<FixedArray> result = factory->NewFixedArray(2 * 2);
- Handle<String> promise_status =
- factory->NewStringFromAsciiChecked("[[PromiseState]]");
- result->set(0, *promise_status);
- Handle<String> status_str = factory->NewStringFromAsciiChecked(status);
- result->set(1, *status_str);
-
- Handle<Object> value_obj(promise->status() == Promise::kPending
- ? ReadOnlyRoots(isolate).undefined_value()
- : promise->result(),
- isolate);
- Handle<String> promise_value =
- factory->NewStringFromAsciiChecked("[[PromiseResult]]");
- result->set(2, *promise_value);
- result->set(3, *value_obj);
- return factory->NewJSArrayWithElements(result);
- }
- if (object->IsJSProxy()) {
- Handle<JSProxy> js_proxy = Handle<JSProxy>::cast(object);
- Handle<FixedArray> result = factory->NewFixedArray(3 * 2);
- Handle<String> handler_str =
- factory->NewStringFromAsciiChecked("[[Handler]]");
- result->set(0, *handler_str);
- result->set(1, js_proxy->handler());
-
- Handle<String> target_str =
- factory->NewStringFromAsciiChecked("[[Target]]");
- result->set(2, *target_str);
- result->set(3, js_proxy->target());
+ result = ArrayList::Add(
+ isolate, result,
+ isolate->factory()->NewStringFromAsciiChecked("[[PromiseState]]"),
+ isolate->factory()->NewStringFromAsciiChecked(
+ JSPromise::Status(promise->status())));
+ result = ArrayList::Add(
+ isolate, result,
+ isolate->factory()->NewStringFromAsciiChecked("[[PromiseResult]]"),
+ promise->status() == Promise::kPending
+ ? isolate->factory()->undefined_value()
+ : handle(promise->result(), isolate));
+ } else if (object->IsJSProxy()) {
+ Handle<JSProxy> js_proxy = Handle<JSProxy>::cast(object);
- Handle<String> is_revoked_str =
- factory->NewStringFromAsciiChecked("[[IsRevoked]]");
- result->set(4, *is_revoked_str);
- result->set(5, isolate->heap()->ToBoolean(js_proxy->IsRevoked()));
- return factory->NewJSArrayWithElements(result);
- }
- if (object->IsJSPrimitiveWrapper()) {
+ result = ArrayList::Add(
+ isolate, result,
+ isolate->factory()->NewStringFromAsciiChecked("[[Handler]]"),
+ handle(js_proxy->handler(), isolate));
+ result = ArrayList::Add(
+ isolate, result,
+ isolate->factory()->NewStringFromAsciiChecked("[[Target]]"),
+ handle(js_proxy->target(), isolate));
+ result = ArrayList::Add(
+ isolate, result,
+ isolate->factory()->NewStringFromAsciiChecked("[[IsRevoked]]"),
+ isolate->factory()->ToBoolean(js_proxy->IsRevoked()));
+ } else if (object->IsJSPrimitiveWrapper()) {
Handle<JSPrimitiveWrapper> js_value =
Handle<JSPrimitiveWrapper>::cast(object);
- Handle<FixedArray> result = factory->NewFixedArray(2);
- Handle<String> primitive_value =
- factory->NewStringFromAsciiChecked("[[PrimitiveValue]]");
- result->set(0, *primitive_value);
- result->set(1, js_value->value());
- return factory->NewJSArrayWithElements(result);
- }
- if (object->IsJSArrayBuffer()) {
+ result = ArrayList::Add(
+ isolate, result,
+ isolate->factory()->NewStringFromAsciiChecked("[[PrimitiveValue]]"),
+ handle(js_value->value(), isolate));
+ } else if (object->IsJSArrayBuffer()) {
Handle<JSArrayBuffer> js_array_buffer = Handle<JSArrayBuffer>::cast(object);
if (js_array_buffer->was_detached()) {
      // Mark a detached JSArrayBuffer as such and don't even try to
// create views for it, since the TypedArray constructors will
// throw a TypeError when the underlying buffer is detached.
- Handle<FixedArray> result = factory->NewFixedArray(1 * 2);
- Handle<String> is_detached_str =
- factory->NewStringFromAsciiChecked("[[IsDetached]]");
- result->set(0, *is_detached_str);
- result->set(1, isolate->heap()->ToBoolean(true));
- return factory->NewJSArrayWithElements(result, PACKED_ELEMENTS);
- }
- const size_t byte_length = js_array_buffer->byte_length();
- static const ExternalArrayType kTypes[] = {
- kExternalInt8Array,
- kExternalUint8Array,
- kExternalInt16Array,
- kExternalInt32Array,
- };
- Handle<FixedArray> result =
- factory->NewFixedArray((3 + arraysize(kTypes)) * 2);
- int index = 0;
- for (auto type : kTypes) {
- switch (type) {
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \
- case kExternal##Type##Array: { \
- if ((byte_length % sizeof(ctype)) != 0) continue; \
- Handle<String> typed_array_str = \
- factory->NewStringFromStaticChars("[[" #Type "Array]]"); \
- Handle<JSTypedArray> js_typed_array = \
- factory->NewJSTypedArray(kExternal##Type##Array, js_array_buffer, 0, \
- byte_length / sizeof(ctype)); \
- result->set(index++, *typed_array_str); \
- result->set(index++, *js_typed_array); \
- break; \
- }
- TYPED_ARRAYS(TYPED_ARRAY_CASE)
+ result = ArrayList::Add(
+ isolate, result,
+ isolate->factory()->NewStringFromAsciiChecked("[[IsDetached]]"),
+ isolate->factory()->true_value());
+ } else {
+ const size_t byte_length = js_array_buffer->byte_length();
+ static const ExternalArrayType kTypes[] = {
+ kExternalInt8Array,
+ kExternalUint8Array,
+ kExternalInt16Array,
+ kExternalInt32Array,
+ };
+ for (auto type : kTypes) {
+ switch (type) {
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \
+ case kExternal##Type##Array: { \
+ if ((byte_length % sizeof(ctype)) != 0) continue; \
+ result = ArrayList::Add( \
+ isolate, result, \
+ isolate->factory()->NewStringFromStaticChars("[[" #Type "Array]]"), \
+ isolate->factory()->NewJSTypedArray(kExternal##Type##Array, \
+ js_array_buffer, 0, \
+ byte_length / sizeof(ctype))); \
+ break; \
+ }
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
default:
UNREACHABLE();
+ }
+ }
+ result =
+ ArrayList::Add(isolate, result,
+ isolate->factory()->NewStringFromAsciiChecked(
+ "[[ArrayBufferByteLength]]"),
+ isolate->factory()->NewNumberFromSize(byte_length));
+
+ // Use the backing store pointer as a unique ID
+ EmbeddedVector<char, 32> buffer_data_vec;
+ int len =
+ SNPrintF(buffer_data_vec, V8PRIxPTR_FMT,
+ reinterpret_cast<Address>(js_array_buffer->backing_store()));
+ result = ArrayList::Add(
+ isolate, result,
+ isolate->factory()->NewStringFromAsciiChecked("[[ArrayBufferData]]"),
+ isolate->factory()->InternalizeUtf8String(
+ buffer_data_vec.SubVector(0, len)));
+
+ Handle<Symbol> memory_symbol =
+ isolate->factory()->array_buffer_wasm_memory_symbol();
+ Handle<Object> memory_object =
+ JSObject::GetDataProperty(js_array_buffer, memory_symbol);
+ if (!memory_object->IsUndefined(isolate)) {
+ result = ArrayList::Add(isolate, result,
+ isolate->factory()->NewStringFromAsciiChecked(
+ "[[WebAssemblyMemory]]"),
+ memory_object);
}
}
- Handle<String> byte_length_str =
- factory->NewStringFromAsciiChecked("[[ArrayBufferByteLength]]");
- Handle<Object> byte_length_obj = factory->NewNumberFromSize(byte_length);
- result->set(index++, *byte_length_str);
- result->set(index++, *byte_length_obj);
-
- Handle<String> buffer_data_str =
- factory->NewStringFromAsciiChecked("[[ArrayBufferData]]");
- // Use the backing store pointer as a unique ID
- EmbeddedVector<char, 32> buffer_data_vec;
- int len =
- SNPrintF(buffer_data_vec, V8PRIxPTR_FMT,
- reinterpret_cast<Address>(js_array_buffer->backing_store()));
- Handle<String> buffer_data_obj =
- factory->InternalizeUtf8String(buffer_data_vec.SubVector(0, len));
- result->set(index++, *buffer_data_str);
- result->set(index++, *buffer_data_obj);
-
#if V8_ENABLE_WEBASSEMBLY
- Handle<Symbol> memory_symbol = factory->array_buffer_wasm_memory_symbol();
- Handle<Object> memory_object =
- JSObject::GetDataProperty(js_array_buffer, memory_symbol);
- if (!memory_object->IsUndefined(isolate)) {
- Handle<String> buffer_memory_str =
- factory->NewStringFromAsciiChecked("[[WebAssemblyMemory]]");
- Handle<WasmMemoryObject> buffer_memory_obj =
- Handle<WasmMemoryObject>::cast(memory_object);
- result->set(index++, *buffer_memory_str);
- result->set(index++, *buffer_memory_obj);
- }
+ } else if (object->IsWasmInstanceObject()) {
+ result = AddWasmInstanceObjectInternalProperties(
+ isolate, result, Handle<WasmInstanceObject>::cast(object));
+ } else if (object->IsWasmModuleObject()) {
+ result = AddWasmModuleObjectInternalProperties(
+ isolate, result, Handle<WasmModuleObject>::cast(object));
#endif // V8_ENABLE_WEBASSEMBLY
-
- return factory->NewJSArrayWithElements(result, PACKED_ELEMENTS, index);
}
-#if V8_ENABLE_WEBASSEMBLY
- if (object->IsWasmInstanceObject()) {
- return GetWasmInstanceObjectInternalProperties(
- Handle<WasmInstanceObject>::cast(object));
- }
- if (object->IsWasmModuleObject()) {
- return GetWasmModuleObjectInternalProperties(
- Handle<WasmModuleObject>::cast(object));
- }
-#endif // V8_ENABLE_WEBASSEMBLY
- return factory->NewJSArray(0);
+ return isolate->factory()->NewJSArrayWithElements(
+ ArrayList::Elements(isolate, result), PACKED_ELEMENTS);
}
RUNTIME_FUNCTION(Runtime_GetGeneratorScopeCount) {
@@ -718,7 +677,8 @@ RUNTIME_FUNCTION(Runtime_DebugOnFunctionCall) {
CONVERT_ARG_HANDLE_CHECKED(Object, receiver, 1);
if (isolate->debug()->needs_check_on_function_call()) {
// Ensure that the callee will perform debug check on function call too.
- Deoptimizer::DeoptimizeFunction(*fun);
+ Handle<SharedFunctionInfo> shared(fun->shared(), isolate);
+ isolate->debug()->DeoptimizeFunction(shared);
if (isolate->debug()->last_step_action() >= StepIn ||
isolate->debug()->break_on_next_function_call()) {
DCHECK_EQ(isolate->debug_execution_mode(), DebugInfo::kBreakpoints);
@@ -902,22 +862,9 @@ RUNTIME_FUNCTION(Runtime_LiveEditPatchScript) {
case v8::debug::LiveEditResult::BLOCKED_BY_RUNNING_GENERATOR:
return isolate->Throw(*isolate->factory()->NewStringFromAsciiChecked(
"LiveEdit failed: BLOCKED_BY_RUNNING_GENERATOR"));
- case v8::debug::LiveEditResult::BLOCKED_BY_FUNCTION_ABOVE_BREAK_FRAME:
- return isolate->Throw(*isolate->factory()->NewStringFromAsciiChecked(
- "LiveEdit failed: BLOCKED_BY_FUNCTION_ABOVE_BREAK_FRAME"));
- case v8::debug::LiveEditResult::
- BLOCKED_BY_FUNCTION_BELOW_NON_DROPPABLE_FRAME:
- return isolate->Throw(*isolate->factory()->NewStringFromAsciiChecked(
- "LiveEdit failed: BLOCKED_BY_FUNCTION_BELOW_NON_DROPPABLE_FRAME"));
case v8::debug::LiveEditResult::BLOCKED_BY_ACTIVE_FUNCTION:
return isolate->Throw(*isolate->factory()->NewStringFromAsciiChecked(
"LiveEdit failed: BLOCKED_BY_ACTIVE_FUNCTION"));
- case v8::debug::LiveEditResult::BLOCKED_BY_NEW_TARGET_IN_RESTART_FRAME:
- return isolate->Throw(*isolate->factory()->NewStringFromAsciiChecked(
- "LiveEdit failed: BLOCKED_BY_NEW_TARGET_IN_RESTART_FRAME"));
- case v8::debug::LiveEditResult::FRAME_RESTART_IS_NOT_SUPPORTED:
- return isolate->Throw(*isolate->factory()->NewStringFromAsciiChecked(
- "LiveEdit failed: FRAME_RESTART_IS_NOT_SUPPORTED"));
case v8::debug::LiveEditResult::OK:
return ReadOnlyRoots(isolate).undefined_value();
}
@@ -931,7 +878,7 @@ RUNTIME_FUNCTION(Runtime_ProfileCreateSnapshotDataBlob) {
// Used only by the test/memory/Memory.json benchmark. This creates a snapshot
// blob and outputs various statistics around it.
- DCHECK(FLAG_profile_deserialization);
+ DCHECK(FLAG_profile_deserialization && FLAG_serialization_statistics);
DisableEmbeddedBlobRefcounting();
diff --git a/chromium/v8/src/runtime/runtime-internal.cc b/chromium/v8/src/runtime/runtime-internal.cc
index 245d1fd77e1..6d569c2be2e 100644
--- a/chromium/v8/src/runtime/runtime-internal.cc
+++ b/chromium/v8/src/runtime/runtime-internal.cc
@@ -7,6 +7,7 @@
#include "src/api/api.h"
#include "src/ast/ast-traversal-visitor.h"
#include "src/ast/prettyprinter.h"
+#include "src/baseline/baseline-osr-inl.h"
#include "src/baseline/baseline.h"
#include "src/builtins/builtins.h"
#include "src/common/message-template.h"
@@ -139,6 +140,7 @@ const char* ElementsKindToType(ElementsKind fixed_elements_kind) {
return #Type "Array";
TYPED_ARRAYS(ELEMENTS_KIND_CASE)
+ RAB_GSAB_TYPED_ARRAYS_WITH_TYPED_ARRAY_TYPE(ELEMENTS_KIND_CASE)
#undef ELEMENTS_KIND_CASE
default:
@@ -342,23 +344,13 @@ RUNTIME_FUNCTION(Runtime_BytecodeBudgetInterruptFromBytecode) {
// a non zero invocation count so we can inline functions.
function->feedback_vector().set_invocation_count(1);
if (FLAG_sparkplug) {
- if (Compiler::CompileBaseline(isolate, function,
- Compiler::CLEAR_EXCEPTION,
- &is_compiled_scope)) {
- if (FLAG_use_osr) {
- JavaScriptFrameIterator it(isolate);
- DCHECK(it.frame()->is_unoptimized());
- UnoptimizedFrame* frame = UnoptimizedFrame::cast(it.frame());
- if (FLAG_trace_osr) {
- CodeTracer::Scope scope(isolate->GetCodeTracer());
- PrintF(
- scope.file(),
- "[OSR - Entry at OSR bytecode offset %d into baseline code]\n",
- frame->GetBytecodeOffset());
- }
- frame->GetBytecodeArray().set_osr_loop_nesting_level(
- AbstractCode::kMaxLoopNestingMarker);
- }
+ if (V8_LIKELY(FLAG_use_osr)) {
+ JavaScriptFrameIterator it(isolate);
+ DCHECK(it.frame()->is_unoptimized());
+ UnoptimizedFrame* frame = UnoptimizedFrame::cast(it.frame());
+ OSRInterpreterFrameToBaseline(isolate, function, frame);
+ } else {
+ OSRInterpreterFrameToBaseline(isolate, function, nullptr);
}
}
return ReadOnlyRoots(isolate).undefined_value();
@@ -528,6 +520,7 @@ RUNTIME_FUNCTION(Runtime_IncrementUseCounter) {
RUNTIME_FUNCTION(Runtime_GetAndResetRuntimeCallStats) {
HandleScope scope(isolate);
DCHECK_LE(args.length(), 2);
+#ifdef V8_RUNTIME_CALL_STATS
// Append any worker thread runtime call stats to the main table before
// printing.
isolate->counters()->worker_thread_runtime_call_stats()->AddToMainTable(
@@ -570,6 +563,7 @@ RUNTIME_FUNCTION(Runtime_GetAndResetRuntimeCallStats) {
} else {
std::fflush(f);
}
+#endif // V8_RUNTIME_CALL_STATS
return ReadOnlyRoots(isolate).undefined_value();
}
diff --git a/chromium/v8/src/runtime/runtime-literals.cc b/chromium/v8/src/runtime/runtime-literals.cc
index 5ee7bcd4c53..c4285f2403b 100644
--- a/chromium/v8/src/runtime/runtime-literals.cc
+++ b/chromium/v8/src/runtime/runtime-literals.cc
@@ -218,6 +218,7 @@ MaybeHandle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) case TYPE##_ELEMENTS:
TYPED_ARRAYS(TYPED_ARRAY_CASE)
+ RAB_GSAB_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
// Typed elements cannot be created using an object literal.
UNREACHABLE();
@@ -291,7 +292,7 @@ class AllocationSiteCreationContext : public AllocationSiteContext {
}
void ExitScope(Handle<AllocationSite> scope_site, Handle<JSObject> object) {
if (object.is_null()) return;
- scope_site->set_boilerplate(*object);
+ scope_site->set_boilerplate(*object, kReleaseStore);
if (FLAG_trace_creation_allocation_sites) {
bool top_level =
!scope_site.is_null() && top().is_identical_to(scope_site);
diff --git a/chromium/v8/src/runtime/runtime-object.cc b/chromium/v8/src/runtime/runtime-object.cc
index a98ad2e6dc4..13493125da1 100644
--- a/chromium/v8/src/runtime/runtime-object.cc
+++ b/chromium/v8/src/runtime/runtime-object.cc
@@ -16,8 +16,10 @@
#include "src/logging/counters.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/js-array-inl.h"
+#include "src/objects/map-updater.h"
#include "src/objects/property-descriptor-object.h"
#include "src/objects/property-descriptor.h"
+#include "src/objects/property-details.h"
#include "src/objects/swiss-name-dictionary-inl.h"
#include "src/runtime/runtime-utils.h"
#include "src/runtime/runtime.h"
@@ -25,23 +27,22 @@
namespace v8 {
namespace internal {
-MaybeHandle<Object> Runtime::GetObjectProperty(Isolate* isolate,
- Handle<Object> holder,
- Handle<Object> key,
- Handle<Object> receiver,
- bool* is_found) {
+MaybeHandle<Object> Runtime::GetObjectProperty(
+ Isolate* isolate, Handle<Object> lookup_start_object, Handle<Object> key,
+ Handle<Object> receiver, bool* is_found) {
if (receiver.is_null()) {
- receiver = holder;
+ receiver = lookup_start_object;
}
- if (holder->IsNullOrUndefined(isolate)) {
- ErrorUtils::ThrowLoadFromNullOrUndefined(isolate, holder, key);
+ if (lookup_start_object->IsNullOrUndefined(isolate)) {
+ ErrorUtils::ThrowLoadFromNullOrUndefined(isolate, lookup_start_object, key);
return MaybeHandle<Object>();
}
bool success = false;
LookupIterator::Key lookup_key(isolate, key, &success);
if (!success) return MaybeHandle<Object>();
- LookupIterator it = LookupIterator(isolate, receiver, lookup_key, holder);
+ LookupIterator it =
+ LookupIterator(isolate, receiver, lookup_key, lookup_start_object);
MaybeHandle<Object> result = Object::GetProperty(&it);
if (is_found) *is_found = it.IsFound();
@@ -58,12 +59,12 @@ MaybeHandle<Object> Runtime::GetObjectProperty(Isolate* isolate,
: name_string;
THROW_NEW_ERROR(isolate,
NewTypeError(MessageTemplate::kInvalidPrivateBrand,
- class_name, holder),
+ class_name, lookup_start_object),
Object);
}
THROW_NEW_ERROR(isolate,
NewTypeError(MessageTemplate::kInvalidPrivateMemberRead,
- name_string, holder),
+ name_string, lookup_start_object),
Object);
}
return result;
@@ -95,6 +96,28 @@ MaybeHandle<Object> Runtime::HasProperty(Isolate* isolate,
namespace {
+// This function sets the sentinel value in a deleted field. The sentinel has
+// to look like a proper standalone object because the slack tracking may
+// complete at any time. For this reason we use the filler map word.
+// If V8_MAP_PACKING is enabled, then the filler map word is a packed filler
+// map. Otherwise, the filler map word is the same as the filler map.
+inline void ClearField(Isolate* isolate, JSObject object, FieldIndex index) {
+ if (index.is_inobject()) {
+ MapWord filler_map_word =
+ ReadOnlyRoots(isolate).one_pointer_filler_map_word();
+#ifndef V8_MAP_PACKING
+ DCHECK_EQ(filler_map_word.ToMap(),
+ ReadOnlyRoots(isolate).one_pointer_filler_map());
+#endif
+ int offset = index.offset();
+ TaggedField<MapWord>::Release_Store(object, offset, filler_map_word);
+ } else {
+ object.property_array().set(
+ index.outobject_array_index(),
+ ReadOnlyRoots(isolate).one_pointer_filler_map());
+ }
+}
+
void GeneralizeAllTransitionsToFieldAsMutable(Isolate* isolate, Handle<Map> map,
Handle<Name> name) {
InternalIndex descriptor(map->NumberOfOwnDescriptors());
@@ -131,9 +154,9 @@ void GeneralizeAllTransitionsToFieldAsMutable(Isolate* isolate, Handle<Map> map,
Handle<FieldType> field_type(
target->instance_descriptors(isolate).GetFieldType(descriptor),
isolate);
- Map::GeneralizeField(isolate, target, descriptor,
- PropertyConstness::kMutable, details.representation(),
- field_type);
+ MapUpdater::GeneralizeField(isolate, target, descriptor,
+ PropertyConstness::kMutable,
+ details.representation(), field_type);
DCHECK_EQ(PropertyConstness::kMutable, target->instance_descriptors(isolate)
.GetDetails(descriptor)
.constness());
@@ -191,8 +214,7 @@ bool DeleteObjectPropertyFast(Isolate* isolate, Handle<JSReceiver> receiver,
// Clear out the properties backing store.
receiver->SetProperties(ReadOnlyRoots(isolate).empty_fixed_array());
} else {
- Object filler = ReadOnlyRoots(isolate).one_pointer_filler_map();
- JSObject::cast(*receiver).FastPropertyAtPut(index, filler);
+ ClearField(isolate, JSObject::cast(*receiver), index);
// We must clear any recorded slot for the deleted property, because
// subsequent object modifications might put a raw double there.
// Slot clearing is the reason why this entire function cannot currently
@@ -203,8 +225,10 @@ bool DeleteObjectPropertyFast(Isolate* isolate, Handle<JSReceiver> receiver,
// have recorded slots in free space.
isolate->heap()->ClearRecordedSlot(*receiver,
receiver->RawField(index.offset()));
- MemoryChunk* chunk = MemoryChunk::FromHeapObject(*receiver);
- chunk->InvalidateRecordedSlots(*receiver);
+ if (!FLAG_enable_third_party_heap) {
+ MemoryChunk* chunk = MemoryChunk::FromHeapObject(*receiver);
+ chunk->InvalidateRecordedSlots(*receiver);
+ }
}
}
}
@@ -214,7 +238,7 @@ bool DeleteObjectPropertyFast(Isolate* isolate, Handle<JSReceiver> receiver,
// the "deoptimize dependent code" mechanism.
receiver_map->NotifyLeafMapLayoutChange(isolate);
// Finally, perform the map rollback.
- receiver->synchronized_set_map(*parent_map);
+ receiver->set_map(*parent_map, kReleaseStore);
#if VERIFY_HEAP
receiver->HeapObjectVerify(isolate);
receiver->property_array().PropertyArrayVerify(isolate);
@@ -682,15 +706,15 @@ RUNTIME_FUNCTION(Runtime_JSReceiverSetPrototypeOfDontThrow) {
RUNTIME_FUNCTION(Runtime_GetProperty) {
HandleScope scope(isolate);
DCHECK(args.length() == 3 || args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(Object, holder_obj, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, lookup_start_obj, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, key_obj, 1);
- Handle<Object> receiver_obj = holder_obj;
+ Handle<Object> receiver_obj = lookup_start_obj;
if (args.length() == 3) {
CHECK(args[2].IsObject());
receiver_obj = args.at<Object>(2);
}
- // Fast cases for getting named properties of the holder JSObject
+ // Fast cases for getting named properties of the lookup_start_obj JSObject
// itself.
//
  // The global proxy objects have to be excluded since LookupOwn on
@@ -708,18 +732,19 @@ RUNTIME_FUNCTION(Runtime_GetProperty) {
if (key_obj->IsString() && String::cast(*key_obj).AsArrayIndex(&index)) {
key_obj = isolate->factory()->NewNumberFromUint(index);
}
- if (holder_obj->IsJSObject()) {
- if (!holder_obj->IsJSGlobalProxy() && !holder_obj->IsAccessCheckNeeded() &&
- key_obj->IsName()) {
- Handle<JSObject> holder = Handle<JSObject>::cast(holder_obj);
+ if (lookup_start_obj->IsJSObject()) {
+ Handle<JSObject> lookup_start_object =
+ Handle<JSObject>::cast(lookup_start_obj);
+ if (!lookup_start_object->IsJSGlobalProxy() &&
+ !lookup_start_object->IsAccessCheckNeeded() && key_obj->IsName()) {
Handle<Name> key = Handle<Name>::cast(key_obj);
key_obj = key = isolate->factory()->InternalizeName(key);
DisallowGarbageCollection no_gc;
- if (holder->IsJSGlobalObject()) {
+ if (lookup_start_object->IsJSGlobalObject()) {
// Attempt dictionary lookup.
- GlobalDictionary dictionary =
- JSGlobalObject::cast(*holder).global_dictionary(kAcquireLoad);
+ GlobalDictionary dictionary = JSGlobalObject::cast(*lookup_start_object)
+ .global_dictionary(kAcquireLoad);
InternalIndex entry = dictionary.FindEntry(isolate, key);
if (entry.is_found()) {
PropertyCell cell = dictionary.CellAt(entry);
@@ -729,17 +754,19 @@ RUNTIME_FUNCTION(Runtime_GetProperty) {
// If value is the hole (meaning, absent) do the general lookup.
}
}
- } else if (!holder->HasFastProperties()) {
+ } else if (!lookup_start_object->HasFastProperties()) {
// Attempt dictionary lookup.
if (V8_ENABLE_SWISS_NAME_DICTIONARY_BOOL) {
- SwissNameDictionary dictionary = holder->property_dictionary_swiss();
+ SwissNameDictionary dictionary =
+ lookup_start_object->property_dictionary_swiss();
InternalIndex entry = dictionary.FindEntry(isolate, *key);
if (entry.is_found() &&
(dictionary.DetailsAt(entry).kind() == kData)) {
return dictionary.ValueAt(entry);
}
} else {
- NameDictionary dictionary = holder->property_dictionary();
+ NameDictionary dictionary =
+ lookup_start_object->property_dictionary();
InternalIndex entry = dictionary.FindEntry(isolate, key);
if ((entry.is_found()) &&
(dictionary.DetailsAt(entry).kind() == kData)) {
@@ -754,22 +781,21 @@ RUNTIME_FUNCTION(Runtime_GetProperty) {
// transition elements to FAST_*_ELEMENTS to avoid excessive boxing of
// doubles for those future calls in the case that the elements would
// become PACKED_DOUBLE_ELEMENTS.
- Handle<JSObject> js_object = Handle<JSObject>::cast(holder_obj);
- ElementsKind elements_kind = js_object->GetElementsKind();
+ ElementsKind elements_kind = lookup_start_object->GetElementsKind();
if (IsDoubleElementsKind(elements_kind)) {
- if (Smi::ToInt(*key_obj) >= js_object->elements().length()) {
+ if (Smi::ToInt(*key_obj) >= lookup_start_object->elements().length()) {
elements_kind = IsHoleyElementsKind(elements_kind) ? HOLEY_ELEMENTS
: PACKED_ELEMENTS;
- JSObject::TransitionElementsKind(js_object, elements_kind);
+ JSObject::TransitionElementsKind(lookup_start_object, elements_kind);
}
} else {
DCHECK(IsSmiOrObjectElementsKind(elements_kind) ||
!IsFastElementsKind(elements_kind));
}
}
- } else if (holder_obj->IsString() && key_obj->IsSmi()) {
+ } else if (lookup_start_obj->IsString() && key_obj->IsSmi()) {
// Fast case for string indexing using [] with a smi index.
- Handle<String> str = Handle<String>::cast(holder_obj);
+ Handle<String> str = Handle<String>::cast(lookup_start_obj);
int index = Handle<Smi>::cast(key_obj)->value();
if (index >= 0 && index < str->length()) {
Factory* factory = isolate->factory();
@@ -780,8 +806,8 @@ RUNTIME_FUNCTION(Runtime_GetProperty) {
// Fall back to GetObjectProperty.
RETURN_RESULT_OR_FAILURE(
- isolate,
- Runtime::GetObjectProperty(isolate, holder_obj, key_obj, receiver_obj));
+ isolate, Runtime::GetObjectProperty(isolate, lookup_start_obj, key_obj,
+ receiver_obj));
}
RUNTIME_FUNCTION(Runtime_SetKeyedProperty) {
@@ -947,11 +973,16 @@ RUNTIME_FUNCTION(Runtime_NewObject) {
RUNTIME_FUNCTION(Runtime_GetDerivedMap) {
HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
+ DCHECK_EQ(3, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSFunction, target, 0);
CONVERT_ARG_HANDLE_CHECKED(JSReceiver, new_target, 1);
- RETURN_RESULT_OR_FAILURE(
- isolate, JSFunction::GetDerivedMap(isolate, target, new_target));
+ CONVERT_ARG_HANDLE_CHECKED(Object, rab_gsab, 2);
+ if (rab_gsab->IsTrue()) {
+ return *JSFunction::GetDerivedRabGsabMap(isolate, target, new_target);
+ } else {
+ RETURN_RESULT_OR_FAILURE(
+ isolate, JSFunction::GetDerivedMap(isolate, target, new_target));
+ }
}
RUNTIME_FUNCTION(Runtime_CompleteInobjectSlackTrackingForMap) {
diff --git a/chromium/v8/src/runtime/runtime-promise.cc b/chromium/v8/src/runtime/runtime-promise.cc
index c1ee96facca..0ade310cfb5 100644
--- a/chromium/v8/src/runtime/runtime-promise.cc
+++ b/chromium/v8/src/runtime/runtime-promise.cc
@@ -29,8 +29,8 @@ RUNTIME_FUNCTION(Runtime_PromiseRejectEventFromStack) {
// undefined, which we interpret as being a caught exception event.
rejected_promise = isolate->GetPromiseOnStackOnThrow();
}
- isolate->RunPromiseHook(PromiseHookType::kResolve, promise,
- isolate->factory()->undefined_value());
+ isolate->RunAllPromiseHooks(PromiseHookType::kResolve, promise,
+ isolate->factory()->undefined_value());
isolate->debug()->OnPromiseReject(rejected_promise, value);
// Report only if we don't actually have a handler.
@@ -142,7 +142,7 @@ Handle<JSPromise> AwaitPromisesInitCommon(Isolate* isolate,
// hook for the throwaway promise (passing the {promise} as its
// parent).
Handle<JSPromise> throwaway = isolate->factory()->NewJSPromiseWithoutHook();
- isolate->RunPromiseHook(PromiseHookType::kInit, throwaway, promise);
+ isolate->RunAllPromiseHooks(PromiseHookType::kInit, throwaway, promise);
// On inspector side we capture async stack trace and store it by
// outer_promise->async_task_id when async function is suspended first time.
@@ -204,7 +204,7 @@ RUNTIME_FUNCTION(Runtime_AwaitPromisesInitOld) {
// Fire the init hook for the wrapper promise (that we created for the
// {value} previously).
- isolate->RunPromiseHook(PromiseHookType::kInit, promise, outer_promise);
+ isolate->RunAllPromiseHooks(PromiseHookType::kInit, promise, outer_promise);
return *AwaitPromisesInitCommon(isolate, value, promise, outer_promise,
reject_handler, is_predicted_as_caught);
}
@@ -260,17 +260,18 @@ RUNTIME_FUNCTION(Runtime_ResolvePromise) {
// takes care of the Error-related construction, e.g., stack traces.
RUNTIME_FUNCTION(Runtime_ConstructAggregateErrorHelper) {
HandleScope scope(isolate);
- DCHECK_EQ(3, args.length());
+ DCHECK_EQ(4, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSFunction, target, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, new_target, 1);
CONVERT_ARG_HANDLE_CHECKED(Object, message, 2);
+ CONVERT_ARG_HANDLE_CHECKED(Object, options, 3);
DCHECK_EQ(*target, *isolate->aggregate_error_function());
Handle<Object> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, result,
- ErrorUtils::Construct(isolate, target, new_target, message));
+ ErrorUtils::Construct(isolate, target, new_target, message, options));
return *result;
}
@@ -299,6 +300,14 @@ RUNTIME_FUNCTION(Runtime_ConstructInternalAggregateErrorHelper) {
arg2 = args.at<Object>(3);
}
+ Handle<Object> options;
+ if (args.length() >= 5) {
+ CHECK(args[4].IsObject());
+ options = args.at<Object>(4);
+ } else {
+ options = isolate->factory()->undefined_value();
+ }
+
Handle<Object> message_string = MessageFormatter::Format(
isolate, MessageTemplate(message->value()), arg0, arg1, arg2);
@@ -306,8 +315,8 @@ RUNTIME_FUNCTION(Runtime_ConstructInternalAggregateErrorHelper) {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, result,
ErrorUtils::Construct(isolate, isolate->aggregate_error_function(),
- isolate->aggregate_error_function(),
- message_string));
+ isolate->aggregate_error_function(), message_string,
+ options));
return *result;
}
diff --git a/chromium/v8/src/runtime/runtime-regexp.cc b/chromium/v8/src/runtime/runtime-regexp.cc
index d18602f58f7..f80316e34dd 100644
--- a/chromium/v8/src/runtime/runtime-regexp.cc
+++ b/chromium/v8/src/runtime/runtime-regexp.cc
@@ -1696,7 +1696,7 @@ RUNTIME_FUNCTION(Runtime_RegExpSplit) {
if (!result->IsNull(isolate)) return *factory->NewJSArray(0);
- Handle<FixedArray> elems = factory->NewUninitializedFixedArray(1);
+ Handle<FixedArray> elems = factory->NewFixedArray(1);
elems->set(0, *string);
return *factory->NewJSArrayWithElements(elems);
}
@@ -2004,5 +2004,13 @@ RUNTIME_FUNCTION(Runtime_IsRegExp) {
return isolate->heap()->ToBoolean(obj.IsJSRegExp());
}
+RUNTIME_FUNCTION(Runtime_RegExpStringFromFlags) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_CHECKED(JSRegExp, regexp, 0);
+ Handle<String> flags = JSRegExp::StringFromFlags(isolate, regexp.GetFlags());
+ return *flags;
+}
+
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/runtime/runtime-scopes.cc b/chromium/v8/src/runtime/runtime-scopes.cc
index 457f3729556..e925e1f7f99 100644
--- a/chromium/v8/src/runtime/runtime-scopes.cc
+++ b/chromium/v8/src/runtime/runtime-scopes.cc
@@ -510,7 +510,7 @@ RUNTIME_FUNCTION(Runtime_NewStrictArguments) {
isolate->factory()->NewArgumentsObject(callee, argument_count);
if (argument_count) {
Handle<FixedArray> array =
- isolate->factory()->NewUninitializedFixedArray(argument_count);
+ isolate->factory()->NewFixedArray(argument_count);
DisallowGarbageCollection no_gc;
WriteBarrierMode mode = array->GetWriteBarrierMode(no_gc);
for (int i = 0; i < argument_count; i++) {
@@ -859,7 +859,7 @@ RUNTIME_FUNCTION(Runtime_StoreLookupSlot_SloppyHoisting) {
LanguageMode::kSloppy, lookup_flags));
}
-RUNTIME_FUNCTION(Runtime_StoreGlobalNoHoleCheckForReplLet) {
+RUNTIME_FUNCTION(Runtime_StoreGlobalNoHoleCheckForReplLetOrConst) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
diff --git a/chromium/v8/src/runtime/runtime-strings.cc b/chromium/v8/src/runtime/runtime-strings.cc
index 0acf6334d44..bd651c646c6 100644
--- a/chromium/v8/src/runtime/runtime-strings.cc
+++ b/chromium/v8/src/runtime/runtime-strings.cc
@@ -315,7 +315,7 @@ RUNTIME_FUNCTION(Runtime_StringToArray) {
int position = 0;
if (s->IsFlat() && s->IsOneByteRepresentation()) {
// Try using cached chars where possible.
- elements = isolate->factory()->NewUninitializedFixedArray(length);
+ elements = isolate->factory()->NewFixedArray(length);
DisallowGarbageCollection no_gc;
String::FlatContent content = s->GetFlatContent(no_gc);
diff --git a/chromium/v8/src/runtime/runtime-test-wasm.cc b/chromium/v8/src/runtime/runtime-test-wasm.cc
index 9c07441de55..54976dcc05b 100644
--- a/chromium/v8/src/runtime/runtime-test-wasm.cc
+++ b/chromium/v8/src/runtime/runtime-test-wasm.cc
@@ -158,8 +158,7 @@ RUNTIME_FUNCTION(Runtime_WasmTraceEnter) {
wasm::ModuleWireBytes wire_bytes =
wasm::ModuleWireBytes(frame->native_module()->wire_bytes());
wasm::WireBytesRef name_ref =
- module->lazily_generated_names.LookupFunctionName(
- wire_bytes, func_index, VectorOf(module->export_table));
+ module->lazily_generated_names.LookupFunctionName(wire_bytes, func_index);
wasm::WasmName name = wire_bytes.GetNameOrNull(name_ref);
wasm::WasmCode* code = frame->wasm_code();
@@ -423,14 +422,10 @@ RUNTIME_FUNCTION(Runtime_WasmTraceMemory) {
frame->wasm_instance().memory_object().array_buffer().backing_store());
int func_index = frame->function_index();
int pos = frame->position();
- // TODO(titzer): eliminate dependency on WasmModule definition here.
- int func_start =
- frame->wasm_instance().module()->functions[func_index].code.offset();
wasm::ExecutionTier tier = frame->wasm_code()->is_liftoff()
? wasm::ExecutionTier::kLiftoff
: wasm::ExecutionTier::kTurbofan;
- wasm::TraceMemoryOperation(tier, info, func_index, pos - func_start,
- mem_start);
+ wasm::TraceMemoryOperation(tier, info, func_index, pos, mem_start);
return ReadOnlyRoots(isolate).undefined_value();
}
diff --git a/chromium/v8/src/runtime/runtime-test.cc b/chromium/v8/src/runtime/runtime-test.cc
index 0dd7368e39e..00a4124dcef 100644
--- a/chromium/v8/src/runtime/runtime-test.cc
+++ b/chromium/v8/src/runtime/runtime-test.cc
@@ -4,6 +4,7 @@
#include "src/api/api-inl.h"
#include "src/base/platform/mutex.h"
+#include "src/baseline/baseline-osr-inl.h"
#include "src/codegen/assembler-inl.h"
#include "src/codegen/compiler.h"
#include "src/codegen/pending-optimization-table.h"
@@ -41,6 +42,11 @@ V8_WARN_UNUSED_RESULT Object CrashUnlessFuzzing(Isolate* isolate) {
return ReadOnlyRoots(isolate).undefined_value();
}
+// Returns |value| unless fuzzing is enabled; otherwise returns undefined_value.
+V8_WARN_UNUSED_RESULT Object ReturnFuzzSafe(Object value, Isolate* isolate) {
+ return FLAG_fuzzing ? ReadOnlyRoots(isolate).undefined_value() : value;
+}
+
// Assert that the given argument is a number within the Int32 range
 // and convert it to int32_t. If the argument is not an Int32, we crash
 // unless we are in fuzzing mode.
@@ -470,6 +476,37 @@ RUNTIME_FUNCTION(Runtime_OptimizeOsr) {
return ReadOnlyRoots(isolate).undefined_value();
}
+RUNTIME_FUNCTION(Runtime_BaselineOsr) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 0 || args.length() == 1);
+
+ Handle<JSFunction> function;
+
+ // The optional parameter determines the frame being targeted.
+ int stack_depth = 0;
+ if (args.length() == 1) {
+ if (!args[0].IsSmi()) return CrashUnlessFuzzing(isolate);
+ stack_depth = args.smi_at(0);
+ }
+
+ // Find the JavaScript function on the top of the stack.
+ JavaScriptFrameIterator it(isolate);
+ while (!it.done() && stack_depth--) it.Advance();
+ if (!it.done()) function = handle(it.frame()->function(), isolate);
+ if (function.is_null()) return CrashUnlessFuzzing(isolate);
+ if (!FLAG_sparkplug || !FLAG_use_osr) {
+ return ReadOnlyRoots(isolate).undefined_value();
+ }
+ if (!it.frame()->is_unoptimized()) {
+ return ReadOnlyRoots(isolate).undefined_value();
+ }
+
+ UnoptimizedFrame* frame = UnoptimizedFrame::cast(it.frame());
+ OSRInterpreterFrameToBaseline(isolate, function, frame);
+
+ return ReadOnlyRoots(isolate).undefined_value();
+}
+
RUNTIME_FUNCTION(Runtime_NeverOptimizeFunction) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
@@ -576,6 +613,11 @@ RUNTIME_FUNCTION(Runtime_GetOptimizationStatus) {
if (frame->is_optimized()) {
status |=
static_cast<int>(OptimizationStatus::kTopmostFrameIsTurboFanned);
+ } else if (frame->is_interpreted()) {
+ status |=
+ static_cast<int>(OptimizationStatus::kTopmostFrameIsInterpreted);
+ } else if (frame->is_baseline()) {
+ status |= static_cast<int>(OptimizationStatus::kTopmostFrameIsBaseline);
}
}
@@ -678,6 +720,7 @@ int FixedArrayLenFromSize(int size) {
}
void FillUpOneNewSpacePage(Isolate* isolate, Heap* heap) {
+ DCHECK(!FLAG_single_generation);
PauseAllocationObserversScope pause_observers(heap);
NewSpace* space = heap->new_space();
// We cannot rely on `space->limit()` to point to the end of the current page
@@ -990,6 +1033,25 @@ RUNTIME_FUNCTION(Runtime_InYoungGeneration) {
return isolate->heap()->ToBoolean(ObjectInYoungGeneration(obj));
}
+// Force pretenuring for the allocation site the passed object belongs to.
+RUNTIME_FUNCTION(Runtime_PretenureAllocationSite) {
+ DisallowGarbageCollection no_gc;
+
+ if (args.length() != 1) return CrashUnlessFuzzing(isolate);
+ CONVERT_ARG_CHECKED(Object, arg, 0);
+ if (!arg.IsJSObject()) return CrashUnlessFuzzing(isolate);
+ JSObject object = JSObject::cast(arg);
+
+ Heap* heap = object.GetHeap();
+ AllocationMemento memento =
+ heap->FindAllocationMemento<Heap::kForRuntime>(object.map(), object);
+ if (memento.is_null())
+ return ReturnFuzzSafe(ReadOnlyRoots(isolate).false_value(), isolate);
+ AllocationSite site = memento.GetAllocationSite();
+ heap->PretenureAllocationSiteOnNextCollection(site);
+ return ReturnFuzzSafe(ReadOnlyRoots(isolate).true_value(), isolate);
+}
+
namespace {
v8::ModifyCodeGenerationFromStringsResult DisallowCodegenFromStringsCallback(
@@ -1257,6 +1319,7 @@ RUNTIME_FUNCTION(Runtime_EnableCodeLoggingForTesting) {
Handle<String> source) final {}
void CodeMoveEvent(AbstractCode from, AbstractCode to) final {}
void SharedFunctionInfoMoveEvent(Address from, Address to) final {}
+ void NativeContextMoveEvent(Address from, Address to) final {}
void CodeMovingGCEvent() final {}
void CodeDisableOptEvent(Handle<AbstractCode> code,
Handle<SharedFunctionInfo> shared) final {}
@@ -1294,5 +1357,11 @@ RUNTIME_FUNCTION(Runtime_NewRegExpWithBacktrackLimit) {
isolate, JSRegExp::New(isolate, pattern, flags, backtrack_limit));
}
+RUNTIME_FUNCTION(Runtime_Is64Bit) {
+ SealHandleScope shs(isolate);
+ DCHECK_EQ(0, args.length());
+ return isolate->heap()->ToBoolean(kSystemPointerSize == 8);
+}
+
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/runtime/runtime-typedarray.cc b/chromium/v8/src/runtime/runtime-typedarray.cc
index b73cfc9769a..5d0fc35944a 100644
--- a/chromium/v8/src/runtime/runtime-typedarray.cc
+++ b/chromium/v8/src/runtime/runtime-typedarray.cc
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/base/atomicops.h"
#include "src/common/message-template.h"
#include "src/execution/arguments-inl.h"
#include "src/heap/factory.h"
@@ -52,6 +53,15 @@ RUNTIME_FUNCTION(Runtime_TypedArrayGetBuffer) {
return *holder->GetBuffer();
}
+RUNTIME_FUNCTION(Runtime_GrowableSharedArrayBufferByteLength) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSArrayBuffer, array_buffer, 0);
+
+ CHECK_EQ(0, array_buffer->byte_length());
+ size_t byte_length = array_buffer->GetBackingStore()->byte_length();
+ return *isolate->factory()->NewNumberFromSize(byte_length);
+}
namespace {
@@ -84,6 +94,14 @@ RUNTIME_FUNCTION(Runtime_TypedArraySortFast) {
CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, array, 0);
DCHECK(!array->WasDetached());
+#if V8_OS_LINUX
+ if (FLAG_multi_mapped_mock_allocator) {
+ // Sorting is meaningless with the mock allocator, and std::sort
+ // might crash (because aliasing elements violate its assumptions).
+ return *array;
+ }
+#endif
+
size_t length = array->length();
DCHECK_LT(1, length);
@@ -108,7 +126,8 @@ RUNTIME_FUNCTION(Runtime_TypedArraySortFast) {
offheap_copy.resize(bytes);
data_copy_ptr = &offheap_copy[0];
}
- std::memcpy(data_copy_ptr, static_cast<void*>(array->DataPtr()), bytes);
+ base::Relaxed_Memcpy(static_cast<base::Atomic8*>(data_copy_ptr),
+ static_cast<base::Atomic8*>(array->DataPtr()), bytes);
}
DisallowGarbageCollection no_gc;
@@ -147,7 +166,8 @@ RUNTIME_FUNCTION(Runtime_TypedArraySortFast) {
DCHECK_NOT_NULL(data_copy_ptr);
DCHECK_NE(array_copy.is_null(), offheap_copy.empty());
const size_t bytes = array->byte_length();
- std::memcpy(static_cast<void*>(array->DataPtr()), data_copy_ptr, bytes);
+ base::Relaxed_Memcpy(static_cast<base::Atomic8*>(array->DataPtr()),
+ static_cast<base::Atomic8*>(data_copy_ptr), bytes);
}
return *array;
diff --git a/chromium/v8/src/runtime/runtime.cc b/chromium/v8/src/runtime/runtime.cc
index c1b287cae37..9e553b12af1 100644
--- a/chromium/v8/src/runtime/runtime.cc
+++ b/chromium/v8/src/runtime/runtime.cc
@@ -209,6 +209,7 @@ bool Runtime::IsAllowListedForFuzzing(FunctionId id) {
case Runtime::kOptimizeFunctionOnNextCall:
case Runtime::kOptimizeOsr:
case Runtime::kPrepareFunctionForOptimization:
+ case Runtime::kPretenureAllocationSite:
case Runtime::kSetAllocationTimeout:
case Runtime::kSimulateNewspaceFull:
return true;
@@ -218,8 +219,10 @@ bool Runtime::IsAllowListedForFuzzing(FunctionId id) {
case Runtime::kGetOptimizationStatus:
case Runtime::kHeapObjectVerify:
case Runtime::kIsBeingInterpreted:
+ case Runtime::kVerifyType:
return !FLAG_allow_natives_for_differential_fuzzing;
case Runtime::kCompileBaseline:
+ case Runtime::kBaselineOsr:
return FLAG_sparkplug;
default:
return false;
diff --git a/chromium/v8/src/runtime/runtime.h b/chromium/v8/src/runtime/runtime.h
index 578156f0942..eea742f2c99 100644
--- a/chromium/v8/src/runtime/runtime.h
+++ b/chromium/v8/src/runtime/runtime.h
@@ -115,7 +115,7 @@ namespace internal {
F(NotifyDeoptimized, 0, 1) \
F(ObserveNode, 1, 1) \
F(ResolvePossiblyDirectEval, 6, 1) \
- F(TryInstallNCICode, 1, 1)
+ F(VerifyType, 1, 1)
#define FOR_EACH_INTRINSIC_DATE(F, I) F(DateCurrentTime, 0, 1)
@@ -343,7 +343,7 @@ namespace internal {
I(ToNumber, 1, 1) \
F(ToNumeric, 1, 1) \
I(ToObject, 1, 1) \
- I(ToString, 1, 1) \
+ F(ToString, 1, 1) \
F(TryMigrateInstance, 1, 1) \
F(SwissTableAdd, 4, 1) \
F(SwissTableAllocate, 1, 1) \
@@ -383,8 +383,8 @@ namespace internal {
F(ResolvePromise, 2, 1) \
F(PromiseRejectAfterResolved, 2, 1) \
F(PromiseResolveAfterResolved, 2, 1) \
- F(ConstructAggregateErrorHelper, 3, 1) \
- F(ConstructInternalAggregateErrorHelper, -1 /* <= 4*/, 1)
+ F(ConstructAggregateErrorHelper, 4, 1) \
+ F(ConstructInternalAggregateErrorHelper, -1 /* <= 5*/, 1)
#define FOR_EACH_INTRINSIC_PROXY(F, I) \
F(CheckProxyGetSetTrapResult, 2, 1) \
@@ -407,31 +407,32 @@ namespace internal {
F(RegExpInitializeAndCompile, 3, 1) \
F(RegExpReplaceRT, 3, 1) \
F(RegExpSplit, 3, 1) \
+ F(RegExpStringFromFlags, 1, 1) \
F(StringReplaceNonGlobalRegExpWithFunction, 3, 1) \
F(StringSplit, 3, 1)
-#define FOR_EACH_INTRINSIC_SCOPES(F, I) \
- F(DeclareEvalFunction, 2, 1) \
- F(DeclareEvalVar, 1, 1) \
- F(DeclareGlobals, 2, 1) \
- F(DeclareModuleExports, 2, 1) \
- F(DeleteLookupSlot, 1, 1) \
- F(LoadLookupSlot, 1, 1) \
- F(LoadLookupSlotInsideTypeof, 1, 1) \
- \
- F(NewClosure, 2, 1) \
- F(NewClosure_Tenured, 2, 1) \
- F(NewFunctionContext, 1, 1) \
- F(NewRestParameter, 1, 1) \
- F(NewSloppyArguments, 1, 1) \
- F(NewStrictArguments, 1, 1) \
- F(PushBlockContext, 1, 1) \
- F(PushCatchContext, 2, 1) \
- F(PushWithContext, 2, 1) \
- F(StoreGlobalNoHoleCheckForReplLet, 2, 1) \
- F(StoreLookupSlot_Sloppy, 2, 1) \
- F(StoreLookupSlot_SloppyHoisting, 2, 1) \
- F(StoreLookupSlot_Strict, 2, 1) \
+#define FOR_EACH_INTRINSIC_SCOPES(F, I) \
+ F(DeclareEvalFunction, 2, 1) \
+ F(DeclareEvalVar, 1, 1) \
+ F(DeclareGlobals, 2, 1) \
+ F(DeclareModuleExports, 2, 1) \
+ F(DeleteLookupSlot, 1, 1) \
+ F(LoadLookupSlot, 1, 1) \
+ F(LoadLookupSlotInsideTypeof, 1, 1) \
+ \
+ F(NewClosure, 2, 1) \
+ F(NewClosure_Tenured, 2, 1) \
+ F(NewFunctionContext, 1, 1) \
+ F(NewRestParameter, 1, 1) \
+ F(NewSloppyArguments, 1, 1) \
+ F(NewStrictArguments, 1, 1) \
+ F(PushBlockContext, 1, 1) \
+ F(PushCatchContext, 2, 1) \
+ F(PushWithContext, 2, 1) \
+ F(StoreGlobalNoHoleCheckForReplLetOrConst, 2, 1) \
+ F(StoreLookupSlot_Sloppy, 2, 1) \
+ F(StoreLookupSlot_SloppyHoisting, 2, 1) \
+ F(StoreLookupSlot_Strict, 2, 1) \
F(ThrowConstAssignError, 0, 1)
#define FOR_EACH_INTRINSIC_STRINGS(F, I) \
@@ -460,104 +461,108 @@ namespace internal {
F(SymbolDescriptiveString, 1, 1) \
F(SymbolIsPrivate, 1, 1)
-#define FOR_EACH_INTRINSIC_TEST(F, I) \
- F(Abort, 1, 1) \
- F(AbortJS, 1, 1) \
- F(AbortCSAAssert, 1, 1) \
- F(ArraySpeciesProtector, 0, 1) \
- F(ClearFunctionFeedback, 1, 1) \
- F(ClearMegamorphicStubCache, 0, 1) \
- F(CompleteInobjectSlackTracking, 1, 1) \
- F(ConstructConsString, 2, 1) \
- F(ConstructDouble, 2, 1) \
- F(ConstructSlicedString, 2, 1) \
- F(DebugPrint, 1, 1) \
- F(DebugPrintPtr, 1, 1) \
- F(DebugTrace, 0, 1) \
- F(DebugTrackRetainingPath, -1, 1) \
- F(DeoptimizeFunction, 1, 1) \
- F(DisallowCodegenFromStrings, 1, 1) \
- F(DisassembleFunction, 1, 1) \
- F(DynamicCheckMapsEnabled, 0, 1) \
- F(IsTopTierTurboprop, 0, 1) \
- F(IsMidTierTurboprop, 0, 1) \
- F(EnableCodeLoggingForTesting, 0, 1) \
- F(EnsureFeedbackVectorForFunction, 1, 1) \
- F(GetCallable, 0, 1) \
- F(GetInitializerFunction, 1, 1) \
- F(GetOptimizationStatus, -1, 1) \
- F(GetUndetectable, 0, 1) \
- F(GlobalPrint, 1, 1) \
- F(HasDictionaryElements, 1, 1) \
- F(HasDoubleElements, 1, 1) \
- F(HasElementsInALargeObjectSpace, 1, 1) \
- F(HasFastElements, 1, 1) \
- F(HasFastProperties, 1, 1) \
- F(HasOwnConstDataProperty, 2, 1) \
- F(HasFixedBigInt64Elements, 1, 1) \
- F(HasFixedBigUint64Elements, 1, 1) \
- F(HasFixedFloat32Elements, 1, 1) \
- F(HasFixedFloat64Elements, 1, 1) \
- F(HasFixedInt16Elements, 1, 1) \
- F(HasFixedInt32Elements, 1, 1) \
- F(HasFixedInt8Elements, 1, 1) \
- F(HasFixedUint16Elements, 1, 1) \
- F(HasFixedUint32Elements, 1, 1) \
- F(HasFixedUint8ClampedElements, 1, 1) \
- F(HasFixedUint8Elements, 1, 1) \
- F(HasHoleyElements, 1, 1) \
- F(HasObjectElements, 1, 1) \
- F(HasPackedElements, 1, 1) \
- F(HasSloppyArgumentsElements, 1, 1) \
- F(HasSmiElements, 1, 1) \
- F(HasSmiOrObjectElements, 1, 1) \
- F(HaveSameMap, 2, 1) \
- F(HeapObjectVerify, 1, 1) \
- F(ICsAreEnabled, 0, 1) \
- F(InLargeObjectSpace, 1, 1) \
- F(InYoungGeneration, 1, 1) \
- F(IsBeingInterpreted, 0, 1) \
- F(IsConcurrentRecompilationSupported, 0, 1) \
- F(IsDictPropertyConstTrackingEnabled, 0, 1) \
- F(RegexpHasBytecode, 2, 1) \
- F(RegexpHasNativeCode, 2, 1) \
- F(RegexpTypeTag, 1, 1) \
- F(RegexpIsUnmodified, 1, 1) \
- F(MapIteratorProtector, 0, 1) \
- F(ArrayIteratorProtector, 0, 1) \
- F(NeverOptimizeFunction, 1, 1) \
- F(NotifyContextDisposed, 0, 1) \
- F(OptimizeFunctionOnNextCall, -1, 1) \
- F(TierupFunctionOnNextCall, -1, 1) \
- F(OptimizeOsr, -1, 1) \
- F(NewRegExpWithBacktrackLimit, 3, 1) \
- F(PrepareFunctionForOptimization, -1, 1) \
- F(PrintWithNameForAssert, 2, 1) \
- F(RunningInSimulator, 0, 1) \
- F(RuntimeEvaluateREPL, 1, 1) \
- F(SerializeDeserializeNow, 0, 1) \
- F(SetAllocationTimeout, -1 /* 2 || 3 */, 1) \
- F(SetForceSlowPath, 1, 1) \
- F(SetIteratorProtector, 0, 1) \
- F(SimulateNewspaceFull, 0, 1) \
- F(ScheduleGCInStackCheck, 0, 1) \
- F(StringIteratorProtector, 0, 1) \
- F(SystemBreak, 0, 1) \
- F(TraceEnter, 0, 1) \
- F(TraceExit, 1, 1) \
- F(TurbofanStaticAssert, 1, 1) \
- F(TypedArraySpeciesProtector, 0, 1) \
- F(UnblockConcurrentRecompilation, 0, 1) \
- I(DeoptimizeNow, 0, 1) \
- F(PromiseSpeciesProtector, 0, 1) \
- F(IsConcatSpreadableProtector, 0, 1) \
- F(RegExpSpeciesProtector, 0, 1)
-
-#define FOR_EACH_INTRINSIC_TYPEDARRAY(F, I) \
- F(ArrayBufferDetach, 1, 1) \
- F(TypedArrayCopyElements, 3, 1) \
- F(TypedArrayGetBuffer, 1, 1) \
- F(TypedArraySet, 2, 1) \
+#define FOR_EACH_INTRINSIC_TEST(F, I) \
+ F(Abort, 1, 1) \
+ F(AbortJS, 1, 1) \
+ F(AbortCSAAssert, 1, 1) \
+ F(ArraySpeciesProtector, 0, 1) \
+ F(BaselineOsr, -1, 1) \
+ F(ClearFunctionFeedback, 1, 1) \
+ F(ClearMegamorphicStubCache, 0, 1) \
+ F(CompleteInobjectSlackTracking, 1, 1) \
+ F(ConstructConsString, 2, 1) \
+ F(ConstructDouble, 2, 1) \
+ F(ConstructSlicedString, 2, 1) \
+ F(DebugPrint, 1, 1) \
+ F(DebugPrintPtr, 1, 1) \
+ F(DebugTrace, 0, 1) \
+ F(DebugTrackRetainingPath, -1, 1) \
+ F(DeoptimizeFunction, 1, 1) \
+ F(DisallowCodegenFromStrings, 1, 1) \
+ F(DisassembleFunction, 1, 1) \
+ F(DynamicCheckMapsEnabled, 0, 1) \
+ F(IsTopTierTurboprop, 0, 1) \
+ F(IsMidTierTurboprop, 0, 1) \
+ F(EnableCodeLoggingForTesting, 0, 1) \
+ F(EnsureFeedbackVectorForFunction, 1, 1) \
+ F(GetCallable, 0, 1) \
+ F(GetInitializerFunction, 1, 1) \
+ F(GetOptimizationStatus, -1, 1) \
+ F(GetUndetectable, 0, 1) \
+ F(GlobalPrint, 1, 1) \
+ F(HasDictionaryElements, 1, 1) \
+ F(HasDoubleElements, 1, 1) \
+ F(HasElementsInALargeObjectSpace, 1, 1) \
+ F(HasFastElements, 1, 1) \
+ F(HasFastProperties, 1, 1) \
+ F(HasOwnConstDataProperty, 2, 1) \
+ F(HasFixedBigInt64Elements, 1, 1) \
+ F(HasFixedBigUint64Elements, 1, 1) \
+ F(HasFixedFloat32Elements, 1, 1) \
+ F(HasFixedFloat64Elements, 1, 1) \
+ F(HasFixedInt16Elements, 1, 1) \
+ F(HasFixedInt32Elements, 1, 1) \
+ F(HasFixedInt8Elements, 1, 1) \
+ F(HasFixedUint16Elements, 1, 1) \
+ F(HasFixedUint32Elements, 1, 1) \
+ F(HasFixedUint8ClampedElements, 1, 1) \
+ F(HasFixedUint8Elements, 1, 1) \
+ F(HasHoleyElements, 1, 1) \
+ F(HasObjectElements, 1, 1) \
+ F(HasPackedElements, 1, 1) \
+ F(HasSloppyArgumentsElements, 1, 1) \
+ F(HasSmiElements, 1, 1) \
+ F(HasSmiOrObjectElements, 1, 1) \
+ F(HaveSameMap, 2, 1) \
+ F(HeapObjectVerify, 1, 1) \
+ F(ICsAreEnabled, 0, 1) \
+ F(InLargeObjectSpace, 1, 1) \
+ F(InYoungGeneration, 1, 1) \
+ F(IsBeingInterpreted, 0, 1) \
+ F(IsConcurrentRecompilationSupported, 0, 1) \
+ F(IsDictPropertyConstTrackingEnabled, 0, 1) \
+ F(RegexpHasBytecode, 2, 1) \
+ F(RegexpHasNativeCode, 2, 1) \
+ F(RegexpTypeTag, 1, 1) \
+ F(RegexpIsUnmodified, 1, 1) \
+ F(MapIteratorProtector, 0, 1) \
+ F(ArrayIteratorProtector, 0, 1) \
+ F(NeverOptimizeFunction, 1, 1) \
+ F(NotifyContextDisposed, 0, 1) \
+ F(OptimizeFunctionOnNextCall, -1, 1) \
+ F(TierupFunctionOnNextCall, -1, 1) \
+ F(OptimizeOsr, -1, 1) \
+ F(NewRegExpWithBacktrackLimit, 3, 1) \
+ F(PrepareFunctionForOptimization, -1, 1) \
+ F(PretenureAllocationSite, 1, 1) \
+ F(PrintWithNameForAssert, 2, 1) \
+ F(RunningInSimulator, 0, 1) \
+ F(RuntimeEvaluateREPL, 1, 1) \
+ F(SerializeDeserializeNow, 0, 1) \
+ F(SetAllocationTimeout, -1 /* 2 || 3 */, 1) \
+ F(SetForceSlowPath, 1, 1) \
+ F(SetIteratorProtector, 0, 1) \
+ F(SimulateNewspaceFull, 0, 1) \
+ F(ScheduleGCInStackCheck, 0, 1) \
+ F(StringIteratorProtector, 0, 1) \
+ F(SystemBreak, 0, 1) \
+ F(TraceEnter, 0, 1) \
+ F(TraceExit, 1, 1) \
+ F(TurbofanStaticAssert, 1, 1) \
+ F(TypedArraySpeciesProtector, 0, 1) \
+ F(UnblockConcurrentRecompilation, 0, 1) \
+ I(DeoptimizeNow, 0, 1) \
+ F(PromiseSpeciesProtector, 0, 1) \
+ F(IsConcatSpreadableProtector, 0, 1) \
+ F(RegExpSpeciesProtector, 0, 1) \
+ F(Is64Bit, 0, 1)
+
+#define FOR_EACH_INTRINSIC_TYPEDARRAY(F, I) \
+ F(ArrayBufferDetach, 1, 1) \
+ F(GrowableSharedArrayBufferByteLength, 1, 1) \
+ F(TypedArrayCopyElements, 3, 1) \
+ F(TypedArrayGetBuffer, 1, 1) \
+ F(TypedArraySet, 2, 1) \
F(TypedArraySortFast, 1, 1)
#define FOR_EACH_INTRINSIC_WASM(F, I) \
@@ -784,9 +789,10 @@ class Runtime : public AllStatic {
Handle<Object> value, StoreOrigin store_origin,
Maybe<ShouldThrow> should_throw = Nothing<ShouldThrow>());
- // When "receiver" is not passed, it defaults to "holder".
+ // When "receiver" is not passed, it defaults to "lookup_start_object".
V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT static MaybeHandle<Object>
- GetObjectProperty(Isolate* isolate, Handle<Object> holder, Handle<Object> key,
+ GetObjectProperty(Isolate* isolate, Handle<Object> lookup_start_object,
+ Handle<Object> key,
Handle<Object> receiver = Handle<Object>(),
bool* is_found = nullptr);
@@ -862,6 +868,8 @@ enum class OptimizationStatus {
kLiteMode = 1 << 12,
kMarkedForDeoptimization = 1 << 13,
kBaseline = 1 << 14,
+ kTopmostFrameIsInterpreted = 1 << 15,
+ kTopmostFrameIsBaseline = 1 << 16,
};
} // namespace internal
diff --git a/chromium/v8/src/sanitizer/asan.h b/chromium/v8/src/sanitizer/asan.h
deleted file mode 100644
index 0381e5a4c48..00000000000
--- a/chromium/v8/src/sanitizer/asan.h
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2018 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// AddressSanitizer support.
-
-#ifndef V8_SANITIZER_ASAN_H_
-#define V8_SANITIZER_ASAN_H_
-
-#include "src/base/macros.h"
-#include "src/common/globals.h"
-
-#ifdef V8_USE_ADDRESS_SANITIZER
-
-#include <sanitizer/asan_interface.h>
-
-#else // !V8_USE_ADDRESS_SANITIZER
-
-#define ASAN_POISON_MEMORY_REGION(start, size) \
- static_assert(std::is_pointer<decltype(start)>::value && \
- std::is_convertible<decltype(size), size_t>::value, \
- "static type violation")
-#define ASAN_UNPOISON_MEMORY_REGION(start, size) \
- ASAN_POISON_MEMORY_REGION(start, size)
-
-#endif // V8_USE_ADDRESS_SANITIZER
-
-#endif // V8_SANITIZER_ASAN_H_
diff --git a/chromium/v8/src/sanitizer/msan.h b/chromium/v8/src/sanitizer/msan.h
deleted file mode 100644
index 01e774e7e41..00000000000
--- a/chromium/v8/src/sanitizer/msan.h
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// MemorySanitizer support.
-
-#ifndef V8_SANITIZER_MSAN_H_
-#define V8_SANITIZER_MSAN_H_
-
-#include "src/base/macros.h"
-#include "src/common/globals.h"
-
-#ifdef V8_USE_MEMORY_SANITIZER
-
-#include <sanitizer/msan_interface.h>
-
-// Marks a memory range as uninitialized, as if it was allocated here.
-#define MSAN_ALLOCATED_UNINITIALIZED_MEMORY(p, s) \
- __msan_allocated_memory(reinterpret_cast<const void*>(p), (s))
-// Marks a memory range as initialized.
-#define MSAN_MEMORY_IS_INITIALIZED(p, s) \
- __msan_unpoison(reinterpret_cast<const void*>(p), (s))
-
-#else // !V8_USE_MEMORY_SANITIZER
-
-#define MSAN_ALLOCATED_UNINITIALIZED_MEMORY(p, s) \
- static_assert((std::is_pointer<decltype(p)>::value || \
- std::is_same<v8::internal::Address, decltype(p)>::value) && \
- std::is_convertible<decltype(s), size_t>::value, \
- "static type violation")
-#define MSAN_MEMORY_IS_INITIALIZED(p, s) \
- MSAN_ALLOCATED_UNINITIALIZED_MEMORY(p, s)
-
-#endif // V8_USE_MEMORY_SANITIZER
-
-#endif // V8_SANITIZER_MSAN_H_
diff --git a/chromium/v8/src/sanitizer/tsan.h b/chromium/v8/src/sanitizer/tsan.h
deleted file mode 100644
index 0013b91bfc7..00000000000
--- a/chromium/v8/src/sanitizer/tsan.h
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_SANITIZER_TSAN_H_
-#define V8_SANITIZER_TSAN_H_
-
-namespace v8 {
-namespace base {
-
-// This file contains annotations for ThreadSanitizer (TSan), a race detector.
-// See
-// https://llvm.org/svn/llvm-project/compiler-rt/trunk/lib/tsan/rtl/tsan_interface_ann.cc
-
-#if THREAD_SANITIZER
-
-#define TSAN_ANNOTATE_IGNORE_READS_BEGIN \
- v8::base::AnnotateIgnoreReadsBegin(__FILE__, __LINE__)
-#define TSAN_ANNOTATE_IGNORE_READS_END \
- v8::base::AnnotateIgnoreReadsEnd(__FILE__, __LINE__)
-#define TSAN_ANNOTATE_IGNORE_WRITES_BEGIN \
- v8::base::AnnotateIgnoreWritesBegin(__FILE__, __LINE__)
-#define TSAN_ANNOTATE_IGNORE_WRITES_END \
- v8::base::AnnotateIgnoreWritesEnd(__FILE__, __LINE__)
-
-extern "C" {
-
-void AnnotateIgnoreReadsBegin(const char* file, int line);
-void AnnotateIgnoreReadsEnd(const char* file, int line);
-void AnnotateIgnoreWritesBegin(const char* file, int line);
-void AnnotateIgnoreWritesEnd(const char* file, int line);
-
-} // extern "C"
-
-#else
-
-#define TSAN_ANNOTATE_IGNORE_READS_BEGIN ((void)0)
-#define TSAN_ANNOTATE_IGNORE_READS_END ((void)0)
-#define TSAN_ANNOTATE_IGNORE_WRITES_BEGIN ((void)0)
-#define TSAN_ANNOTATE_IGNORE_WRITES_END ((void)0)
-
-#endif
-
-} // namespace base
-} // namespace v8
-
-#endif // V8_SANITIZER_TSAN_H_
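The three headers deleted above (asan.h, msan.h, tsan.h) are relocated rather than dropped: later hunks in this patch switch includes over to "src/base/sanitizer/msan.h". The pattern worth noting is that their disabled fallbacks still type-check the macro arguments, so call sites cannot drift out of sync with sanitizer builds. A minimal sketch of that pattern (same idea as the deleted ASAN/MSAN fallbacks; the macro name here is illustrative):

  #include <cstddef>
  #include <type_traits>

  // No-op when the sanitizer is off, but still rejects wrongly typed arguments
  // at compile time via an unevaluated decltype.
  #define SANITIZER_NOOP_CHECK(start, size)                                  \
    static_assert(std::is_pointer<decltype(start)>::value &&                 \
                      std::is_convertible<decltype(size), std::size_t>::value, \
                  "static type violation")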
diff --git a/chromium/v8/src/snapshot/code-serializer.cc b/chromium/v8/src/snapshot/code-serializer.cc
index a4641baabf2..3725b267f57 100644
--- a/chromium/v8/src/snapshot/code-serializer.cc
+++ b/chromium/v8/src/snapshot/code-serializer.cc
@@ -44,8 +44,7 @@ ScriptCompiler::CachedData* CodeSerializer::Serialize(
Isolate* isolate = info->GetIsolate();
TRACE_EVENT_CALL_STATS_SCOPED(isolate, "v8", "V8.Execute");
HistogramTimerScope histogram_timer(isolate->counters()->compile_serialize());
- RuntimeCallTimerScope runtimeTimer(isolate,
- RuntimeCallCounterId::kCompileSerialize);
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kCompileSerialize);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.CompileSerialize");
base::ElapsedTimer timer;
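The RCS_SCOPE change here (and the matching ones in snapshot.cc further down) replaces hand-written RuntimeCallTimerScope locals with a macro. The macro's definition is not part of this patch; presumably it simply declares the scope object with a generated name, roughly along these lines (sketch, assumption):

  // Hypothetical expansion: keeps the counter id, drops the named local.
  #define RCS_SCOPE(isolate, counter_id) \
    RuntimeCallTimerScope rcs_timer_scope((isolate), (counter_id))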
diff --git a/chromium/v8/src/snapshot/context-deserializer.cc b/chromium/v8/src/snapshot/context-deserializer.cc
index 04756b5ffe8..ad109baccaf 100644
--- a/chromium/v8/src/snapshot/context-deserializer.cc
+++ b/chromium/v8/src/snapshot/context-deserializer.cc
@@ -62,9 +62,11 @@ void ContextDeserializer::SetupOffHeapArrayBufferBackingStores() {
uint32_t store_index = buffer->GetBackingStoreRefForDeserialization();
auto bs = backing_store(store_index);
buffer->AllocateExternalPointerEntries(isolate());
+ // TODO(v8:11111): Support RAB / GSAB.
+ CHECK(!buffer->is_resizable());
SharedFlag shared =
bs && bs->is_shared() ? SharedFlag::kShared : SharedFlag::kNotShared;
- buffer->Setup(shared, bs);
+ buffer->Setup(shared, ResizableFlag::kNotResizable, bs);
}
}
diff --git a/chromium/v8/src/snapshot/deserializer.cc b/chromium/v8/src/snapshot/deserializer.cc
index ecfa889f1e3..7756580c836 100644
--- a/chromium/v8/src/snapshot/deserializer.cc
+++ b/chromium/v8/src/snapshot/deserializer.cc
@@ -464,6 +464,9 @@ void Deserializer::PostProcessNewObject(Handle<Map> map, Handle<HeapObject> obj,
DCHECK(InstanceTypeChecker::IsStrongDescriptorArray(instance_type));
Handle<DescriptorArray> descriptors = Handle<DescriptorArray>::cast(obj);
new_descriptor_arrays_.push_back(descriptors);
+ } else if (InstanceTypeChecker::IsNativeContext(instance_type)) {
+ Handle<NativeContext> context = Handle<NativeContext>::cast(obj);
+ context->AllocateExternalPointerEntries(isolate());
}
// Check alignment.
diff --git a/chromium/v8/src/snapshot/embedded/embedded-data.cc b/chromium/v8/src/snapshot/embedded/embedded-data.cc
index 2a0549cfbb7..fb3883a410d 100644
--- a/chromium/v8/src/snapshot/embedded/embedded-data.cc
+++ b/chromium/v8/src/snapshot/embedded/embedded-data.cc
@@ -6,6 +6,7 @@
#include "src/codegen/assembler-inl.h"
#include "src/codegen/callable.h"
+#include "src/codegen/interface-descriptors-inl.h"
#include "src/objects/objects-inl.h"
#include "src/snapshot/snapshot-utils.h"
#include "src/snapshot/snapshot.h"
@@ -137,6 +138,9 @@ void InstructionStream::CreateOffHeapInstructionStream(Isolate* isolate,
// in the binary) and what we are currently setting up here (where the blob is
// on the native heap).
std::memcpy(allocated_code_bytes, d.code(), d.code_size());
+ if (FLAG_experimental_flush_embedded_blob_icache) {
+ FlushInstructionCache(allocated_code_bytes, d.code_size());
+ }
CHECK(SetPermissions(page_allocator, allocated_code_bytes,
allocation_code_size, PageAllocator::kReadExecute));
@@ -184,14 +188,15 @@ bool BuiltinAliasesOffHeapTrampolineRegister(Isolate* isolate, Code code) {
return false;
}
+ if (CallInterfaceDescriptor::ContextRegister() ==
+ kOffHeapTrampolineRegister) {
+ return true;
+ }
+
Callable callable = Builtins::CallableFor(
isolate, static_cast<Builtins::Name>(code.builtin_index()));
CallInterfaceDescriptor descriptor = callable.descriptor();
- if (descriptor.ContextRegister() == kOffHeapTrampolineRegister) {
- return true;
- }
-
for (int i = 0; i < descriptor.GetRegisterParameterCount(); i++) {
Register reg = descriptor.GetRegisterParameter(i);
if (reg == kOffHeapTrampolineRegister) return true;
@@ -213,7 +218,8 @@ void FinalizeEmbeddedCodeTargets(Isolate* isolate, EmbeddedData* blob) {
#if defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_ARM64) || \
defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS) || \
- defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_S390)
+ defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_S390) || \
+ defined(V8_TARGET_ARCH_RISCV64)
// On these platforms we emit relative builtin-to-builtin
// jumps for isolate independent builtins in the snapshot. This fixes up the
// relative jumps to the right offsets in the snapshot.
@@ -453,10 +459,9 @@ void EmbeddedData::PrintStatistics() const {
const int k99th = kCount * 0.99;
PrintF("EmbeddedData:\n");
- PrintF(" Total size: %d\n",
+ PrintF(" Total size: %d\n",
static_cast<int>(code_size() + data_size()));
- PrintF(" Data size: %d\n",
- static_cast<int>(data_size()));
+ PrintF(" Data size: %d\n", static_cast<int>(data_size()));
PrintF(" Code size: %d\n", static_cast<int>(code_size()));
PrintF(" Instruction size (50th percentile): %d\n", sizes[k50th]);
PrintF(" Instruction size (75th percentile): %d\n", sizes[k75th]);
diff --git a/chromium/v8/src/snapshot/embedded/embedded-data.h b/chromium/v8/src/snapshot/embedded/embedded-data.h
index 6518c38d025..12f524d154b 100644
--- a/chromium/v8/src/snapshot/embedded/embedded-data.h
+++ b/chromium/v8/src/snapshot/embedded/embedded-data.h
@@ -9,6 +9,7 @@
#include "src/builtins/builtins.h"
#include "src/common/globals.h"
#include "src/execution/isolate.h"
+#include "src/heap/code-range.h"
namespace v8 {
namespace internal {
@@ -62,6 +63,13 @@ class EmbeddedData final {
isolate->embedded_blob_data(), isolate->embedded_blob_data_size());
}
+ static EmbeddedData FromBlob(CodeRange* code_range) {
+ return EmbeddedData(code_range->embedded_blob_code_copy(),
+ Isolate::CurrentEmbeddedBlobCodeSize(),
+ Isolate::CurrentEmbeddedBlobData(),
+ Isolate::CurrentEmbeddedBlobDataSize());
+ }
+
const uint8_t* code() const { return code_; }
uint32_t code_size() const { return code_size_; }
const uint8_t* data() const { return data_; }
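The new FromBlob(CodeRange*) overload above builds an EmbeddedData view whose code section comes from the copy of the embedded blob living inside the given CodeRange, while the data section still comes from the process-wide blob. A minimal usage sketch (only names visible in this hunk are used; the helper itself is illustrative):

  // Returns the start of the embedded builtins code as remapped into the
  // given CodeRange.
  const uint8_t* EmbeddedCodeFor(CodeRange* code_range) {
    EmbeddedData d = EmbeddedData::FromBlob(code_range);
    return d.code();
  }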
diff --git a/chromium/v8/src/snapshot/mksnapshot.cc b/chromium/v8/src/snapshot/mksnapshot.cc
index 4cccc8d1735..9a1e988bf86 100644
--- a/chromium/v8/src/snapshot/mksnapshot.cc
+++ b/chromium/v8/src/snapshot/mksnapshot.cc
@@ -11,10 +11,10 @@
#include "include/libplatform/libplatform.h"
#include "src/base/platform/platform.h"
#include "src/base/platform/wrappers.h"
+#include "src/base/sanitizer/msan.h"
#include "src/codegen/assembler-arch.h"
#include "src/codegen/source-position-table.h"
#include "src/flags/flags.h"
-#include "src/sanitizer/msan.h"
#include "src/snapshot/context-serializer.h"
#include "src/snapshot/embedded/embedded-file-writer.h"
#include "src/snapshot/snapshot.h"
@@ -155,7 +155,7 @@ v8::StartupData CreateSnapshotDataBlob(v8::Isolate* isolate,
isolate);
if (i::FLAG_profile_deserialization) {
- i::PrintF("Creating snapshot took %0.3f ms\n",
+ i::PrintF("[Creating snapshot took %0.3f ms]\n",
timer.Elapsed().InMillisecondsF());
}
diff --git a/chromium/v8/src/snapshot/object-deserializer.cc b/chromium/v8/src/snapshot/object-deserializer.cc
index 929996ee106..d5ce8cc6e93 100644
--- a/chromium/v8/src/snapshot/object-deserializer.cc
+++ b/chromium/v8/src/snapshot/object-deserializer.cc
@@ -66,7 +66,9 @@ void ObjectDeserializer::CommitPostProcessedObjects() {
auto bs = backing_store(store_index);
SharedFlag shared =
bs && bs->is_shared() ? SharedFlag::kShared : SharedFlag::kNotShared;
- buffer->Setup(shared, bs);
+ // TODO(v8:11111): Support RAB / GSAB.
+ CHECK(!bs || !bs->is_resizable());
+ buffer->Setup(shared, ResizableFlag::kNotResizable, bs);
}
for (Handle<Script> script : new_scripts()) {
diff --git a/chromium/v8/src/snapshot/read-only-serializer.cc b/chromium/v8/src/snapshot/read-only-serializer.cc
index 06c5094782c..3dc5af0b0d7 100644
--- a/chromium/v8/src/snapshot/read-only-serializer.cc
+++ b/chromium/v8/src/snapshot/read-only-serializer.cc
@@ -74,6 +74,10 @@ void ReadOnlySerializer::SerializeReadOnlyRoots() {
isolate()->handle_scope_implementer()->blocks()->empty());
ReadOnlyRoots(isolate()).Iterate(this);
+
+ if (reconstruct_read_only_object_cache_for_testing()) {
+ ReconstructReadOnlyObjectCacheForTesting();
+ }
}
void ReadOnlySerializer::FinalizeSerialization() {
@@ -129,5 +133,18 @@ bool ReadOnlySerializer::SerializeUsingReadOnlyObjectCache(
return true;
}
+void ReadOnlySerializer::ReconstructReadOnlyObjectCacheForTesting() {
+ ReadOnlyHeap* ro_heap = isolate()->read_only_heap();
+ DCHECK(ro_heap->read_only_object_cache_is_initialized());
+ for (size_t i = 0, size = ro_heap->read_only_object_cache_size(); i < size;
+ i++) {
+ Handle<HeapObject> obj(
+ HeapObject::cast(ro_heap->cached_read_only_object(i)), isolate());
+ int cache_index = SerializeInObjectCache(obj);
+ USE(cache_index);
+ DCHECK_EQ(cache_index, i);
+ }
+}
+
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/snapshot/read-only-serializer.h b/chromium/v8/src/snapshot/read-only-serializer.h
index fd88b9f7b65..7f9482f3b9a 100644
--- a/chromium/v8/src/snapshot/read-only-serializer.h
+++ b/chromium/v8/src/snapshot/read-only-serializer.h
@@ -37,6 +37,8 @@ class V8_EXPORT_PRIVATE ReadOnlySerializer : public RootsSerializer {
Handle<HeapObject> obj);
private:
+ void ReconstructReadOnlyObjectCacheForTesting();
+
void SerializeObjectImpl(Handle<HeapObject> o) override;
bool MustBeDeferred(HeapObject object) override;
diff --git a/chromium/v8/src/snapshot/serializer-deserializer.h b/chromium/v8/src/snapshot/serializer-deserializer.h
index 0e156f75a09..82f34001065 100644
--- a/chromium/v8/src/snapshot/serializer-deserializer.h
+++ b/chromium/v8/src/snapshot/serializer-deserializer.h
@@ -187,14 +187,13 @@ class SerializerDeserializer : public RootVisitor {
}
static constexpr byte Encode(TValue value) {
- CONSTEXPR_DCHECK(IsEncodable(value));
+ DCHECK(IsEncodable(value));
return static_cast<byte>(kBytecode + static_cast<int>(value) - kMinValue);
}
static constexpr TValue Decode(byte bytecode) {
- CONSTEXPR_DCHECK(base::IsInRange(bytecode,
- Encode(static_cast<TValue>(kMinValue)),
- Encode(static_cast<TValue>(kMaxValue))));
+ DCHECK(base::IsInRange(bytecode, Encode(static_cast<TValue>(kMinValue)),
+ Encode(static_cast<TValue>(kMaxValue))));
return static_cast<TValue>(bytecode - kBytecode + kMinValue);
}
};
@@ -241,7 +240,7 @@ class SerializerDeserializer : public RootVisitor {
}
static constexpr int Encode(int repeat_count) {
- CONSTEXPR_DCHECK(IsEncodable(repeat_count));
+ DCHECK(IsEncodable(repeat_count));
return repeat_count - kFirstEncodableVariableRepeatCount;
}
diff --git a/chromium/v8/src/snapshot/serializer.cc b/chromium/v8/src/snapshot/serializer.cc
index 89c5485d62c..7f7551316c1 100644
--- a/chromium/v8/src/snapshot/serializer.cc
+++ b/chromium/v8/src/snapshot/serializer.cc
@@ -98,9 +98,9 @@ void Serializer::OutputStatistics(const char* name) {
}
INSTANCE_TYPE_LIST(PRINT_INSTANCE_TYPE)
#undef PRINT_INSTANCE_TYPE
+#endif // OBJECT_PRINT
PrintF("\n");
-#endif // OBJECT_PRINT
}
void Serializer::SerializeDeferredObjects() {
diff --git a/chromium/v8/src/snapshot/serializer.h b/chromium/v8/src/snapshot/serializer.h
index 3743fa02c90..a1b17e4fd56 100644
--- a/chromium/v8/src/snapshot/serializer.h
+++ b/chromium/v8/src/snapshot/serializer.h
@@ -298,6 +298,10 @@ class Serializer : public SerializerDeserializer {
return (flags_ & Snapshot::kAllowActiveIsolateForTesting) != 0;
}
+ bool reconstruct_read_only_object_cache_for_testing() const {
+ return (flags_ & Snapshot::kReconstructReadOnlyObjectCacheForTesting) != 0;
+ }
+
private:
// A circular queue of hot objects. This is added to in the same order as in
// Deserializer::HotObjectsList, but this stores the objects as an array of
diff --git a/chromium/v8/src/snapshot/snapshot-utils.cc b/chromium/v8/src/snapshot/snapshot-utils.cc
index eb2372372c9..df53dfe751e 100644
--- a/chromium/v8/src/snapshot/snapshot-utils.cc
+++ b/chromium/v8/src/snapshot/snapshot-utils.cc
@@ -4,7 +4,7 @@
#include "src/snapshot/snapshot-utils.h"
-#include "src/sanitizer/msan.h"
+#include "src/base/sanitizer/msan.h"
#include "third_party/zlib/zlib.h"
namespace v8 {
diff --git a/chromium/v8/src/snapshot/snapshot.cc b/chromium/v8/src/snapshot/snapshot.cc
index b78e6a70d6e..3b4db28447b 100644
--- a/chromium/v8/src/snapshot/snapshot.cc
+++ b/chromium/v8/src/snapshot/snapshot.cc
@@ -144,8 +144,7 @@ bool Snapshot::VersionIsValid(const v8::StartupData* data) {
bool Snapshot::Initialize(Isolate* isolate) {
if (!isolate->snapshot_available()) return false;
- RuntimeCallTimerScope rcs_timer(isolate,
- RuntimeCallCounterId::kDeserializeIsolate);
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kDeserializeIsolate);
base::ElapsedTimer timer;
if (FLAG_profile_deserialization) timer.Start();
@@ -173,8 +172,7 @@ MaybeHandle<Context> Snapshot::NewContextFromSnapshot(
Isolate* isolate, Handle<JSGlobalProxy> global_proxy, size_t context_index,
v8::DeserializeEmbedderFieldsCallback embedder_fields_deserializer) {
if (!isolate->snapshot_available()) return Handle<Context>();
- RuntimeCallTimerScope rcs_timer(isolate,
- RuntimeCallCounterId::kDeserializeContext);
+ RCS_SCOPE(isolate, RuntimeCallCounterId::kDeserializeContext);
base::ElapsedTimer timer;
if (FLAG_profile_deserialization) timer.Start();
@@ -288,7 +286,10 @@ void Snapshot::SerializeDeserializeAndVerifyForTesting(
Snapshot::SerializerFlags flags(
Snapshot::kAllowUnknownExternalReferencesForTesting |
- Snapshot::kAllowActiveIsolateForTesting);
+ Snapshot::kAllowActiveIsolateForTesting |
+ (ReadOnlyHeap::IsReadOnlySpaceShared()
+ ? Snapshot::kReconstructReadOnlyObjectCacheForTesting
+ : 0));
serialized_data = Snapshot::Create(isolate, *default_context, no_gc, flags);
auto_delete_serialized_data.reset(serialized_data.data);
}
@@ -360,7 +361,7 @@ v8::StartupData Snapshot::Create(
context_serializer.Serialize(&contexts->at(i), no_gc);
can_be_rehashed = can_be_rehashed && context_serializer.can_be_rehashed();
context_snapshots.push_back(new SnapshotData(&context_serializer));
- if (FLAG_profile_deserialization) {
+ if (FLAG_serialization_statistics) {
context_allocation_sizes.push_back(
context_serializer.TotalAllocationSize());
}
@@ -374,15 +375,19 @@ v8::StartupData Snapshot::Create(
read_only_serializer.FinalizeSerialization();
can_be_rehashed = can_be_rehashed && read_only_serializer.can_be_rehashed();
- if (FLAG_profile_deserialization) {
+ if (FLAG_serialization_statistics) {
// These prints should match the regexp in test/memory/Memory.json
+ DCHECK_NE(read_only_serializer.TotalAllocationSize(), 0);
+ DCHECK_NE(startup_serializer.TotalAllocationSize(), 0);
PrintF("Deserialization will allocate:\n");
PrintF("%10d bytes per isolate\n",
read_only_serializer.TotalAllocationSize() +
startup_serializer.TotalAllocationSize());
for (int i = 0; i < num_contexts; i++) {
+ DCHECK_NE(context_allocation_sizes[i], 0);
PrintF("%10d bytes per context #%d\n", context_allocation_sizes[i], i);
}
+ PrintF("\n");
}
SnapshotData read_only_snapshot(&read_only_serializer);
@@ -471,7 +476,7 @@ v8::StartupData SnapshotImpl::CreateSnapshotBlob(
CopyBytes(data + payload_offset,
reinterpret_cast<const char*>(startup_snapshot->RawData().begin()),
payload_length);
- if (FLAG_profile_deserialization) {
+ if (FLAG_serialization_statistics) {
PrintF("Snapshot blob consists of:\n%10d bytes for startup\n",
payload_length);
}
@@ -485,7 +490,7 @@ v8::StartupData SnapshotImpl::CreateSnapshotBlob(
data + payload_offset,
reinterpret_cast<const char*>(read_only_snapshot->RawData().begin()),
payload_length);
- if (FLAG_profile_deserialization) {
+ if (FLAG_serialization_statistics) {
PrintF("%10d bytes for read-only\n", payload_length);
}
payload_offset += payload_length;
@@ -500,11 +505,12 @@ v8::StartupData SnapshotImpl::CreateSnapshotBlob(
data + payload_offset,
reinterpret_cast<const char*>(context_snapshot->RawData().begin()),
payload_length);
- if (FLAG_profile_deserialization) {
+ if (FLAG_serialization_statistics) {
PrintF("%10d bytes for context #%d\n", payload_length, i);
}
payload_offset += payload_length;
}
+ if (FLAG_serialization_statistics) PrintF("\n");
DCHECK_EQ(total_length, payload_offset);
v8::StartupData result = {data, static_cast<int>(total_length)};
diff --git a/chromium/v8/src/snapshot/snapshot.h b/chromium/v8/src/snapshot/snapshot.h
index 0e510f0096f..2f16eee6d58 100644
--- a/chromium/v8/src/snapshot/snapshot.h
+++ b/chromium/v8/src/snapshot/snapshot.h
@@ -36,6 +36,15 @@ class Snapshot : public AllStatic {
// after deserialization.
// If unset, we assert that these previously mentioned areas are empty.
kAllowActiveIsolateForTesting = 1 << 1,
+ // If set, the ReadOnlySerializer reconstructs the read-only object cache
+ // from the existing ReadOnlyHeap's read-only object cache so the same
+ // mapping is used. This mode is used for testing deserialization of a
+ // snapshot from a live isolate that's using a shared
+ // ReadOnlyHeap. Otherwise during deserialization the indices will mismatch,
+ // causing deserialization crashes when e.g. types mismatch.
+ // If unset, the read-only object cache is populated as read-only objects
+ // are serialized.
+ kReconstructReadOnlyObjectCacheForTesting = 1 << 2,
};
using SerializerFlags = base::Flags<SerializerFlag>;
V8_EXPORT_PRIVATE static constexpr SerializerFlags kDefaultSerializerFlags =
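Taken together with the snapshot.cc and serializer.h hunks above, the new kReconstructReadOnlyObjectCacheForTesting bit is composed and queried like the existing testing flags. A minimal sketch using only the patterns that appear in those hunks:

  // Compose: enable the flag only when the read-only space is shared.
  Snapshot::SerializerFlags flags(
      Snapshot::kAllowActiveIsolateForTesting |
      (ReadOnlyHeap::IsReadOnlySpaceShared()
           ? Snapshot::kReconstructReadOnlyObjectCacheForTesting
           : 0));

  // Query: the same bit test the serializer uses internally.
  bool reconstruct =
      (flags & Snapshot::kReconstructReadOnlyObjectCacheForTesting) != 0;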
diff --git a/chromium/v8/src/strings/unicode.cc b/chromium/v8/src/strings/unicode.cc
index 21faccd0b42..a74e700ebd4 100644
--- a/chromium/v8/src/strings/unicode.cc
+++ b/chromium/v8/src/strings/unicode.cc
@@ -238,160 +238,112 @@ bool Uppercase::Is(uchar c) { return static_cast<bool>(u_isupper(c)); }
#else
static const uint16_t kUppercaseTable0Size = 455;
static const int32_t kUppercaseTable0[455] = {
- 1073741889, 90, 1073742016, 214,
- 1073742040, 222, 256, 258, // NOLINT
- 260, 262, 264, 266,
- 268, 270, 272, 274, // NOLINT
- 276, 278, 280, 282,
- 284, 286, 288, 290, // NOLINT
- 292, 294, 296, 298,
- 300, 302, 304, 306, // NOLINT
- 308, 310, 313, 315,
- 317, 319, 321, 323, // NOLINT
- 325, 327, 330, 332,
- 334, 336, 338, 340, // NOLINT
- 342, 344, 346, 348,
- 350, 352, 354, 356, // NOLINT
- 358, 360, 362, 364,
- 366, 368, 370, 372, // NOLINT
- 374, 1073742200, 377, 379,
- 381, 1073742209, 386, 388, // NOLINT
- 1073742214, 391, 1073742217, 395,
- 1073742222, 401, 1073742227, 404, // NOLINT
- 1073742230, 408, 1073742236, 413,
- 1073742239, 416, 418, 420, // NOLINT
- 1073742246, 423, 425, 428,
- 1073742254, 431, 1073742257, 435, // NOLINT
- 437, 1073742263, 440, 444,
- 452, 455, 458, 461, // NOLINT
- 463, 465, 467, 469,
- 471, 473, 475, 478, // NOLINT
- 480, 482, 484, 486,
- 488, 490, 492, 494, // NOLINT
- 497, 500, 1073742326, 504,
- 506, 508, 510, 512, // NOLINT
- 514, 516, 518, 520,
- 522, 524, 526, 528, // NOLINT
- 530, 532, 534, 536,
- 538, 540, 542, 544, // NOLINT
- 546, 548, 550, 552,
- 554, 556, 558, 560, // NOLINT
- 562, 1073742394, 571, 1073742397,
- 574, 577, 1073742403, 582, // NOLINT
- 584, 586, 588, 590,
- 880, 882, 886, 895, // NOLINT
- 902, 1073742728, 906, 908,
- 1073742734, 911, 1073742737, 929, // NOLINT
- 1073742755, 939, 975, 1073742802,
- 980, 984, 986, 988, // NOLINT
- 990, 992, 994, 996,
- 998, 1000, 1002, 1004, // NOLINT
- 1006, 1012, 1015, 1073742841,
- 1018, 1073742845, 1071, 1120, // NOLINT
- 1122, 1124, 1126, 1128,
- 1130, 1132, 1134, 1136, // NOLINT
- 1138, 1140, 1142, 1144,
- 1146, 1148, 1150, 1152, // NOLINT
- 1162, 1164, 1166, 1168,
- 1170, 1172, 1174, 1176, // NOLINT
- 1178, 1180, 1182, 1184,
- 1186, 1188, 1190, 1192, // NOLINT
- 1194, 1196, 1198, 1200,
- 1202, 1204, 1206, 1208, // NOLINT
- 1210, 1212, 1214, 1073743040,
- 1217, 1219, 1221, 1223, // NOLINT
- 1225, 1227, 1229, 1232,
- 1234, 1236, 1238, 1240, // NOLINT
- 1242, 1244, 1246, 1248,
- 1250, 1252, 1254, 1256, // NOLINT
- 1258, 1260, 1262, 1264,
- 1266, 1268, 1270, 1272, // NOLINT
- 1274, 1276, 1278, 1280,
- 1282, 1284, 1286, 1288, // NOLINT
- 1290, 1292, 1294, 1296,
- 1298, 1300, 1302, 1304, // NOLINT
- 1306, 1308, 1310, 1312,
- 1314, 1316, 1318, 1320, // NOLINT
- 1322, 1324, 1326, 1073743153,
- 1366, 1073746080, 4293, 4295, // NOLINT
- 4301, 7680, 7682, 7684,
- 7686, 7688, 7690, 7692, // NOLINT
- 7694, 7696, 7698, 7700,
- 7702, 7704, 7706, 7708, // NOLINT
- 7710, 7712, 7714, 7716,
- 7718, 7720, 7722, 7724, // NOLINT
- 7726, 7728, 7730, 7732,
- 7734, 7736, 7738, 7740, // NOLINT
- 7742, 7744, 7746, 7748,
- 7750, 7752, 7754, 7756, // NOLINT
- 7758, 7760, 7762, 7764,
- 7766, 7768, 7770, 7772, // NOLINT
- 7774, 7776, 7778, 7780,
- 7782, 7784, 7786, 7788, // NOLINT
- 7790, 7792, 7794, 7796,
- 7798, 7800, 7802, 7804, // NOLINT
- 7806, 7808, 7810, 7812,
- 7814, 7816, 7818, 7820, // NOLINT
- 7822, 7824, 7826, 7828,
- 7838, 7840, 7842, 7844, // NOLINT
- 7846, 7848, 7850, 7852,
- 7854, 7856, 7858, 7860, // NOLINT
- 7862, 7864, 7866, 7868,
- 7870, 7872, 7874, 7876, // NOLINT
- 7878, 7880, 7882, 7884,
- 7886, 7888, 7890, 7892, // NOLINT
- 7894, 7896, 7898, 7900,
- 7902, 7904, 7906, 7908, // NOLINT
- 7910, 7912, 7914, 7916,
- 7918, 7920, 7922, 7924, // NOLINT
- 7926, 7928, 7930, 7932,
- 7934, 1073749768, 7951, 1073749784, // NOLINT
- 7965, 1073749800, 7983, 1073749816,
- 7999, 1073749832, 8013, 8025, // NOLINT
- 8027, 8029, 8031, 1073749864,
- 8047, 1073749944, 8123, 1073749960, // NOLINT
- 8139, 1073749976, 8155, 1073749992,
- 8172, 1073750008, 8187}; // NOLINT
+ 1073741889, 90, 1073742016, 214, 1073742040, 222,
+ 256, 258, 260, 262, 264, 266,
+ 268, 270, 272, 274, 276, 278,
+ 280, 282, 284, 286, 288, 290,
+ 292, 294, 296, 298, 300, 302,
+ 304, 306, 308, 310, 313, 315,
+ 317, 319, 321, 323, 325, 327,
+ 330, 332, 334, 336, 338, 340,
+ 342, 344, 346, 348, 350, 352,
+ 354, 356, 358, 360, 362, 364,
+ 366, 368, 370, 372, 374, 1073742200,
+ 377, 379, 381, 1073742209, 386, 388,
+ 1073742214, 391, 1073742217, 395, 1073742222, 401,
+ 1073742227, 404, 1073742230, 408, 1073742236, 413,
+ 1073742239, 416, 418, 420, 1073742246, 423,
+ 425, 428, 1073742254, 431, 1073742257, 435,
+ 437, 1073742263, 440, 444, 452, 455,
+ 458, 461, 463, 465, 467, 469,
+ 471, 473, 475, 478, 480, 482,
+ 484, 486, 488, 490, 492, 494,
+ 497, 500, 1073742326, 504, 506, 508,
+ 510, 512, 514, 516, 518, 520,
+ 522, 524, 526, 528, 530, 532,
+ 534, 536, 538, 540, 542, 544,
+ 546, 548, 550, 552, 554, 556,
+ 558, 560, 562, 1073742394, 571, 1073742397,
+ 574, 577, 1073742403, 582, 584, 586,
+ 588, 590, 880, 882, 886, 895,
+ 902, 1073742728, 906, 908, 1073742734, 911,
+ 1073742737, 929, 1073742755, 939, 975, 1073742802,
+ 980, 984, 986, 988, 990, 992,
+ 994, 996, 998, 1000, 1002, 1004,
+ 1006, 1012, 1015, 1073742841, 1018, 1073742845,
+ 1071, 1120, 1122, 1124, 1126, 1128,
+ 1130, 1132, 1134, 1136, 1138, 1140,
+ 1142, 1144, 1146, 1148, 1150, 1152,
+ 1162, 1164, 1166, 1168, 1170, 1172,
+ 1174, 1176, 1178, 1180, 1182, 1184,
+ 1186, 1188, 1190, 1192, 1194, 1196,
+ 1198, 1200, 1202, 1204, 1206, 1208,
+ 1210, 1212, 1214, 1073743040, 1217, 1219,
+ 1221, 1223, 1225, 1227, 1229, 1232,
+ 1234, 1236, 1238, 1240, 1242, 1244,
+ 1246, 1248, 1250, 1252, 1254, 1256,
+ 1258, 1260, 1262, 1264, 1266, 1268,
+ 1270, 1272, 1274, 1276, 1278, 1280,
+ 1282, 1284, 1286, 1288, 1290, 1292,
+ 1294, 1296, 1298, 1300, 1302, 1304,
+ 1306, 1308, 1310, 1312, 1314, 1316,
+ 1318, 1320, 1322, 1324, 1326, 1073743153,
+ 1366, 1073746080, 4293, 4295, 4301, 7680,
+ 7682, 7684, 7686, 7688, 7690, 7692,
+ 7694, 7696, 7698, 7700, 7702, 7704,
+ 7706, 7708, 7710, 7712, 7714, 7716,
+ 7718, 7720, 7722, 7724, 7726, 7728,
+ 7730, 7732, 7734, 7736, 7738, 7740,
+ 7742, 7744, 7746, 7748, 7750, 7752,
+ 7754, 7756, 7758, 7760, 7762, 7764,
+ 7766, 7768, 7770, 7772, 7774, 7776,
+ 7778, 7780, 7782, 7784, 7786, 7788,
+ 7790, 7792, 7794, 7796, 7798, 7800,
+ 7802, 7804, 7806, 7808, 7810, 7812,
+ 7814, 7816, 7818, 7820, 7822, 7824,
+ 7826, 7828, 7838, 7840, 7842, 7844,
+ 7846, 7848, 7850, 7852, 7854, 7856,
+ 7858, 7860, 7862, 7864, 7866, 7868,
+ 7870, 7872, 7874, 7876, 7878, 7880,
+ 7882, 7884, 7886, 7888, 7890, 7892,
+ 7894, 7896, 7898, 7900, 7902, 7904,
+ 7906, 7908, 7910, 7912, 7914, 7916,
+ 7918, 7920, 7922, 7924, 7926, 7928,
+ 7930, 7932, 7934, 1073749768, 7951, 1073749784,
+ 7965, 1073749800, 7983, 1073749816, 7999, 1073749832,
+ 8013, 8025, 8027, 8029, 8031, 1073749864,
+ 8047, 1073749944, 8123, 1073749960, 8139, 1073749976,
+ 8155, 1073749992, 8172, 1073750008, 8187};
static const uint16_t kUppercaseTable1Size = 86;
static const int32_t kUppercaseTable1[86] = {
- 258, 263, 1073742091, 269, 1073742096, 274,
- 277, 1073742105, // NOLINT
- 285, 292, 294, 296, 1073742122, 301,
- 1073742128, 307, // NOLINT
- 1073742142, 319, 325, 387, 1073744896, 3118,
- 3168, 1073744994, // NOLINT
- 3172, 3175, 3177, 3179, 1073745005, 3184,
- 3186, 3189, // NOLINT
- 1073745022, 3200, 3202, 3204, 3206, 3208,
- 3210, 3212, // NOLINT
- 3214, 3216, 3218, 3220, 3222, 3224,
- 3226, 3228, // NOLINT
- 3230, 3232, 3234, 3236, 3238, 3240,
- 3242, 3244, // NOLINT
- 3246, 3248, 3250, 3252, 3254, 3256,
- 3258, 3260, // NOLINT
- 3262, 3264, 3266, 3268, 3270, 3272,
- 3274, 3276, // NOLINT
- 3278, 3280, 3282, 3284, 3286, 3288,
- 3290, 3292, // NOLINT
- 3294, 3296, 3298, 3307, 3309, 3314}; // NOLINT
+ 258, 263, 1073742091, 269, 1073742096, 274, 277,
+ 1073742105, 285, 292, 294, 296, 1073742122, 301,
+ 1073742128, 307, 1073742142, 319, 325, 387, 1073744896,
+ 3118, 3168, 1073744994, 3172, 3175, 3177, 3179,
+ 1073745005, 3184, 3186, 3189, 1073745022, 3200, 3202,
+ 3204, 3206, 3208, 3210, 3212, 3214, 3216,
+ 3218, 3220, 3222, 3224, 3226, 3228, 3230,
+ 3232, 3234, 3236, 3238, 3240, 3242, 3244,
+ 3246, 3248, 3250, 3252, 3254, 3256, 3258,
+ 3260, 3262, 3264, 3266, 3268, 3270, 3272,
+ 3274, 3276, 3278, 3280, 3282, 3284, 3286,
+ 3288, 3290, 3292, 3294, 3296, 3298, 3307,
+ 3309, 3314};
static const uint16_t kUppercaseTable5Size = 101;
static const int32_t kUppercaseTable5[101] = {
- 1600, 1602, 1604, 1606, 1608, 1610, 1612, 1614, // NOLINT
- 1616, 1618, 1620, 1622, 1624, 1626, 1628, 1630, // NOLINT
- 1632, 1634, 1636, 1638, 1640, 1642, 1644, 1664, // NOLINT
- 1666, 1668, 1670, 1672, 1674, 1676, 1678, 1680, // NOLINT
- 1682, 1684, 1686, 1688, 1690, 1826, 1828, 1830, // NOLINT
- 1832, 1834, 1836, 1838, 1842, 1844, 1846, 1848, // NOLINT
- 1850, 1852, 1854, 1856, 1858, 1860, 1862, 1864, // NOLINT
- 1866, 1868, 1870, 1872, 1874, 1876, 1878, 1880, // NOLINT
- 1882, 1884, 1886, 1888, 1890, 1892, 1894, 1896, // NOLINT
- 1898, 1900, 1902, 1913, 1915, 1073743741, 1918, 1920, // NOLINT
- 1922, 1924, 1926, 1931, 1933, 1936, 1938, 1942, // NOLINT
- 1944, 1946, 1948, 1950, 1952, 1954, 1956, 1958, // NOLINT
- 1960, 1073743786, 1965, 1073743792, 1969}; // NOLINT
+ 1600, 1602, 1604, 1606, 1608, 1610, 1612, 1614, 1616, 1618,
+ 1620, 1622, 1624, 1626, 1628, 1630, 1632, 1634, 1636, 1638,
+ 1640, 1642, 1644, 1664, 1666, 1668, 1670, 1672, 1674, 1676,
+ 1678, 1680, 1682, 1684, 1686, 1688, 1690, 1826, 1828, 1830,
+ 1832, 1834, 1836, 1838, 1842, 1844, 1846, 1848, 1850, 1852,
+ 1854, 1856, 1858, 1860, 1862, 1864, 1866, 1868, 1870, 1872,
+ 1874, 1876, 1878, 1880, 1882, 1884, 1886, 1888, 1890, 1892,
+ 1894, 1896, 1898, 1900, 1902, 1913, 1915, 1073743741, 1918, 1920,
+ 1922, 1924, 1926, 1931, 1933, 1936, 1938, 1942, 1944, 1946,
+ 1948, 1950, 1952, 1954, 1956, 1958, 1960, 1073743786, 1965, 1073743792,
+ 1969};
static const uint16_t kUppercaseTable7Size = 2;
-static const int32_t kUppercaseTable7[2] = {1073749793, 7994}; // NOLINT
+static const int32_t kUppercaseTable7[2] = {1073749793, 7994};
bool Uppercase::Is(uchar c) {
int chunk_index = c >> 13;
switch (chunk_index) {
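A note on the tables being reflowed in this file: the hunks drop the per-line NOLINT comments and repack the literals, leaving the values themselves unchanged. Each table is a sorted list in which an entry with bit 30 set (1073741824 = 0x40000000) appears to mark the start of a range extending up to the next entry, while other entries stand for single code points; for example the pair 1073741889, 90 encodes 'A'..'Z' (65..90). A sketch of decoding one entry under that assumption (encoding inferred from the values, not code from this patch):

  constexpr int32_t kRangeStartBit = 1 << 30;
  bool IsRangeStart(int32_t entry) { return (entry & kRangeStartBit) != 0; }
  int32_t CodePoint(int32_t entry) { return entry & ~kRangeStartBit; }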
@@ -415,189 +367,133 @@ bool Letter::Is(uchar c) { return static_cast<bool>(u_isalpha(c)); }
#else
static const uint16_t kLetterTable0Size = 431;
static const int32_t kLetterTable0[431] = {
- 1073741889, 90, 1073741921, 122,
- 170, 181, 186, 1073742016, // NOLINT
- 214, 1073742040, 246, 1073742072,
- 705, 1073742534, 721, 1073742560, // NOLINT
- 740, 748, 750, 1073742704,
- 884, 1073742710, 887, 1073742714, // NOLINT
- 893, 895, 902, 1073742728,
- 906, 908, 1073742734, 929, // NOLINT
- 1073742755, 1013, 1073742839, 1153,
- 1073742986, 1327, 1073743153, 1366, // NOLINT
- 1369, 1073743201, 1415, 1073743312,
- 1514, 1073743344, 1522, 1073743392, // NOLINT
- 1610, 1073743470, 1647, 1073743473,
- 1747, 1749, 1073743589, 1766, // NOLINT
- 1073743598, 1775, 1073743610, 1788,
- 1791, 1808, 1073743634, 1839, // NOLINT
- 1073743693, 1957, 1969, 1073743818,
- 2026, 1073743860, 2037, 2042, // NOLINT
- 1073743872, 2069, 2074, 2084,
- 2088, 1073743936, 2136, 1073744032, // NOLINT
- 2226, 1073744132, 2361, 2365,
- 2384, 1073744216, 2401, 1073744241, // NOLINT
- 2432, 1073744261, 2444, 1073744271,
- 2448, 1073744275, 2472, 1073744298, // NOLINT
- 2480, 2482, 1073744310, 2489,
- 2493, 2510, 1073744348, 2525, // NOLINT
- 1073744351, 2529, 1073744368, 2545,
- 1073744389, 2570, 1073744399, 2576, // NOLINT
- 1073744403, 2600, 1073744426, 2608,
- 1073744434, 2611, 1073744437, 2614, // NOLINT
- 1073744440, 2617, 1073744473, 2652,
- 2654, 1073744498, 2676, 1073744517, // NOLINT
- 2701, 1073744527, 2705, 1073744531,
- 2728, 1073744554, 2736, 1073744562, // NOLINT
- 2739, 1073744565, 2745, 2749,
- 2768, 1073744608, 2785, 1073744645, // NOLINT
- 2828, 1073744655, 2832, 1073744659,
- 2856, 1073744682, 2864, 1073744690, // NOLINT
- 2867, 1073744693, 2873, 2877,
- 1073744732, 2909, 1073744735, 2913, // NOLINT
- 2929, 2947, 1073744773, 2954,
- 1073744782, 2960, 1073744786, 2965, // NOLINT
- 1073744793, 2970, 2972, 1073744798,
- 2975, 1073744803, 2980, 1073744808, // NOLINT
- 2986, 1073744814, 3001, 3024,
- 1073744901, 3084, 1073744910, 3088, // NOLINT
- 1073744914, 3112, 1073744938, 3129,
- 3133, 1073744984, 3161, 1073744992, // NOLINT
- 3169, 1073745029, 3212, 1073745038,
- 3216, 1073745042, 3240, 1073745066, // NOLINT
- 3251, 1073745077, 3257, 3261,
- 3294, 1073745120, 3297, 1073745137, // NOLINT
- 3314, 1073745157, 3340, 1073745166,
- 3344, 1073745170, 3386, 3389, // NOLINT
- 3406, 1073745248, 3425, 1073745274,
- 3455, 1073745285, 3478, 1073745306, // NOLINT
- 3505, 1073745331, 3515, 3517,
- 1073745344, 3526, 1073745409, 3632, // NOLINT
- 1073745458, 3635, 1073745472, 3654,
- 1073745537, 3714, 3716, 1073745543, // NOLINT
- 3720, 3722, 3725, 1073745556,
- 3735, 1073745561, 3743, 1073745569, // NOLINT
- 3747, 3749, 3751, 1073745578,
- 3755, 1073745581, 3760, 1073745586, // NOLINT
- 3763, 3773, 1073745600, 3780,
- 3782, 1073745628, 3807, 3840, // NOLINT
- 1073745728, 3911, 1073745737, 3948,
- 1073745800, 3980, 1073745920, 4138, // NOLINT
- 4159, 1073746000, 4181, 1073746010,
- 4189, 4193, 1073746021, 4198, // NOLINT
- 1073746030, 4208, 1073746037, 4225,
- 4238, 1073746080, 4293, 4295, // NOLINT
- 4301, 1073746128, 4346, 1073746172,
- 4680, 1073746506, 4685, 1073746512, // NOLINT
- 4694, 4696, 1073746522, 4701,
- 1073746528, 4744, 1073746570, 4749, // NOLINT
- 1073746576, 4784, 1073746610, 4789,
- 1073746616, 4798, 4800, 1073746626, // NOLINT
- 4805, 1073746632, 4822, 1073746648,
- 4880, 1073746706, 4885, 1073746712, // NOLINT
- 4954, 1073746816, 5007, 1073746848,
- 5108, 1073746945, 5740, 1073747567, // NOLINT
- 5759, 1073747585, 5786, 1073747616,
- 5866, 1073747694, 5880, 1073747712, // NOLINT
- 5900, 1073747726, 5905, 1073747744,
- 5937, 1073747776, 5969, 1073747808, // NOLINT
- 5996, 1073747822, 6000, 1073747840,
- 6067, 6103, 6108, 1073748000, // NOLINT
- 6263, 1073748096, 6312, 6314,
- 1073748144, 6389, 1073748224, 6430, // NOLINT
- 1073748304, 6509, 1073748336, 6516,
- 1073748352, 6571, 1073748417, 6599, // NOLINT
- 1073748480, 6678, 1073748512, 6740,
- 6823, 1073748741, 6963, 1073748805, // NOLINT
- 6987, 1073748867, 7072, 1073748910,
- 7087, 1073748922, 7141, 1073748992, // NOLINT
- 7203, 1073749069, 7247, 1073749082,
- 7293, 1073749225, 7404, 1073749230, // NOLINT
- 7409, 1073749237, 7414, 1073749248,
- 7615, 1073749504, 7957, 1073749784, // NOLINT
- 7965, 1073749792, 8005, 1073749832,
- 8013, 1073749840, 8023, 8025, // NOLINT
- 8027, 8029, 1073749855, 8061,
- 1073749888, 8116, 1073749942, 8124, // NOLINT
- 8126, 1073749954, 8132, 1073749958,
- 8140, 1073749968, 8147, 1073749974, // NOLINT
- 8155, 1073749984, 8172, 1073750002,
- 8180, 1073750006, 8188}; // NOLINT
+ 1073741889, 90, 1073741921, 122, 170, 181,
+ 186, 1073742016, 214, 1073742040, 246, 1073742072,
+ 705, 1073742534, 721, 1073742560, 740, 748,
+ 750, 1073742704, 884, 1073742710, 887, 1073742714,
+ 893, 895, 902, 1073742728, 906, 908,
+ 1073742734, 929, 1073742755, 1013, 1073742839, 1153,
+ 1073742986, 1327, 1073743153, 1366, 1369, 1073743201,
+ 1415, 1073743312, 1514, 1073743344, 1522, 1073743392,
+ 1610, 1073743470, 1647, 1073743473, 1747, 1749,
+ 1073743589, 1766, 1073743598, 1775, 1073743610, 1788,
+ 1791, 1808, 1073743634, 1839, 1073743693, 1957,
+ 1969, 1073743818, 2026, 1073743860, 2037, 2042,
+ 1073743872, 2069, 2074, 2084, 2088, 1073743936,
+ 2136, 1073744032, 2226, 1073744132, 2361, 2365,
+ 2384, 1073744216, 2401, 1073744241, 2432, 1073744261,
+ 2444, 1073744271, 2448, 1073744275, 2472, 1073744298,
+ 2480, 2482, 1073744310, 2489, 2493, 2510,
+ 1073744348, 2525, 1073744351, 2529, 1073744368, 2545,
+ 1073744389, 2570, 1073744399, 2576, 1073744403, 2600,
+ 1073744426, 2608, 1073744434, 2611, 1073744437, 2614,
+ 1073744440, 2617, 1073744473, 2652, 2654, 1073744498,
+ 2676, 1073744517, 2701, 1073744527, 2705, 1073744531,
+ 2728, 1073744554, 2736, 1073744562, 2739, 1073744565,
+ 2745, 2749, 2768, 1073744608, 2785, 1073744645,
+ 2828, 1073744655, 2832, 1073744659, 2856, 1073744682,
+ 2864, 1073744690, 2867, 1073744693, 2873, 2877,
+ 1073744732, 2909, 1073744735, 2913, 2929, 2947,
+ 1073744773, 2954, 1073744782, 2960, 1073744786, 2965,
+ 1073744793, 2970, 2972, 1073744798, 2975, 1073744803,
+ 2980, 1073744808, 2986, 1073744814, 3001, 3024,
+ 1073744901, 3084, 1073744910, 3088, 1073744914, 3112,
+ 1073744938, 3129, 3133, 1073744984, 3161, 1073744992,
+ 3169, 1073745029, 3212, 1073745038, 3216, 1073745042,
+ 3240, 1073745066, 3251, 1073745077, 3257, 3261,
+ 3294, 1073745120, 3297, 1073745137, 3314, 1073745157,
+ 3340, 1073745166, 3344, 1073745170, 3386, 3389,
+ 3406, 1073745248, 3425, 1073745274, 3455, 1073745285,
+ 3478, 1073745306, 3505, 1073745331, 3515, 3517,
+ 1073745344, 3526, 1073745409, 3632, 1073745458, 3635,
+ 1073745472, 3654, 1073745537, 3714, 3716, 1073745543,
+ 3720, 3722, 3725, 1073745556, 3735, 1073745561,
+ 3743, 1073745569, 3747, 3749, 3751, 1073745578,
+ 3755, 1073745581, 3760, 1073745586, 3763, 3773,
+ 1073745600, 3780, 3782, 1073745628, 3807, 3840,
+ 1073745728, 3911, 1073745737, 3948, 1073745800, 3980,
+ 1073745920, 4138, 4159, 1073746000, 4181, 1073746010,
+ 4189, 4193, 1073746021, 4198, 1073746030, 4208,
+ 1073746037, 4225, 4238, 1073746080, 4293, 4295,
+ 4301, 1073746128, 4346, 1073746172, 4680, 1073746506,
+ 4685, 1073746512, 4694, 4696, 1073746522, 4701,
+ 1073746528, 4744, 1073746570, 4749, 1073746576, 4784,
+ 1073746610, 4789, 1073746616, 4798, 4800, 1073746626,
+ 4805, 1073746632, 4822, 1073746648, 4880, 1073746706,
+ 4885, 1073746712, 4954, 1073746816, 5007, 1073746848,
+ 5108, 1073746945, 5740, 1073747567, 5759, 1073747585,
+ 5786, 1073747616, 5866, 1073747694, 5880, 1073747712,
+ 5900, 1073747726, 5905, 1073747744, 5937, 1073747776,
+ 5969, 1073747808, 5996, 1073747822, 6000, 1073747840,
+ 6067, 6103, 6108, 1073748000, 6263, 1073748096,
+ 6312, 6314, 1073748144, 6389, 1073748224, 6430,
+ 1073748304, 6509, 1073748336, 6516, 1073748352, 6571,
+ 1073748417, 6599, 1073748480, 6678, 1073748512, 6740,
+ 6823, 1073748741, 6963, 1073748805, 6987, 1073748867,
+ 7072, 1073748910, 7087, 1073748922, 7141, 1073748992,
+ 7203, 1073749069, 7247, 1073749082, 7293, 1073749225,
+ 7404, 1073749230, 7409, 1073749237, 7414, 1073749248,
+ 7615, 1073749504, 7957, 1073749784, 7965, 1073749792,
+ 8005, 1073749832, 8013, 1073749840, 8023, 8025,
+ 8027, 8029, 1073749855, 8061, 1073749888, 8116,
+ 1073749942, 8124, 8126, 1073749954, 8132, 1073749958,
+ 8140, 1073749968, 8147, 1073749974, 8155, 1073749984,
+ 8172, 1073750002, 8180, 1073750006, 8188};
static const uint16_t kLetterTable1Size = 87;
static const int32_t kLetterTable1[87] = {
- 113, 127, 1073741968, 156,
- 258, 263, 1073742090, 275, // NOLINT
- 277, 1073742105, 285, 292,
- 294, 296, 1073742122, 301, // NOLINT
- 1073742127, 313, 1073742140, 319,
- 1073742149, 329, 334, 1073742176, // NOLINT
- 392, 1073744896, 3118, 1073744944,
- 3166, 1073744992, 3300, 1073745131, // NOLINT
- 3310, 1073745138, 3315, 1073745152,
- 3365, 3367, 3373, 1073745200, // NOLINT
- 3431, 3439, 1073745280, 3478,
- 1073745312, 3494, 1073745320, 3502, // NOLINT
- 1073745328, 3510, 1073745336, 3518,
- 1073745344, 3526, 1073745352, 3534, // NOLINT
- 1073745360, 3542, 1073745368, 3550,
- 3631, 1073745925, 4103, 1073745953, // NOLINT
- 4137, 1073745969, 4149, 1073745976,
- 4156, 1073745985, 4246, 1073746077, // NOLINT
- 4255, 1073746081, 4346, 1073746172,
- 4351, 1073746181, 4397, 1073746225, // NOLINT
- 4494, 1073746336, 4538, 1073746416,
- 4607, 1073746944, 8191}; // NOLINT
+ 113, 127, 1073741968, 156, 258, 263,
+ 1073742090, 275, 277, 1073742105, 285, 292,
+ 294, 296, 1073742122, 301, 1073742127, 313,
+ 1073742140, 319, 1073742149, 329, 334, 1073742176,
+ 392, 1073744896, 3118, 1073744944, 3166, 1073744992,
+ 3300, 1073745131, 3310, 1073745138, 3315, 1073745152,
+ 3365, 3367, 3373, 1073745200, 3431, 3439,
+ 1073745280, 3478, 1073745312, 3494, 1073745320, 3502,
+ 1073745328, 3510, 1073745336, 3518, 1073745344, 3526,
+ 1073745352, 3534, 1073745360, 3542, 1073745368, 3550,
+ 3631, 1073745925, 4103, 1073745953, 4137, 1073745969,
+ 4149, 1073745976, 4156, 1073745985, 4246, 1073746077,
+ 4255, 1073746081, 4346, 1073746172, 4351, 1073746181,
+ 4397, 1073746225, 4494, 1073746336, 4538, 1073746416,
+ 4607, 1073746944, 8191};
static const uint16_t kLetterTable2Size = 4;
-static const int32_t kLetterTable2[4] = {1073741824, 3509, 1073745408,
- 8191}; // NOLINT
+static const int32_t kLetterTable2[4] = {1073741824, 3509, 1073745408, 8191};
static const uint16_t kLetterTable3Size = 2;
-static const int32_t kLetterTable3[2] = {1073741824, 8191}; // NOLINT
+static const int32_t kLetterTable3[2] = {1073741824, 8191};
static const uint16_t kLetterTable4Size = 2;
-static const int32_t kLetterTable4[2] = {1073741824, 8140}; // NOLINT
+static const int32_t kLetterTable4[2] = {1073741824, 8140};
static const uint16_t kLetterTable5Size = 100;
static const int32_t kLetterTable5[100] = {
- 1073741824, 1164, 1073743056, 1277,
- 1073743104, 1548, 1073743376, 1567, // NOLINT
- 1073743402, 1579, 1073743424, 1646,
- 1073743487, 1693, 1073743520, 1775, // NOLINT
- 1073743639, 1823, 1073743650, 1928,
- 1073743755, 1934, 1073743760, 1965, // NOLINT
- 1073743792, 1969, 1073743863, 2049,
- 1073743875, 2053, 1073743879, 2058, // NOLINT
- 1073743884, 2082, 1073743936, 2163,
- 1073744002, 2227, 1073744114, 2295, // NOLINT
- 2299, 1073744138, 2341, 1073744176,
- 2374, 1073744224, 2428, 1073744260, // NOLINT
- 2482, 2511, 1073744352, 2532,
- 1073744358, 2543, 1073744378, 2558, // NOLINT
- 1073744384, 2600, 1073744448, 2626,
- 1073744452, 2635, 1073744480, 2678, // NOLINT
- 2682, 1073744510, 2735, 2737,
- 1073744565, 2742, 1073744569, 2749, // NOLINT
- 2752, 2754, 1073744603, 2781,
- 1073744608, 2794, 1073744626, 2804, // NOLINT
- 1073744641, 2822, 1073744649, 2830,
- 1073744657, 2838, 1073744672, 2854, // NOLINT
- 1073744680, 2862, 1073744688, 2906,
- 1073744732, 2911, 1073744740, 2917, // NOLINT
- 1073744832, 3042, 1073744896, 8191}; // NOLINT
+ 1073741824, 1164, 1073743056, 1277, 1073743104, 1548,
+ 1073743376, 1567, 1073743402, 1579, 1073743424, 1646,
+ 1073743487, 1693, 1073743520, 1775, 1073743639, 1823,
+ 1073743650, 1928, 1073743755, 1934, 1073743760, 1965,
+ 1073743792, 1969, 1073743863, 2049, 1073743875, 2053,
+ 1073743879, 2058, 1073743884, 2082, 1073743936, 2163,
+ 1073744002, 2227, 1073744114, 2295, 2299, 1073744138,
+ 2341, 1073744176, 2374, 1073744224, 2428, 1073744260,
+ 2482, 2511, 1073744352, 2532, 1073744358, 2543,
+ 1073744378, 2558, 1073744384, 2600, 1073744448, 2626,
+ 1073744452, 2635, 1073744480, 2678, 2682, 1073744510,
+ 2735, 2737, 1073744565, 2742, 1073744569, 2749,
+ 2752, 2754, 1073744603, 2781, 1073744608, 2794,
+ 1073744626, 2804, 1073744641, 2822, 1073744649, 2830,
+ 1073744657, 2838, 1073744672, 2854, 1073744680, 2862,
+ 1073744688, 2906, 1073744732, 2911, 1073744740, 2917,
+ 1073744832, 3042, 1073744896, 8191};
static const uint16_t kLetterTable6Size = 6;
-static const int32_t kLetterTable6[6] = {1073741824, 6051, 1073747888, 6086,
- 1073747915, 6139}; // NOLINT
+static const int32_t kLetterTable6[6] = {1073741824, 6051, 1073747888,
+ 6086, 1073747915, 6139};
static const uint16_t kLetterTable7Size = 48;
static const int32_t kLetterTable7[48] = {
- 1073748224, 6765, 1073748592, 6873,
- 1073748736, 6918, 1073748755, 6935, // NOLINT
- 6941, 1073748767, 6952, 1073748778,
- 6966, 1073748792, 6972, 6974, // NOLINT
- 1073748800, 6977, 1073748803, 6980,
- 1073748806, 7089, 1073748947, 7485, // NOLINT
- 1073749328, 7567, 1073749394, 7623,
- 1073749488, 7675, 1073749616, 7796, // NOLINT
- 1073749622, 7932, 1073749793, 7994,
- 1073749825, 8026, 1073749862, 8126, // NOLINT
- 1073749954, 8135, 1073749962, 8143,
- 1073749970, 8151, 1073749978, 8156}; // NOLINT
+ 1073748224, 6765, 1073748592, 6873, 1073748736, 6918,
+ 1073748755, 6935, 6941, 1073748767, 6952, 1073748778,
+ 6966, 1073748792, 6972, 6974, 1073748800, 6977,
+ 1073748803, 6980, 1073748806, 7089, 1073748947, 7485,
+ 1073749328, 7567, 1073749394, 7623, 1073749488, 7675,
+ 1073749616, 7796, 1073749622, 7932, 1073749793, 7994,
+ 1073749825, 8026, 1073749862, 8126, 1073749954, 8135,
+ 1073749962, 8143, 1073749970, 8151, 1073749978, 8156};
bool Letter::Is(uchar c) {
int chunk_index = c >> 13;
switch (chunk_index) {
@@ -631,189 +527,133 @@ bool Letter::Is(uchar c) {
static const uint16_t kID_StartTable0Size = 434;
static const int32_t kID_StartTable0[434] = {
- 36, 1073741889, 90, 92,
- 95, 1073741921, 122, 170, // NOLINT
- 181, 186, 1073742016, 214,
- 1073742040, 246, 1073742072, 705, // NOLINT
- 1073742534, 721, 1073742560, 740,
- 748, 750, 1073742704, 884, // NOLINT
- 1073742710, 887, 1073742714, 893,
- 895, 902, 1073742728, 906, // NOLINT
- 908, 1073742734, 929, 1073742755,
- 1013, 1073742839, 1153, 1073742986, // NOLINT
- 1327, 1073743153, 1366, 1369,
- 1073743201, 1415, 1073743312, 1514, // NOLINT
- 1073743344, 1522, 1073743392, 1610,
- 1073743470, 1647, 1073743473, 1747, // NOLINT
- 1749, 1073743589, 1766, 1073743598,
- 1775, 1073743610, 1788, 1791, // NOLINT
- 1808, 1073743634, 1839, 1073743693,
- 1957, 1969, 1073743818, 2026, // NOLINT
- 1073743860, 2037, 2042, 1073743872,
- 2069, 2074, 2084, 2088, // NOLINT
- 1073743936, 2136, 1073744032, 2226,
- 1073744132, 2361, 2365, 2384, // NOLINT
- 1073744216, 2401, 1073744241, 2432,
- 1073744261, 2444, 1073744271, 2448, // NOLINT
- 1073744275, 2472, 1073744298, 2480,
- 2482, 1073744310, 2489, 2493, // NOLINT
- 2510, 1073744348, 2525, 1073744351,
- 2529, 1073744368, 2545, 1073744389, // NOLINT
- 2570, 1073744399, 2576, 1073744403,
- 2600, 1073744426, 2608, 1073744434, // NOLINT
- 2611, 1073744437, 2614, 1073744440,
- 2617, 1073744473, 2652, 2654, // NOLINT
- 1073744498, 2676, 1073744517, 2701,
- 1073744527, 2705, 1073744531, 2728, // NOLINT
- 1073744554, 2736, 1073744562, 2739,
- 1073744565, 2745, 2749, 2768, // NOLINT
- 1073744608, 2785, 1073744645, 2828,
- 1073744655, 2832, 1073744659, 2856, // NOLINT
- 1073744682, 2864, 1073744690, 2867,
- 1073744693, 2873, 2877, 1073744732, // NOLINT
- 2909, 1073744735, 2913, 2929,
- 2947, 1073744773, 2954, 1073744782, // NOLINT
- 2960, 1073744786, 2965, 1073744793,
- 2970, 2972, 1073744798, 2975, // NOLINT
- 1073744803, 2980, 1073744808, 2986,
- 1073744814, 3001, 3024, 1073744901, // NOLINT
- 3084, 1073744910, 3088, 1073744914,
- 3112, 1073744938, 3129, 3133, // NOLINT
- 1073744984, 3161, 1073744992, 3169,
- 1073745029, 3212, 1073745038, 3216, // NOLINT
- 1073745042, 3240, 1073745066, 3251,
- 1073745077, 3257, 3261, 3294, // NOLINT
- 1073745120, 3297, 1073745137, 3314,
- 1073745157, 3340, 1073745166, 3344, // NOLINT
- 1073745170, 3386, 3389, 3406,
- 1073745248, 3425, 1073745274, 3455, // NOLINT
- 1073745285, 3478, 1073745306, 3505,
- 1073745331, 3515, 3517, 1073745344, // NOLINT
- 3526, 1073745409, 3632, 1073745458,
- 3635, 1073745472, 3654, 1073745537, // NOLINT
- 3714, 3716, 1073745543, 3720,
- 3722, 3725, 1073745556, 3735, // NOLINT
- 1073745561, 3743, 1073745569, 3747,
- 3749, 3751, 1073745578, 3755, // NOLINT
- 1073745581, 3760, 1073745586, 3763,
- 3773, 1073745600, 3780, 3782, // NOLINT
- 1073745628, 3807, 3840, 1073745728,
- 3911, 1073745737, 3948, 1073745800, // NOLINT
- 3980, 1073745920, 4138, 4159,
- 1073746000, 4181, 1073746010, 4189, // NOLINT
- 4193, 1073746021, 4198, 1073746030,
- 4208, 1073746037, 4225, 4238, // NOLINT
- 1073746080, 4293, 4295, 4301,
- 1073746128, 4346, 1073746172, 4680, // NOLINT
- 1073746506, 4685, 1073746512, 4694,
- 4696, 1073746522, 4701, 1073746528, // NOLINT
- 4744, 1073746570, 4749, 1073746576,
- 4784, 1073746610, 4789, 1073746616, // NOLINT
- 4798, 4800, 1073746626, 4805,
- 1073746632, 4822, 1073746648, 4880, // NOLINT
- 1073746706, 4885, 1073746712, 4954,
- 1073746816, 5007, 1073746848, 5108, // NOLINT
- 1073746945, 5740, 1073747567, 5759,
- 1073747585, 5786, 1073747616, 5866, // NOLINT
- 1073747694, 5880, 1073747712, 5900,
- 1073747726, 5905, 1073747744, 5937, // NOLINT
- 1073747776, 5969, 1073747808, 5996,
- 1073747822, 6000, 1073747840, 6067, // NOLINT
- 6103, 6108, 1073748000, 6263,
- 1073748096, 6312, 6314, 1073748144, // NOLINT
- 6389, 1073748224, 6430, 1073748304,
- 6509, 1073748336, 6516, 1073748352, // NOLINT
- 6571, 1073748417, 6599, 1073748480,
- 6678, 1073748512, 6740, 6823, // NOLINT
- 1073748741, 6963, 1073748805, 6987,
- 1073748867, 7072, 1073748910, 7087, // NOLINT
- 1073748922, 7141, 1073748992, 7203,
- 1073749069, 7247, 1073749082, 7293, // NOLINT
- 1073749225, 7404, 1073749230, 7409,
- 1073749237, 7414, 1073749248, 7615, // NOLINT
- 1073749504, 7957, 1073749784, 7965,
- 1073749792, 8005, 1073749832, 8013, // NOLINT
- 1073749840, 8023, 8025, 8027,
- 8029, 1073749855, 8061, 1073749888, // NOLINT
- 8116, 1073749942, 8124, 8126,
- 1073749954, 8132, 1073749958, 8140, // NOLINT
- 1073749968, 8147, 1073749974, 8155,
- 1073749984, 8172, 1073750002, 8180, // NOLINT
- 1073750006, 8188}; // NOLINT
+ 36, 1073741889, 90, 92, 95, 1073741921,
+ 122, 170, 181, 186, 1073742016, 214,
+ 1073742040, 246, 1073742072, 705, 1073742534, 721,
+ 1073742560, 740, 748, 750, 1073742704, 884,
+ 1073742710, 887, 1073742714, 893, 895, 902,
+ 1073742728, 906, 908, 1073742734, 929, 1073742755,
+ 1013, 1073742839, 1153, 1073742986, 1327, 1073743153,
+ 1366, 1369, 1073743201, 1415, 1073743312, 1514,
+ 1073743344, 1522, 1073743392, 1610, 1073743470, 1647,
+ 1073743473, 1747, 1749, 1073743589, 1766, 1073743598,
+ 1775, 1073743610, 1788, 1791, 1808, 1073743634,
+ 1839, 1073743693, 1957, 1969, 1073743818, 2026,
+ 1073743860, 2037, 2042, 1073743872, 2069, 2074,
+ 2084, 2088, 1073743936, 2136, 1073744032, 2226,
+ 1073744132, 2361, 2365, 2384, 1073744216, 2401,
+ 1073744241, 2432, 1073744261, 2444, 1073744271, 2448,
+ 1073744275, 2472, 1073744298, 2480, 2482, 1073744310,
+ 2489, 2493, 2510, 1073744348, 2525, 1073744351,
+ 2529, 1073744368, 2545, 1073744389, 2570, 1073744399,
+ 2576, 1073744403, 2600, 1073744426, 2608, 1073744434,
+ 2611, 1073744437, 2614, 1073744440, 2617, 1073744473,
+ 2652, 2654, 1073744498, 2676, 1073744517, 2701,
+ 1073744527, 2705, 1073744531, 2728, 1073744554, 2736,
+ 1073744562, 2739, 1073744565, 2745, 2749, 2768,
+ 1073744608, 2785, 1073744645, 2828, 1073744655, 2832,
+ 1073744659, 2856, 1073744682, 2864, 1073744690, 2867,
+ 1073744693, 2873, 2877, 1073744732, 2909, 1073744735,
+ 2913, 2929, 2947, 1073744773, 2954, 1073744782,
+ 2960, 1073744786, 2965, 1073744793, 2970, 2972,
+ 1073744798, 2975, 1073744803, 2980, 1073744808, 2986,
+ 1073744814, 3001, 3024, 1073744901, 3084, 1073744910,
+ 3088, 1073744914, 3112, 1073744938, 3129, 3133,
+ 1073744984, 3161, 1073744992, 3169, 1073745029, 3212,
+ 1073745038, 3216, 1073745042, 3240, 1073745066, 3251,
+ 1073745077, 3257, 3261, 3294, 1073745120, 3297,
+ 1073745137, 3314, 1073745157, 3340, 1073745166, 3344,
+ 1073745170, 3386, 3389, 3406, 1073745248, 3425,
+ 1073745274, 3455, 1073745285, 3478, 1073745306, 3505,
+ 1073745331, 3515, 3517, 1073745344, 3526, 1073745409,
+ 3632, 1073745458, 3635, 1073745472, 3654, 1073745537,
+ 3714, 3716, 1073745543, 3720, 3722, 3725,
+ 1073745556, 3735, 1073745561, 3743, 1073745569, 3747,
+ 3749, 3751, 1073745578, 3755, 1073745581, 3760,
+ 1073745586, 3763, 3773, 1073745600, 3780, 3782,
+ 1073745628, 3807, 3840, 1073745728, 3911, 1073745737,
+ 3948, 1073745800, 3980, 1073745920, 4138, 4159,
+ 1073746000, 4181, 1073746010, 4189, 4193, 1073746021,
+ 4198, 1073746030, 4208, 1073746037, 4225, 4238,
+ 1073746080, 4293, 4295, 4301, 1073746128, 4346,
+ 1073746172, 4680, 1073746506, 4685, 1073746512, 4694,
+ 4696, 1073746522, 4701, 1073746528, 4744, 1073746570,
+ 4749, 1073746576, 4784, 1073746610, 4789, 1073746616,
+ 4798, 4800, 1073746626, 4805, 1073746632, 4822,
+ 1073746648, 4880, 1073746706, 4885, 1073746712, 4954,
+ 1073746816, 5007, 1073746848, 5108, 1073746945, 5740,
+ 1073747567, 5759, 1073747585, 5786, 1073747616, 5866,
+ 1073747694, 5880, 1073747712, 5900, 1073747726, 5905,
+ 1073747744, 5937, 1073747776, 5969, 1073747808, 5996,
+ 1073747822, 6000, 1073747840, 6067, 6103, 6108,
+ 1073748000, 6263, 1073748096, 6312, 6314, 1073748144,
+ 6389, 1073748224, 6430, 1073748304, 6509, 1073748336,
+ 6516, 1073748352, 6571, 1073748417, 6599, 1073748480,
+ 6678, 1073748512, 6740, 6823, 1073748741, 6963,
+ 1073748805, 6987, 1073748867, 7072, 1073748910, 7087,
+ 1073748922, 7141, 1073748992, 7203, 1073749069, 7247,
+ 1073749082, 7293, 1073749225, 7404, 1073749230, 7409,
+ 1073749237, 7414, 1073749248, 7615, 1073749504, 7957,
+ 1073749784, 7965, 1073749792, 8005, 1073749832, 8013,
+ 1073749840, 8023, 8025, 8027, 8029, 1073749855,
+ 8061, 1073749888, 8116, 1073749942, 8124, 8126,
+ 1073749954, 8132, 1073749958, 8140, 1073749968, 8147,
+ 1073749974, 8155, 1073749984, 8172, 1073750002, 8180,
+ 1073750006, 8188};
static const uint16_t kID_StartTable1Size = 84;
static const int32_t kID_StartTable1[84] = {
- 113, 127, 1073741968, 156,
- 258, 263, 1073742090, 275, // NOLINT
- 277, 1073742104, 285, 292,
- 294, 296, 1073742122, 313, // NOLINT
- 1073742140, 319, 1073742149, 329,
- 334, 1073742176, 392, 1073744896, // NOLINT
- 3118, 1073744944, 3166, 1073744992,
- 3300, 1073745131, 3310, 1073745138, // NOLINT
- 3315, 1073745152, 3365, 3367,
- 3373, 1073745200, 3431, 3439, // NOLINT
- 1073745280, 3478, 1073745312, 3494,
- 1073745320, 3502, 1073745328, 3510, // NOLINT
- 1073745336, 3518, 1073745344, 3526,
- 1073745352, 3534, 1073745360, 3542, // NOLINT
- 1073745368, 3550, 1073745925, 4103,
- 1073745953, 4137, 1073745969, 4149, // NOLINT
- 1073745976, 4156, 1073745985, 4246,
- 1073746075, 4255, 1073746081, 4346, // NOLINT
- 1073746172, 4351, 1073746181, 4397,
- 1073746225, 4494, 1073746336, 4538, // NOLINT
- 1073746416, 4607, 1073746944, 8191}; // NOLINT
+ 113, 127, 1073741968, 156, 258, 263,
+ 1073742090, 275, 277, 1073742104, 285, 292,
+ 294, 296, 1073742122, 313, 1073742140, 319,
+ 1073742149, 329, 334, 1073742176, 392, 1073744896,
+ 3118, 1073744944, 3166, 1073744992, 3300, 1073745131,
+ 3310, 1073745138, 3315, 1073745152, 3365, 3367,
+ 3373, 1073745200, 3431, 3439, 1073745280, 3478,
+ 1073745312, 3494, 1073745320, 3502, 1073745328, 3510,
+ 1073745336, 3518, 1073745344, 3526, 1073745352, 3534,
+ 1073745360, 3542, 1073745368, 3550, 1073745925, 4103,
+ 1073745953, 4137, 1073745969, 4149, 1073745976, 4156,
+ 1073745985, 4246, 1073746075, 4255, 1073746081, 4346,
+ 1073746172, 4351, 1073746181, 4397, 1073746225, 4494,
+ 1073746336, 4538, 1073746416, 4607, 1073746944, 8191};
static const uint16_t kID_StartTable2Size = 4;
-static const int32_t kID_StartTable2[4] = {1073741824, 3509, 1073745408,
- 8191}; // NOLINT
+static const int32_t kID_StartTable2[4] = {1073741824, 3509, 1073745408, 8191};
static const uint16_t kID_StartTable3Size = 2;
-static const int32_t kID_StartTable3[2] = {1073741824, 8191}; // NOLINT
+static const int32_t kID_StartTable3[2] = {1073741824, 8191};
static const uint16_t kID_StartTable4Size = 2;
-static const int32_t kID_StartTable4[2] = {1073741824, 8140}; // NOLINT
+static const int32_t kID_StartTable4[2] = {1073741824, 8140};
static const uint16_t kID_StartTable5Size = 100;
static const int32_t kID_StartTable5[100] = {
- 1073741824, 1164, 1073743056, 1277,
- 1073743104, 1548, 1073743376, 1567, // NOLINT
- 1073743402, 1579, 1073743424, 1646,
- 1073743487, 1693, 1073743520, 1775, // NOLINT
- 1073743639, 1823, 1073743650, 1928,
- 1073743755, 1934, 1073743760, 1965, // NOLINT
- 1073743792, 1969, 1073743863, 2049,
- 1073743875, 2053, 1073743879, 2058, // NOLINT
- 1073743884, 2082, 1073743936, 2163,
- 1073744002, 2227, 1073744114, 2295, // NOLINT
- 2299, 1073744138, 2341, 1073744176,
- 2374, 1073744224, 2428, 1073744260, // NOLINT
- 2482, 2511, 1073744352, 2532,
- 1073744358, 2543, 1073744378, 2558, // NOLINT
- 1073744384, 2600, 1073744448, 2626,
- 1073744452, 2635, 1073744480, 2678, // NOLINT
- 2682, 1073744510, 2735, 2737,
- 1073744565, 2742, 1073744569, 2749, // NOLINT
- 2752, 2754, 1073744603, 2781,
- 1073744608, 2794, 1073744626, 2804, // NOLINT
- 1073744641, 2822, 1073744649, 2830,
- 1073744657, 2838, 1073744672, 2854, // NOLINT
- 1073744680, 2862, 1073744688, 2906,
- 1073744732, 2911, 1073744740, 2917, // NOLINT
- 1073744832, 3042, 1073744896, 8191}; // NOLINT
+ 1073741824, 1164, 1073743056, 1277, 1073743104, 1548,
+ 1073743376, 1567, 1073743402, 1579, 1073743424, 1646,
+ 1073743487, 1693, 1073743520, 1775, 1073743639, 1823,
+ 1073743650, 1928, 1073743755, 1934, 1073743760, 1965,
+ 1073743792, 1969, 1073743863, 2049, 1073743875, 2053,
+ 1073743879, 2058, 1073743884, 2082, 1073743936, 2163,
+ 1073744002, 2227, 1073744114, 2295, 2299, 1073744138,
+ 2341, 1073744176, 2374, 1073744224, 2428, 1073744260,
+ 2482, 2511, 1073744352, 2532, 1073744358, 2543,
+ 1073744378, 2558, 1073744384, 2600, 1073744448, 2626,
+ 1073744452, 2635, 1073744480, 2678, 2682, 1073744510,
+ 2735, 2737, 1073744565, 2742, 1073744569, 2749,
+ 2752, 2754, 1073744603, 2781, 1073744608, 2794,
+ 1073744626, 2804, 1073744641, 2822, 1073744649, 2830,
+ 1073744657, 2838, 1073744672, 2854, 1073744680, 2862,
+ 1073744688, 2906, 1073744732, 2911, 1073744740, 2917,
+ 1073744832, 3042, 1073744896, 8191};
static const uint16_t kID_StartTable6Size = 6;
-static const int32_t kID_StartTable6[6] = {1073741824, 6051, 1073747888, 6086,
- 1073747915, 6139}; // NOLINT
+static const int32_t kID_StartTable6[6] = {1073741824, 6051, 1073747888,
+ 6086, 1073747915, 6139};
static const uint16_t kID_StartTable7Size = 48;
static const int32_t kID_StartTable7[48] = {
- 1073748224, 6765, 1073748592, 6873,
- 1073748736, 6918, 1073748755, 6935, // NOLINT
- 6941, 1073748767, 6952, 1073748778,
- 6966, 1073748792, 6972, 6974, // NOLINT
- 1073748800, 6977, 1073748803, 6980,
- 1073748806, 7089, 1073748947, 7485, // NOLINT
- 1073749328, 7567, 1073749394, 7623,
- 1073749488, 7675, 1073749616, 7796, // NOLINT
- 1073749622, 7932, 1073749793, 7994,
- 1073749825, 8026, 1073749862, 8126, // NOLINT
- 1073749954, 8135, 1073749962, 8143,
- 1073749970, 8151, 1073749978, 8156}; // NOLINT
+ 1073748224, 6765, 1073748592, 6873, 1073748736, 6918,
+ 1073748755, 6935, 6941, 1073748767, 6952, 1073748778,
+ 6966, 1073748792, 6972, 6974, 1073748800, 6977,
+ 1073748803, 6980, 1073748806, 7089, 1073748947, 7485,
+ 1073749328, 7567, 1073749394, 7623, 1073749488, 7675,
+ 1073749616, 7796, 1073749622, 7932, 1073749793, 7994,
+ 1073749825, 8026, 1073749862, 8126, 1073749954, 8135,
+ 1073749962, 8143, 1073749970, 8151, 1073749978, 8156};
bool ID_Start::Is(uchar c) {
int chunk_index = c >> 13;
switch (chunk_index) {
@@ -844,115 +684,82 @@ bool ID_Start::Is(uchar c) {
static const uint16_t kID_ContinueTable0Size = 315;
static const int32_t kID_ContinueTable0[315] = {
- 1073741872, 57, 95, 183,
- 1073742592, 879, 903, 1073742979, // NOLINT
- 1159, 1073743249, 1469, 1471,
- 1073743297, 1474, 1073743300, 1477, // NOLINT
- 1479, 1073743376, 1562, 1073743435,
- 1641, 1648, 1073743574, 1756, // NOLINT
- 1073743583, 1764, 1073743591, 1768,
- 1073743594, 1773, 1073743600, 1785, // NOLINT
- 1809, 1073743664, 1866, 1073743782,
- 1968, 1073743808, 1993, 1073743851, // NOLINT
- 2035, 1073743894, 2073, 1073743899,
- 2083, 1073743909, 2087, 1073743913, // NOLINT
- 2093, 1073743961, 2139, 1073744100,
- 2307, 1073744186, 2364, 1073744190, // NOLINT
- 2383, 1073744209, 2391, 1073744226,
- 2403, 1073744230, 2415, 1073744257, // NOLINT
- 2435, 2492, 1073744318, 2500,
- 1073744327, 2504, 1073744331, 2509, // NOLINT
- 2519, 1073744354, 2531, 1073744358,
- 2543, 1073744385, 2563, 2620, // NOLINT
- 1073744446, 2626, 1073744455, 2632,
- 1073744459, 2637, 2641, 1073744486, // NOLINT
- 2673, 2677, 1073744513, 2691,
- 2748, 1073744574, 2757, 1073744583, // NOLINT
- 2761, 1073744587, 2765, 1073744610,
- 2787, 1073744614, 2799, 1073744641, // NOLINT
- 2819, 2876, 1073744702, 2884,
- 1073744711, 2888, 1073744715, 2893, // NOLINT
- 1073744726, 2903, 1073744738, 2915,
- 1073744742, 2927, 2946, 1073744830, // NOLINT
- 3010, 1073744838, 3016, 1073744842,
- 3021, 3031, 1073744870, 3055, // NOLINT
- 1073744896, 3075, 1073744958, 3140,
- 1073744966, 3144, 1073744970, 3149, // NOLINT
- 1073744981, 3158, 1073744994, 3171,
- 1073744998, 3183, 1073745025, 3203, // NOLINT
- 3260, 1073745086, 3268, 1073745094,
- 3272, 1073745098, 3277, 1073745109, // NOLINT
- 3286, 1073745122, 3299, 1073745126,
- 3311, 1073745153, 3331, 1073745214, // NOLINT
- 3396, 1073745222, 3400, 1073745226,
- 3405, 3415, 1073745250, 3427, // NOLINT
- 1073745254, 3439, 1073745282, 3459,
- 3530, 1073745359, 3540, 3542, // NOLINT
- 1073745368, 3551, 1073745382, 3567,
- 1073745394, 3571, 3633, 1073745460, // NOLINT
- 3642, 1073745479, 3662, 1073745488,
- 3673, 3761, 1073745588, 3769, // NOLINT
- 1073745595, 3772, 1073745608, 3789,
- 1073745616, 3801, 1073745688, 3865, // NOLINT
- 1073745696, 3881, 3893, 3895,
- 3897, 1073745726, 3903, 1073745777, // NOLINT
- 3972, 1073745798, 3975, 1073745805,
- 3991, 1073745817, 4028, 4038, // NOLINT
- 1073745963, 4158, 1073745984, 4169,
- 1073746006, 4185, 1073746014, 4192, // NOLINT
- 1073746018, 4196, 1073746023, 4205,
- 1073746033, 4212, 1073746050, 4237, // NOLINT
- 1073746063, 4253, 1073746781, 4959,
- 1073746793, 4977, 1073747730, 5908, // NOLINT
- 1073747762, 5940, 1073747794, 5971,
- 1073747826, 6003, 1073747892, 6099, // NOLINT
- 6109, 1073747936, 6121, 1073747979,
- 6157, 1073747984, 6169, 6313, // NOLINT
- 1073748256, 6443, 1073748272, 6459,
- 1073748294, 6479, 1073748400, 6592, // NOLINT
- 1073748424, 6601, 1073748432, 6618,
- 1073748503, 6683, 1073748565, 6750, // NOLINT
- 1073748576, 6780, 1073748607, 6793,
- 1073748624, 6809, 1073748656, 6845, // NOLINT
- 1073748736, 6916, 1073748788, 6980,
- 1073748816, 7001, 1073748843, 7027, // NOLINT
- 1073748864, 7042, 1073748897, 7085,
- 1073748912, 7097, 1073748966, 7155, // NOLINT
- 1073749028, 7223, 1073749056, 7241,
- 1073749072, 7257, 1073749200, 7378, // NOLINT
- 1073749204, 7400, 7405, 1073749234,
- 7412, 1073749240, 7417, 1073749440, // NOLINT
- 7669, 1073749500, 7679}; // NOLINT
+ 1073741872, 57, 95, 183, 1073742592, 879,
+ 903, 1073742979, 1159, 1073743249, 1469, 1471,
+ 1073743297, 1474, 1073743300, 1477, 1479, 1073743376,
+ 1562, 1073743435, 1641, 1648, 1073743574, 1756,
+ 1073743583, 1764, 1073743591, 1768, 1073743594, 1773,
+ 1073743600, 1785, 1809, 1073743664, 1866, 1073743782,
+ 1968, 1073743808, 1993, 1073743851, 2035, 1073743894,
+ 2073, 1073743899, 2083, 1073743909, 2087, 1073743913,
+ 2093, 1073743961, 2139, 1073744100, 2307, 1073744186,
+ 2364, 1073744190, 2383, 1073744209, 2391, 1073744226,
+ 2403, 1073744230, 2415, 1073744257, 2435, 2492,
+ 1073744318, 2500, 1073744327, 2504, 1073744331, 2509,
+ 2519, 1073744354, 2531, 1073744358, 2543, 1073744385,
+ 2563, 2620, 1073744446, 2626, 1073744455, 2632,
+ 1073744459, 2637, 2641, 1073744486, 2673, 2677,
+ 1073744513, 2691, 2748, 1073744574, 2757, 1073744583,
+ 2761, 1073744587, 2765, 1073744610, 2787, 1073744614,
+ 2799, 1073744641, 2819, 2876, 1073744702, 2884,
+ 1073744711, 2888, 1073744715, 2893, 1073744726, 2903,
+ 1073744738, 2915, 1073744742, 2927, 2946, 1073744830,
+ 3010, 1073744838, 3016, 1073744842, 3021, 3031,
+ 1073744870, 3055, 1073744896, 3075, 1073744958, 3140,
+ 1073744966, 3144, 1073744970, 3149, 1073744981, 3158,
+ 1073744994, 3171, 1073744998, 3183, 1073745025, 3203,
+ 3260, 1073745086, 3268, 1073745094, 3272, 1073745098,
+ 3277, 1073745109, 3286, 1073745122, 3299, 1073745126,
+ 3311, 1073745153, 3331, 1073745214, 3396, 1073745222,
+ 3400, 1073745226, 3405, 3415, 1073745250, 3427,
+ 1073745254, 3439, 1073745282, 3459, 3530, 1073745359,
+ 3540, 3542, 1073745368, 3551, 1073745382, 3567,
+ 1073745394, 3571, 3633, 1073745460, 3642, 1073745479,
+ 3662, 1073745488, 3673, 3761, 1073745588, 3769,
+ 1073745595, 3772, 1073745608, 3789, 1073745616, 3801,
+ 1073745688, 3865, 1073745696, 3881, 3893, 3895,
+ 3897, 1073745726, 3903, 1073745777, 3972, 1073745798,
+ 3975, 1073745805, 3991, 1073745817, 4028, 4038,
+ 1073745963, 4158, 1073745984, 4169, 1073746006, 4185,
+ 1073746014, 4192, 1073746018, 4196, 1073746023, 4205,
+ 1073746033, 4212, 1073746050, 4237, 1073746063, 4253,
+ 1073746781, 4959, 1073746793, 4977, 1073747730, 5908,
+ 1073747762, 5940, 1073747794, 5971, 1073747826, 6003,
+ 1073747892, 6099, 6109, 1073747936, 6121, 1073747979,
+ 6157, 1073747984, 6169, 6313, 1073748256, 6443,
+ 1073748272, 6459, 1073748294, 6479, 1073748400, 6592,
+ 1073748424, 6601, 1073748432, 6618, 1073748503, 6683,
+ 1073748565, 6750, 1073748576, 6780, 1073748607, 6793,
+ 1073748624, 6809, 1073748656, 6845, 1073748736, 6916,
+ 1073748788, 6980, 1073748816, 7001, 1073748843, 7027,
+ 1073748864, 7042, 1073748897, 7085, 1073748912, 7097,
+ 1073748966, 7155, 1073749028, 7223, 1073749056, 7241,
+ 1073749072, 7257, 1073749200, 7378, 1073749204, 7400,
+ 7405, 1073749234, 7412, 1073749240, 7417, 1073749440,
+ 7669, 1073749500, 7679};
static const uint16_t kID_ContinueTable1Size = 19;
static const int32_t kID_ContinueTable1[19] = {
- 1073741836, 13, 1073741887, 64,
- 84, 1073742032, 220, 225, // NOLINT
- 1073742053, 240, 1073745135, 3313,
- 3455, 1073745376, 3583, 1073745962, // NOLINT
- 4143, 1073746073, 4250}; // NOLINT
+ 1073741836, 13, 1073741887, 64, 84,
+ 1073742032, 220, 225, 1073742053, 240,
+ 1073745135, 3313, 3455, 1073745376, 3583,
+ 1073745962, 4143, 1073746073, 4250};
static const uint16_t kID_ContinueTable5Size = 63;
static const int32_t kID_ContinueTable5[63] = {
- 1073743392, 1577, 1647, 1073743476,
- 1661, 1695, 1073743600, 1777, // NOLINT
- 2050, 2054, 2059, 1073743907,
- 2087, 1073744000, 2177, 1073744052, // NOLINT
- 2244, 1073744080, 2265, 1073744096,
- 2289, 1073744128, 2313, 1073744166, // NOLINT
- 2349, 1073744199, 2387, 1073744256,
- 2435, 1073744307, 2496, 1073744336, // NOLINT
- 2521, 2533, 1073744368, 2553,
- 1073744425, 2614, 2627, 1073744460, // NOLINT
- 2637, 1073744464, 2649, 1073744507,
- 2685, 2736, 1073744562, 2740, // NOLINT
- 1073744567, 2744, 1073744574, 2751,
- 2753, 1073744619, 2799, 1073744629, // NOLINT
- 2806, 1073744867, 3050, 1073744876,
- 3053, 1073744880, 3065}; // NOLINT
+ 1073743392, 1577, 1647, 1073743476, 1661, 1695,
+ 1073743600, 1777, 2050, 2054, 2059, 1073743907,
+ 2087, 1073744000, 2177, 1073744052, 2244, 1073744080,
+ 2265, 1073744096, 2289, 1073744128, 2313, 1073744166,
+ 2349, 1073744199, 2387, 1073744256, 2435, 1073744307,
+ 2496, 1073744336, 2521, 2533, 1073744368, 2553,
+ 1073744425, 2614, 2627, 1073744460, 2637, 1073744464,
+ 2649, 1073744507, 2685, 2736, 1073744562, 2740,
+ 1073744567, 2744, 1073744574, 2751, 2753, 1073744619,
+ 2799, 1073744629, 2806, 1073744867, 3050, 1073744876,
+ 3053, 1073744880, 3065};
static const uint16_t kID_ContinueTable7Size = 12;
static const int32_t kID_ContinueTable7[12] = {
- 6942, 1073749504, 7695, 1073749536,
- 7725, 1073749555, 7732, 1073749581, // NOLINT
- 7759, 1073749776, 7961, 7999}; // NOLINT
+ 6942, 1073749504, 7695, 1073749536, 7725, 1073749555,
+ 7732, 1073749581, 7759, 1073749776, 7961, 7999};
bool ID_Continue::Is(uchar c) {
int chunk_index = c >> 13;
switch (chunk_index) {
@@ -973,13 +780,11 @@ bool ID_Continue::Is(uchar c) {
// point.properties)
static const uint16_t kWhiteSpaceTable0Size = 6;
-static const int32_t kWhiteSpaceTable0[6] = {9, 1073741835, 12,
- 32, 160, 5760}; // NOLINT
+static const int32_t kWhiteSpaceTable0[6] = {9, 1073741835, 12, 32, 160, 5760};
static const uint16_t kWhiteSpaceTable1Size = 5;
-static const int32_t kWhiteSpaceTable1[5] = {1073741824, 10, 47, 95,
- 4096}; // NOLINT
+static const int32_t kWhiteSpaceTable1[5] = {1073741824, 10, 47, 95, 4096};
static const uint16_t kWhiteSpaceTable7Size = 1;
-static const int32_t kWhiteSpaceTable7[1] = {7935}; // NOLINT
+static const int32_t kWhiteSpaceTable7[1] = {7935};
bool WhiteSpace::Is(uchar c) {
int chunk_index = c >> 13;
switch (chunk_index) {
@@ -996,270 +801,189 @@ bool WhiteSpace::Is(uchar c) {
#endif // !V8_INTL_SUPPORT
#ifndef V8_INTL_SUPPORT
-static const MultiCharacterSpecialCase<2> kToLowercaseMultiStrings0[2] =
- { // NOLINT
- {{105, 775}},
- {{kSentinel}}}; // NOLINT
-static const uint16_t kToLowercaseTable0Size = 488; // NOLINT
+static const MultiCharacterSpecialCase<2> kToLowercaseMultiStrings0[2] = {
+ {{105, 775}}, {{kSentinel}}};
+static const uint16_t kToLowercaseTable0Size = 488;
static const int32_t kToLowercaseTable0[976] = {
- 1073741889, 128, 90, 128, 1073742016, 128,
- 214, 128, 1073742040, 128, 222, 128,
- 256, 4, 258, 4, // NOLINT
- 260, 4, 262, 4, 264, 4,
- 266, 4, 268, 4, 270, 4,
- 272, 4, 274, 4, // NOLINT
- 276, 4, 278, 4, 280, 4,
- 282, 4, 284, 4, 286, 4,
- 288, 4, 290, 4, // NOLINT
- 292, 4, 294, 4, 296, 4,
- 298, 4, 300, 4, 302, 4,
- 304, 1, 306, 4, // NOLINT
- 308, 4, 310, 4, 313, 4,
- 315, 4, 317, 4, 319, 4,
- 321, 4, 323, 4, // NOLINT
- 325, 4, 327, 4, 330, 4,
- 332, 4, 334, 4, 336, 4,
- 338, 4, 340, 4, // NOLINT
- 342, 4, 344, 4, 346, 4,
- 348, 4, 350, 4, 352, 4,
- 354, 4, 356, 4, // NOLINT
- 358, 4, 360, 4, 362, 4,
- 364, 4, 366, 4, 368, 4,
- 370, 4, 372, 4, // NOLINT
- 374, 4, 376, -484, 377, 4,
- 379, 4, 381, 4, 385, 840,
- 386, 4, 388, 4, // NOLINT
- 390, 824, 391, 4, 1073742217, 820,
- 394, 820, 395, 4, 398, 316,
- 399, 808, 400, 812, // NOLINT
- 401, 4, 403, 820, 404, 828,
- 406, 844, 407, 836, 408, 4,
- 412, 844, 413, 852, // NOLINT
- 415, 856, 416, 4, 418, 4,
- 420, 4, 422, 872, 423, 4,
- 425, 872, 428, 4, // NOLINT
- 430, 872, 431, 4, 1073742257, 868,
- 434, 868, 435, 4, 437, 4,
- 439, 876, 440, 4, // NOLINT
- 444, 4, 452, 8, 453, 4,
- 455, 8, 456, 4, 458, 8,
- 459, 4, 461, 4, // NOLINT
- 463, 4, 465, 4, 467, 4,
- 469, 4, 471, 4, 473, 4,
- 475, 4, 478, 4, // NOLINT
- 480, 4, 482, 4, 484, 4,
- 486, 4, 488, 4, 490, 4,
- 492, 4, 494, 4, // NOLINT
- 497, 8, 498, 4, 500, 4,
- 502, -388, 503, -224, 504, 4,
- 506, 4, 508, 4, // NOLINT
- 510, 4, 512, 4, 514, 4,
- 516, 4, 518, 4, 520, 4,
- 522, 4, 524, 4, // NOLINT
- 526, 4, 528, 4, 530, 4,
- 532, 4, 534, 4, 536, 4,
- 538, 4, 540, 4, // NOLINT
- 542, 4, 544, -520, 546, 4,
- 548, 4, 550, 4, 552, 4,
- 554, 4, 556, 4, // NOLINT
- 558, 4, 560, 4, 562, 4,
- 570, 43180, 571, 4, 573, -652,
- 574, 43168, 577, 4, // NOLINT
- 579, -780, 580, 276, 581, 284,
- 582, 4, 584, 4, 586, 4,
- 588, 4, 590, 4, // NOLINT
- 880, 4, 882, 4, 886, 4,
- 895, 464, 902, 152, 1073742728, 148,
- 906, 148, 908, 256, // NOLINT
- 1073742734, 252, 911, 252, 1073742737, 128,
- 929, 128, 931, 6, 1073742756, 128,
- 939, 128, 975, 32, // NOLINT
- 984, 4, 986, 4, 988, 4,
- 990, 4, 992, 4, 994, 4,
- 996, 4, 998, 4, // NOLINT
- 1000, 4, 1002, 4, 1004, 4,
- 1006, 4, 1012, -240, 1015, 4,
- 1017, -28, 1018, 4, // NOLINT
- 1073742845, -520, 1023, -520, 1073742848, 320,
- 1039, 320, 1073742864, 128, 1071, 128,
- 1120, 4, 1122, 4, // NOLINT
- 1124, 4, 1126, 4, 1128, 4,
- 1130, 4, 1132, 4, 1134, 4,
- 1136, 4, 1138, 4, // NOLINT
- 1140, 4, 1142, 4, 1144, 4,
- 1146, 4, 1148, 4, 1150, 4,
- 1152, 4, 1162, 4, // NOLINT
- 1164, 4, 1166, 4, 1168, 4,
- 1170, 4, 1172, 4, 1174, 4,
- 1176, 4, 1178, 4, // NOLINT
- 1180, 4, 1182, 4, 1184, 4,
- 1186, 4, 1188, 4, 1190, 4,
- 1192, 4, 1194, 4, // NOLINT
- 1196, 4, 1198, 4, 1200, 4,
- 1202, 4, 1204, 4, 1206, 4,
- 1208, 4, 1210, 4, // NOLINT
- 1212, 4, 1214, 4, 1216, 60,
- 1217, 4, 1219, 4, 1221, 4,
- 1223, 4, 1225, 4, // NOLINT
- 1227, 4, 1229, 4, 1232, 4,
- 1234, 4, 1236, 4, 1238, 4,
- 1240, 4, 1242, 4, // NOLINT
- 1244, 4, 1246, 4, 1248, 4,
- 1250, 4, 1252, 4, 1254, 4,
- 1256, 4, 1258, 4, // NOLINT
- 1260, 4, 1262, 4, 1264, 4,
- 1266, 4, 1268, 4, 1270, 4,
- 1272, 4, 1274, 4, // NOLINT
- 1276, 4, 1278, 4, 1280, 4,
- 1282, 4, 1284, 4, 1286, 4,
- 1288, 4, 1290, 4, // NOLINT
- 1292, 4, 1294, 4, 1296, 4,
- 1298, 4, 1300, 4, 1302, 4,
- 1304, 4, 1306, 4, // NOLINT
- 1308, 4, 1310, 4, 1312, 4,
- 1314, 4, 1316, 4, 1318, 4,
- 1320, 4, 1322, 4, // NOLINT
- 1324, 4, 1326, 4, 1073743153, 192,
- 1366, 192, 1073746080, 29056, 4293, 29056,
- 4295, 29056, 4301, 29056, // NOLINT
- 7680, 4, 7682, 4, 7684, 4,
- 7686, 4, 7688, 4, 7690, 4,
- 7692, 4, 7694, 4, // NOLINT
- 7696, 4, 7698, 4, 7700, 4,
- 7702, 4, 7704, 4, 7706, 4,
- 7708, 4, 7710, 4, // NOLINT
- 7712, 4, 7714, 4, 7716, 4,
- 7718, 4, 7720, 4, 7722, 4,
- 7724, 4, 7726, 4, // NOLINT
- 7728, 4, 7730, 4, 7732, 4,
- 7734, 4, 7736, 4, 7738, 4,
- 7740, 4, 7742, 4, // NOLINT
- 7744, 4, 7746, 4, 7748, 4,
- 7750, 4, 7752, 4, 7754, 4,
- 7756, 4, 7758, 4, // NOLINT
- 7760, 4, 7762, 4, 7764, 4,
- 7766, 4, 7768, 4, 7770, 4,
- 7772, 4, 7774, 4, // NOLINT
- 7776, 4, 7778, 4, 7780, 4,
- 7782, 4, 7784, 4, 7786, 4,
- 7788, 4, 7790, 4, // NOLINT
- 7792, 4, 7794, 4, 7796, 4,
- 7798, 4, 7800, 4, 7802, 4,
- 7804, 4, 7806, 4, // NOLINT
- 7808, 4, 7810, 4, 7812, 4,
- 7814, 4, 7816, 4, 7818, 4,
- 7820, 4, 7822, 4, // NOLINT
- 7824, 4, 7826, 4, 7828, 4,
- 7838, -30460, 7840, 4, 7842, 4,
- 7844, 4, 7846, 4, // NOLINT
- 7848, 4, 7850, 4, 7852, 4,
- 7854, 4, 7856, 4, 7858, 4,
- 7860, 4, 7862, 4, // NOLINT
- 7864, 4, 7866, 4, 7868, 4,
- 7870, 4, 7872, 4, 7874, 4,
- 7876, 4, 7878, 4, // NOLINT
- 7880, 4, 7882, 4, 7884, 4,
- 7886, 4, 7888, 4, 7890, 4,
- 7892, 4, 7894, 4, // NOLINT
- 7896, 4, 7898, 4, 7900, 4,
- 7902, 4, 7904, 4, 7906, 4,
- 7908, 4, 7910, 4, // NOLINT
- 7912, 4, 7914, 4, 7916, 4,
- 7918, 4, 7920, 4, 7922, 4,
- 7924, 4, 7926, 4, // NOLINT
- 7928, 4, 7930, 4, 7932, 4,
- 7934, 4, 1073749768, -32, 7951, -32,
- 1073749784, -32, 7965, -32, // NOLINT
- 1073749800, -32, 7983, -32, 1073749816, -32,
- 7999, -32, 1073749832, -32, 8013, -32,
- 8025, -32, 8027, -32, // NOLINT
- 8029, -32, 8031, -32, 1073749864, -32,
- 8047, -32, 1073749896, -32, 8079, -32,
- 1073749912, -32, 8095, -32, // NOLINT
- 1073749928, -32, 8111, -32, 1073749944, -32,
- 8121, -32, 1073749946, -296, 8123, -296,
- 8124, -36, 1073749960, -344, // NOLINT
- 8139, -344, 8140, -36, 1073749976, -32,
- 8153, -32, 1073749978, -400, 8155, -400,
- 1073749992, -32, 8169, -32, // NOLINT
- 1073749994, -448, 8171, -448, 8172, -28,
- 1073750008, -512, 8185, -512, 1073750010, -504,
- 8187, -504, 8188, -36}; // NOLINT
-static const uint16_t kToLowercaseMultiStrings0Size = 2; // NOLINT
-static const MultiCharacterSpecialCase<1> kToLowercaseMultiStrings1[1] =
- { // NOLINT
- {{kSentinel}}}; // NOLINT
-static const uint16_t kToLowercaseTable1Size = 79; // NOLINT
+ 1073741889, 128, 90, 128, 1073742016, 128, 214, 128,
+ 1073742040, 128, 222, 128, 256, 4, 258, 4,
+ 260, 4, 262, 4, 264, 4, 266, 4,
+ 268, 4, 270, 4, 272, 4, 274, 4,
+ 276, 4, 278, 4, 280, 4, 282, 4,
+ 284, 4, 286, 4, 288, 4, 290, 4,
+ 292, 4, 294, 4, 296, 4, 298, 4,
+ 300, 4, 302, 4, 304, 1, 306, 4,
+ 308, 4, 310, 4, 313, 4, 315, 4,
+ 317, 4, 319, 4, 321, 4, 323, 4,
+ 325, 4, 327, 4, 330, 4, 332, 4,
+ 334, 4, 336, 4, 338, 4, 340, 4,
+ 342, 4, 344, 4, 346, 4, 348, 4,
+ 350, 4, 352, 4, 354, 4, 356, 4,
+ 358, 4, 360, 4, 362, 4, 364, 4,
+ 366, 4, 368, 4, 370, 4, 372, 4,
+ 374, 4, 376, -484, 377, 4, 379, 4,
+ 381, 4, 385, 840, 386, 4, 388, 4,
+ 390, 824, 391, 4, 1073742217, 820, 394, 820,
+ 395, 4, 398, 316, 399, 808, 400, 812,
+ 401, 4, 403, 820, 404, 828, 406, 844,
+ 407, 836, 408, 4, 412, 844, 413, 852,
+ 415, 856, 416, 4, 418, 4, 420, 4,
+ 422, 872, 423, 4, 425, 872, 428, 4,
+ 430, 872, 431, 4, 1073742257, 868, 434, 868,
+ 435, 4, 437, 4, 439, 876, 440, 4,
+ 444, 4, 452, 8, 453, 4, 455, 8,
+ 456, 4, 458, 8, 459, 4, 461, 4,
+ 463, 4, 465, 4, 467, 4, 469, 4,
+ 471, 4, 473, 4, 475, 4, 478, 4,
+ 480, 4, 482, 4, 484, 4, 486, 4,
+ 488, 4, 490, 4, 492, 4, 494, 4,
+ 497, 8, 498, 4, 500, 4, 502, -388,
+ 503, -224, 504, 4, 506, 4, 508, 4,
+ 510, 4, 512, 4, 514, 4, 516, 4,
+ 518, 4, 520, 4, 522, 4, 524, 4,
+ 526, 4, 528, 4, 530, 4, 532, 4,
+ 534, 4, 536, 4, 538, 4, 540, 4,
+ 542, 4, 544, -520, 546, 4, 548, 4,
+ 550, 4, 552, 4, 554, 4, 556, 4,
+ 558, 4, 560, 4, 562, 4, 570, 43180,
+ 571, 4, 573, -652, 574, 43168, 577, 4,
+ 579, -780, 580, 276, 581, 284, 582, 4,
+ 584, 4, 586, 4, 588, 4, 590, 4,
+ 880, 4, 882, 4, 886, 4, 895, 464,
+ 902, 152, 1073742728, 148, 906, 148, 908, 256,
+ 1073742734, 252, 911, 252, 1073742737, 128, 929, 128,
+ 931, 6, 1073742756, 128, 939, 128, 975, 32,
+ 984, 4, 986, 4, 988, 4, 990, 4,
+ 992, 4, 994, 4, 996, 4, 998, 4,
+ 1000, 4, 1002, 4, 1004, 4, 1006, 4,
+ 1012, -240, 1015, 4, 1017, -28, 1018, 4,
+ 1073742845, -520, 1023, -520, 1073742848, 320, 1039, 320,
+ 1073742864, 128, 1071, 128, 1120, 4, 1122, 4,
+ 1124, 4, 1126, 4, 1128, 4, 1130, 4,
+ 1132, 4, 1134, 4, 1136, 4, 1138, 4,
+ 1140, 4, 1142, 4, 1144, 4, 1146, 4,
+ 1148, 4, 1150, 4, 1152, 4, 1162, 4,
+ 1164, 4, 1166, 4, 1168, 4, 1170, 4,
+ 1172, 4, 1174, 4, 1176, 4, 1178, 4,
+ 1180, 4, 1182, 4, 1184, 4, 1186, 4,
+ 1188, 4, 1190, 4, 1192, 4, 1194, 4,
+ 1196, 4, 1198, 4, 1200, 4, 1202, 4,
+ 1204, 4, 1206, 4, 1208, 4, 1210, 4,
+ 1212, 4, 1214, 4, 1216, 60, 1217, 4,
+ 1219, 4, 1221, 4, 1223, 4, 1225, 4,
+ 1227, 4, 1229, 4, 1232, 4, 1234, 4,
+ 1236, 4, 1238, 4, 1240, 4, 1242, 4,
+ 1244, 4, 1246, 4, 1248, 4, 1250, 4,
+ 1252, 4, 1254, 4, 1256, 4, 1258, 4,
+ 1260, 4, 1262, 4, 1264, 4, 1266, 4,
+ 1268, 4, 1270, 4, 1272, 4, 1274, 4,
+ 1276, 4, 1278, 4, 1280, 4, 1282, 4,
+ 1284, 4, 1286, 4, 1288, 4, 1290, 4,
+ 1292, 4, 1294, 4, 1296, 4, 1298, 4,
+ 1300, 4, 1302, 4, 1304, 4, 1306, 4,
+ 1308, 4, 1310, 4, 1312, 4, 1314, 4,
+ 1316, 4, 1318, 4, 1320, 4, 1322, 4,
+ 1324, 4, 1326, 4, 1073743153, 192, 1366, 192,
+ 1073746080, 29056, 4293, 29056, 4295, 29056, 4301, 29056,
+ 7680, 4, 7682, 4, 7684, 4, 7686, 4,
+ 7688, 4, 7690, 4, 7692, 4, 7694, 4,
+ 7696, 4, 7698, 4, 7700, 4, 7702, 4,
+ 7704, 4, 7706, 4, 7708, 4, 7710, 4,
+ 7712, 4, 7714, 4, 7716, 4, 7718, 4,
+ 7720, 4, 7722, 4, 7724, 4, 7726, 4,
+ 7728, 4, 7730, 4, 7732, 4, 7734, 4,
+ 7736, 4, 7738, 4, 7740, 4, 7742, 4,
+ 7744, 4, 7746, 4, 7748, 4, 7750, 4,
+ 7752, 4, 7754, 4, 7756, 4, 7758, 4,
+ 7760, 4, 7762, 4, 7764, 4, 7766, 4,
+ 7768, 4, 7770, 4, 7772, 4, 7774, 4,
+ 7776, 4, 7778, 4, 7780, 4, 7782, 4,
+ 7784, 4, 7786, 4, 7788, 4, 7790, 4,
+ 7792, 4, 7794, 4, 7796, 4, 7798, 4,
+ 7800, 4, 7802, 4, 7804, 4, 7806, 4,
+ 7808, 4, 7810, 4, 7812, 4, 7814, 4,
+ 7816, 4, 7818, 4, 7820, 4, 7822, 4,
+ 7824, 4, 7826, 4, 7828, 4, 7838, -30460,
+ 7840, 4, 7842, 4, 7844, 4, 7846, 4,
+ 7848, 4, 7850, 4, 7852, 4, 7854, 4,
+ 7856, 4, 7858, 4, 7860, 4, 7862, 4,
+ 7864, 4, 7866, 4, 7868, 4, 7870, 4,
+ 7872, 4, 7874, 4, 7876, 4, 7878, 4,
+ 7880, 4, 7882, 4, 7884, 4, 7886, 4,
+ 7888, 4, 7890, 4, 7892, 4, 7894, 4,
+ 7896, 4, 7898, 4, 7900, 4, 7902, 4,
+ 7904, 4, 7906, 4, 7908, 4, 7910, 4,
+ 7912, 4, 7914, 4, 7916, 4, 7918, 4,
+ 7920, 4, 7922, 4, 7924, 4, 7926, 4,
+ 7928, 4, 7930, 4, 7932, 4, 7934, 4,
+ 1073749768, -32, 7951, -32, 1073749784, -32, 7965, -32,
+ 1073749800, -32, 7983, -32, 1073749816, -32, 7999, -32,
+ 1073749832, -32, 8013, -32, 8025, -32, 8027, -32,
+ 8029, -32, 8031, -32, 1073749864, -32, 8047, -32,
+ 1073749896, -32, 8079, -32, 1073749912, -32, 8095, -32,
+ 1073749928, -32, 8111, -32, 1073749944, -32, 8121, -32,
+ 1073749946, -296, 8123, -296, 8124, -36, 1073749960, -344,
+ 8139, -344, 8140, -36, 1073749976, -32, 8153, -32,
+ 1073749978, -400, 8155, -400, 1073749992, -32, 8169, -32,
+ 1073749994, -448, 8171, -448, 8172, -28, 1073750008, -512,
+ 8185, -512, 1073750010, -504, 8187, -504, 8188, -36};
+static const uint16_t kToLowercaseMultiStrings0Size = 2;
+static const MultiCharacterSpecialCase<1> kToLowercaseMultiStrings1[1] = {
+ {{kSentinel}}};
+static const uint16_t kToLowercaseTable1Size = 79;
static const int32_t kToLowercaseTable1[158] = {
- 294, -30068, 298, -33532, 299, -33048, 306,
- 112, 1073742176, 64, 367, 64, 387, 4,
- 1073743030, 104, // NOLINT
- 1231, 104, 1073744896, 192, 3118, 192, 3168,
- 4, 3170, -42972, 3171, -15256, 3172, -42908,
- 3175, 4, // NOLINT
- 3177, 4, 3179, 4, 3181, -43120, 3182,
- -42996, 3183, -43132, 3184, -43128, 3186, 4,
- 3189, 4, // NOLINT
- 1073745022, -43260, 3199, -43260, 3200, 4, 3202,
- 4, 3204, 4, 3206, 4, 3208, 4,
- 3210, 4, // NOLINT
- 3212, 4, 3214, 4, 3216, 4, 3218,
- 4, 3220, 4, 3222, 4, 3224, 4,
- 3226, 4, // NOLINT
- 3228, 4, 3230, 4, 3232, 4, 3234,
- 4, 3236, 4, 3238, 4, 3240, 4,
- 3242, 4, // NOLINT
- 3244, 4, 3246, 4, 3248, 4, 3250,
- 4, 3252, 4, 3254, 4, 3256, 4,
- 3258, 4, // NOLINT
- 3260, 4, 3262, 4, 3264, 4, 3266,
- 4, 3268, 4, 3270, 4, 3272, 4,
- 3274, 4, // NOLINT
- 3276, 4, 3278, 4, 3280, 4, 3282,
- 4, 3284, 4, 3286, 4, 3288, 4,
- 3290, 4, // NOLINT
- 3292, 4, 3294, 4, 3296, 4, 3298,
- 4, 3307, 4, 3309, 4, 3314, 4}; // NOLINT
-static const uint16_t kToLowercaseMultiStrings1Size = 1; // NOLINT
-static const MultiCharacterSpecialCase<1> kToLowercaseMultiStrings5[1] =
- { // NOLINT
- {{kSentinel}}}; // NOLINT
-static const uint16_t kToLowercaseTable5Size = 103; // NOLINT
+ 294, -30068, 298, -33532, 299, -33048, 306, 112,
+ 1073742176, 64, 367, 64, 387, 4, 1073743030, 104,
+ 1231, 104, 1073744896, 192, 3118, 192, 3168, 4,
+ 3170, -42972, 3171, -15256, 3172, -42908, 3175, 4,
+ 3177, 4, 3179, 4, 3181, -43120, 3182, -42996,
+ 3183, -43132, 3184, -43128, 3186, 4, 3189, 4,
+ 1073745022, -43260, 3199, -43260, 3200, 4, 3202, 4,
+ 3204, 4, 3206, 4, 3208, 4, 3210, 4,
+ 3212, 4, 3214, 4, 3216, 4, 3218, 4,
+ 3220, 4, 3222, 4, 3224, 4, 3226, 4,
+ 3228, 4, 3230, 4, 3232, 4, 3234, 4,
+ 3236, 4, 3238, 4, 3240, 4, 3242, 4,
+ 3244, 4, 3246, 4, 3248, 4, 3250, 4,
+ 3252, 4, 3254, 4, 3256, 4, 3258, 4,
+ 3260, 4, 3262, 4, 3264, 4, 3266, 4,
+ 3268, 4, 3270, 4, 3272, 4, 3274, 4,
+ 3276, 4, 3278, 4, 3280, 4, 3282, 4,
+ 3284, 4, 3286, 4, 3288, 4, 3290, 4,
+ 3292, 4, 3294, 4, 3296, 4, 3298, 4,
+ 3307, 4, 3309, 4, 3314, 4};
+static const uint16_t kToLowercaseMultiStrings1Size = 1;
+static const MultiCharacterSpecialCase<1> kToLowercaseMultiStrings5[1] = {
+ {{kSentinel}}};
+static const uint16_t kToLowercaseTable5Size = 103;
static const int32_t kToLowercaseTable5[206] = {
- 1600, 4, 1602, 4, 1604, 4, 1606, 4,
- 1608, 4, 1610, 4, 1612, 4, 1614, 4, // NOLINT
- 1616, 4, 1618, 4, 1620, 4, 1622, 4,
- 1624, 4, 1626, 4, 1628, 4, 1630, 4, // NOLINT
- 1632, 4, 1634, 4, 1636, 4, 1638, 4,
- 1640, 4, 1642, 4, 1644, 4, 1664, 4, // NOLINT
- 1666, 4, 1668, 4, 1670, 4, 1672, 4,
- 1674, 4, 1676, 4, 1678, 4, 1680, 4, // NOLINT
- 1682, 4, 1684, 4, 1686, 4, 1688, 4,
- 1690, 4, 1826, 4, 1828, 4, 1830, 4, // NOLINT
- 1832, 4, 1834, 4, 1836, 4, 1838, 4,
- 1842, 4, 1844, 4, 1846, 4, 1848, 4, // NOLINT
- 1850, 4, 1852, 4, 1854, 4, 1856, 4,
- 1858, 4, 1860, 4, 1862, 4, 1864, 4, // NOLINT
- 1866, 4, 1868, 4, 1870, 4, 1872, 4,
- 1874, 4, 1876, 4, 1878, 4, 1880, 4, // NOLINT
- 1882, 4, 1884, 4, 1886, 4, 1888, 4,
- 1890, 4, 1892, 4, 1894, 4, 1896, 4, // NOLINT
- 1898, 4, 1900, 4, 1902, 4, 1913, 4,
- 1915, 4, 1917, -141328, 1918, 4, 1920, 4, // NOLINT
- 1922, 4, 1924, 4, 1926, 4, 1931, 4,
- 1933, -169120, 1936, 4, 1938, 4, 1942, 4, // NOLINT
- 1944, 4, 1946, 4, 1948, 4, 1950, 4,
- 1952, 4, 1954, 4, 1956, 4, 1958, 4, // NOLINT
- 1960, 4, 1962, -169232, 1963, -169276, 1964, -169260,
- 1965, -169220, 1968, -169032, 1969, -169128}; // NOLINT
-static const uint16_t kToLowercaseMultiStrings5Size = 1; // NOLINT
-static const MultiCharacterSpecialCase<1> kToLowercaseMultiStrings7[1] =
- { // NOLINT
- {{kSentinel}}}; // NOLINT
-static const uint16_t kToLowercaseTable7Size = 2; // NOLINT
-static const int32_t kToLowercaseTable7[4] = {1073749793, 128, 7994,
- 128}; // NOLINT
-static const uint16_t kToLowercaseMultiStrings7Size = 1; // NOLINT
+ 1600, 4, 1602, 4, 1604, 4, 1606, 4, 1608, 4,
+ 1610, 4, 1612, 4, 1614, 4, 1616, 4, 1618, 4,
+ 1620, 4, 1622, 4, 1624, 4, 1626, 4, 1628, 4,
+ 1630, 4, 1632, 4, 1634, 4, 1636, 4, 1638, 4,
+ 1640, 4, 1642, 4, 1644, 4, 1664, 4, 1666, 4,
+ 1668, 4, 1670, 4, 1672, 4, 1674, 4, 1676, 4,
+ 1678, 4, 1680, 4, 1682, 4, 1684, 4, 1686, 4,
+ 1688, 4, 1690, 4, 1826, 4, 1828, 4, 1830, 4,
+ 1832, 4, 1834, 4, 1836, 4, 1838, 4, 1842, 4,
+ 1844, 4, 1846, 4, 1848, 4, 1850, 4, 1852, 4,
+ 1854, 4, 1856, 4, 1858, 4, 1860, 4, 1862, 4,
+ 1864, 4, 1866, 4, 1868, 4, 1870, 4, 1872, 4,
+ 1874, 4, 1876, 4, 1878, 4, 1880, 4, 1882, 4,
+ 1884, 4, 1886, 4, 1888, 4, 1890, 4, 1892, 4,
+ 1894, 4, 1896, 4, 1898, 4, 1900, 4, 1902, 4,
+ 1913, 4, 1915, 4, 1917, -141328, 1918, 4, 1920, 4,
+ 1922, 4, 1924, 4, 1926, 4, 1931, 4, 1933, -169120,
+ 1936, 4, 1938, 4, 1942, 4, 1944, 4, 1946, 4,
+ 1948, 4, 1950, 4, 1952, 4, 1954, 4, 1956, 4,
+ 1958, 4, 1960, 4, 1962, -169232, 1963, -169276, 1964, -169260,
+ 1965, -169220, 1968, -169032, 1969, -169128};
+static const uint16_t kToLowercaseMultiStrings5Size = 1;
+static const MultiCharacterSpecialCase<1> kToLowercaseMultiStrings7[1] = {
+ {{kSentinel}}};
+static const uint16_t kToLowercaseTable7Size = 2;
+static const int32_t kToLowercaseTable7[4] = {1073749793, 128, 7994, 128};
+static const uint16_t kToLowercaseMultiStrings7Size = 1;
int ToLowercase::Convert(uchar c, uchar n, uchar* result,
bool* allow_caching_ptr) {
int chunk_index = c >> 13;
@@ -1285,348 +1009,296 @@ int ToLowercase::Convert(uchar c, uchar n, uchar* result,
}
}
-static const MultiCharacterSpecialCase<3> kToUppercaseMultiStrings0[62] =
- { // NOLINT
- {{83, 83, kSentinel}}, {{700, 78, kSentinel}},
- {{74, 780, kSentinel}}, {{921, 776, 769}}, // NOLINT
- {{933, 776, 769}}, {{1333, 1362, kSentinel}},
- {{72, 817, kSentinel}}, {{84, 776, kSentinel}}, // NOLINT
- {{87, 778, kSentinel}}, {{89, 778, kSentinel}},
- {{65, 702, kSentinel}}, {{933, 787, kSentinel}}, // NOLINT
- {{933, 787, 768}}, {{933, 787, 769}},
- {{933, 787, 834}}, {{7944, 921, kSentinel}}, // NOLINT
- {{7945, 921, kSentinel}}, {{7946, 921, kSentinel}},
- {{7947, 921, kSentinel}}, {{7948, 921, kSentinel}}, // NOLINT
- {{7949, 921, kSentinel}}, {{7950, 921, kSentinel}},
- {{7951, 921, kSentinel}}, {{7976, 921, kSentinel}}, // NOLINT
- {{7977, 921, kSentinel}}, {{7978, 921, kSentinel}},
- {{7979, 921, kSentinel}}, {{7980, 921, kSentinel}}, // NOLINT
- {{7981, 921, kSentinel}}, {{7982, 921, kSentinel}},
- {{7983, 921, kSentinel}}, {{8040, 921, kSentinel}}, // NOLINT
- {{8041, 921, kSentinel}}, {{8042, 921, kSentinel}},
- {{8043, 921, kSentinel}}, {{8044, 921, kSentinel}}, // NOLINT
- {{8045, 921, kSentinel}}, {{8046, 921, kSentinel}},
- {{8047, 921, kSentinel}}, {{8122, 921, kSentinel}}, // NOLINT
- {{913, 921, kSentinel}}, {{902, 921, kSentinel}},
- {{913, 834, kSentinel}}, {{913, 834, 921}}, // NOLINT
- {{8138, 921, kSentinel}}, {{919, 921, kSentinel}},
- {{905, 921, kSentinel}}, {{919, 834, kSentinel}}, // NOLINT
- {{919, 834, 921}}, {{921, 776, 768}},
- {{921, 834, kSentinel}}, {{921, 776, 834}}, // NOLINT
- {{933, 776, 768}}, {{929, 787, kSentinel}},
- {{933, 834, kSentinel}}, {{933, 776, 834}}, // NOLINT
- {{8186, 921, kSentinel}}, {{937, 921, kSentinel}},
- {{911, 921, kSentinel}}, {{937, 834, kSentinel}}, // NOLINT
- {{937, 834, 921}}, {{kSentinel}}}; // NOLINT
-static const uint16_t kToUppercaseTable0Size = 590; // NOLINT
+static const MultiCharacterSpecialCase<3> kToUppercaseMultiStrings0[62] = {
+ {{83, 83, kSentinel}}, {{700, 78, kSentinel}},
+ {{74, 780, kSentinel}}, {{921, 776, 769}},
+ {{933, 776, 769}}, {{1333, 1362, kSentinel}},
+ {{72, 817, kSentinel}}, {{84, 776, kSentinel}},
+ {{87, 778, kSentinel}}, {{89, 778, kSentinel}},
+ {{65, 702, kSentinel}}, {{933, 787, kSentinel}},
+ {{933, 787, 768}}, {{933, 787, 769}},
+ {{933, 787, 834}}, {{7944, 921, kSentinel}},
+ {{7945, 921, kSentinel}}, {{7946, 921, kSentinel}},
+ {{7947, 921, kSentinel}}, {{7948, 921, kSentinel}},
+ {{7949, 921, kSentinel}}, {{7950, 921, kSentinel}},
+ {{7951, 921, kSentinel}}, {{7976, 921, kSentinel}},
+ {{7977, 921, kSentinel}}, {{7978, 921, kSentinel}},
+ {{7979, 921, kSentinel}}, {{7980, 921, kSentinel}},
+ {{7981, 921, kSentinel}}, {{7982, 921, kSentinel}},
+ {{7983, 921, kSentinel}}, {{8040, 921, kSentinel}},
+ {{8041, 921, kSentinel}}, {{8042, 921, kSentinel}},
+ {{8043, 921, kSentinel}}, {{8044, 921, kSentinel}},
+ {{8045, 921, kSentinel}}, {{8046, 921, kSentinel}},
+ {{8047, 921, kSentinel}}, {{8122, 921, kSentinel}},
+ {{913, 921, kSentinel}}, {{902, 921, kSentinel}},
+ {{913, 834, kSentinel}}, {{913, 834, 921}},
+ {{8138, 921, kSentinel}}, {{919, 921, kSentinel}},
+ {{905, 921, kSentinel}}, {{919, 834, kSentinel}},
+ {{919, 834, 921}}, {{921, 776, 768}},
+ {{921, 834, kSentinel}}, {{921, 776, 834}},
+ {{933, 776, 768}}, {{929, 787, kSentinel}},
+ {{933, 834, kSentinel}}, {{933, 776, 834}},
+ {{8186, 921, kSentinel}}, {{937, 921, kSentinel}},
+ {{911, 921, kSentinel}}, {{937, 834, kSentinel}},
+ {{937, 834, 921}}, {{kSentinel}}};
+static const uint16_t kToUppercaseTable0Size = 590;
static const int32_t kToUppercaseTable0[1180] = {
1073741921, -128, 122, -128, 181, 2972,
223, 1, 1073742048, -128, 246, -128,
- 1073742072, -128, 254, -128, // NOLINT
- 255, 484, 257, -4, 259, -4,
- 261, -4, 263, -4, 265, -4,
- 267, -4, 269, -4, // NOLINT
- 271, -4, 273, -4, 275, -4,
- 277, -4, 279, -4, 281, -4,
- 283, -4, 285, -4, // NOLINT
+ 1073742072, -128, 254, -128, 255, 484,
+ 257, -4, 259, -4, 261, -4,
+ 263, -4, 265, -4, 267, -4,
+ 269, -4, 271, -4, 273, -4,
+ 275, -4, 277, -4, 279, -4,
+ 281, -4, 283, -4, 285, -4,
287, -4, 289, -4, 291, -4,
293, -4, 295, -4, 297, -4,
- 299, -4, 301, -4, // NOLINT
- 303, -4, 305, -928, 307, -4,
- 309, -4, 311, -4, 314, -4,
- 316, -4, 318, -4, // NOLINT
- 320, -4, 322, -4, 324, -4,
- 326, -4, 328, -4, 329, 5,
- 331, -4, 333, -4, // NOLINT
+ 299, -4, 301, -4, 303, -4,
+ 305, -928, 307, -4, 309, -4,
+ 311, -4, 314, -4, 316, -4,
+ 318, -4, 320, -4, 322, -4,
+ 324, -4, 326, -4, 328, -4,
+ 329, 5, 331, -4, 333, -4,
335, -4, 337, -4, 339, -4,
341, -4, 343, -4, 345, -4,
- 347, -4, 349, -4, // NOLINT
- 351, -4, 353, -4, 355, -4,
- 357, -4, 359, -4, 361, -4,
- 363, -4, 365, -4, // NOLINT
- 367, -4, 369, -4, 371, -4,
- 373, -4, 375, -4, 378, -4,
- 380, -4, 382, -4, // NOLINT
+ 347, -4, 349, -4, 351, -4,
+ 353, -4, 355, -4, 357, -4,
+ 359, -4, 361, -4, 363, -4,
+ 365, -4, 367, -4, 369, -4,
+ 371, -4, 373, -4, 375, -4,
+ 378, -4, 380, -4, 382, -4,
383, -1200, 384, 780, 387, -4,
389, -4, 392, -4, 396, -4,
- 402, -4, 405, 388, // NOLINT
- 409, -4, 410, 652, 414, 520,
- 417, -4, 419, -4, 421, -4,
- 424, -4, 429, -4, // NOLINT
- 432, -4, 436, -4, 438, -4,
- 441, -4, 445, -4, 447, 224,
- 453, -4, 454, -8, // NOLINT
+ 402, -4, 405, 388, 409, -4,
+ 410, 652, 414, 520, 417, -4,
+ 419, -4, 421, -4, 424, -4,
+ 429, -4, 432, -4, 436, -4,
+ 438, -4, 441, -4, 445, -4,
+ 447, 224, 453, -4, 454, -8,
456, -4, 457, -8, 459, -4,
460, -8, 462, -4, 464, -4,
- 466, -4, 468, -4, // NOLINT
- 470, -4, 472, -4, 474, -4,
- 476, -4, 477, -316, 479, -4,
- 481, -4, 483, -4, // NOLINT
- 485, -4, 487, -4, 489, -4,
- 491, -4, 493, -4, 495, -4,
- 496, 9, 498, -4, // NOLINT
+ 466, -4, 468, -4, 470, -4,
+ 472, -4, 474, -4, 476, -4,
+ 477, -316, 479, -4, 481, -4,
+ 483, -4, 485, -4, 487, -4,
+ 489, -4, 491, -4, 493, -4,
+ 495, -4, 496, 9, 498, -4,
499, -8, 501, -4, 505, -4,
507, -4, 509, -4, 511, -4,
- 513, -4, 515, -4, // NOLINT
- 517, -4, 519, -4, 521, -4,
- 523, -4, 525, -4, 527, -4,
- 529, -4, 531, -4, // NOLINT
- 533, -4, 535, -4, 537, -4,
- 539, -4, 541, -4, 543, -4,
- 547, -4, 549, -4, // NOLINT
+ 513, -4, 515, -4, 517, -4,
+ 519, -4, 521, -4, 523, -4,
+ 525, -4, 527, -4, 529, -4,
+ 531, -4, 533, -4, 535, -4,
+ 537, -4, 539, -4, 541, -4,
+ 543, -4, 547, -4, 549, -4,
551, -4, 553, -4, 555, -4,
557, -4, 559, -4, 561, -4,
- 563, -4, 572, -4, // NOLINT
- 1073742399, 43260, 576, 43260, 578, -4,
- 583, -4, 585, -4, 587, -4,
- 589, -4, 591, -4, // NOLINT
- 592, 43132, 593, 43120, 594, 43128,
- 595, -840, 596, -824, 1073742422, -820,
- 599, -820, 601, -808, // NOLINT
+ 563, -4, 572, -4, 1073742399, 43260,
+ 576, 43260, 578, -4, 583, -4,
+ 585, -4, 587, -4, 589, -4,
+ 591, -4, 592, 43132, 593, 43120,
+ 594, 43128, 595, -840, 596, -824,
+ 1073742422, -820, 599, -820, 601, -808,
603, -812, 604, 169276, 608, -820,
609, 169260, 611, -828, 613, 169120,
- 614, 169232, 616, -836, // NOLINT
- 617, -844, 619, 42972, 620, 169220,
- 623, -844, 625, 42996, 626, -852,
- 629, -856, 637, 42908, // NOLINT
- 640, -872, 643, -872, 647, 169128,
- 648, -872, 649, -276, 1073742474, -868,
- 651, -868, 652, -284, // NOLINT
+ 614, 169232, 616, -836, 617, -844,
+ 619, 42972, 620, 169220, 623, -844,
+ 625, 42996, 626, -852, 629, -856,
+ 637, 42908, 640, -872, 643, -872,
+ 647, 169128, 648, -872, 649, -276,
+ 1073742474, -868, 651, -868, 652, -284,
658, -876, 670, 169032, 837, 336,
881, -4, 883, -4, 887, -4,
- 1073742715, 520, 893, 520, // NOLINT
- 912, 13, 940, -152, 1073742765, -148,
- 943, -148, 944, 17, 1073742769, -128,
- 961, -128, 962, -124, // NOLINT
- 1073742787, -128, 971, -128, 972, -256,
- 1073742797, -252, 974, -252, 976, -248,
- 977, -228, 981, -188, // NOLINT
+ 1073742715, 520, 893, 520, 912, 13,
+ 940, -152, 1073742765, -148, 943, -148,
+ 944, 17, 1073742769, -128, 961, -128,
+ 962, -124, 1073742787, -128, 971, -128,
+ 972, -256, 1073742797, -252, 974, -252,
+ 976, -248, 977, -228, 981, -188,
982, -216, 983, -32, 985, -4,
987, -4, 989, -4, 991, -4,
- 993, -4, 995, -4, // NOLINT
- 997, -4, 999, -4, 1001, -4,
- 1003, -4, 1005, -4, 1007, -4,
- 1008, -344, 1009, -320, // NOLINT
- 1010, 28, 1011, -464, 1013, -384,
- 1016, -4, 1019, -4, 1073742896, -128,
- 1103, -128, 1073742928, -320, // NOLINT
+ 993, -4, 995, -4, 997, -4,
+ 999, -4, 1001, -4, 1003, -4,
+ 1005, -4, 1007, -4, 1008, -344,
+ 1009, -320, 1010, 28, 1011, -464,
+ 1013, -384, 1016, -4, 1019, -4,
+ 1073742896, -128, 1103, -128, 1073742928, -320,
1119, -320, 1121, -4, 1123, -4,
1125, -4, 1127, -4, 1129, -4,
- 1131, -4, 1133, -4, // NOLINT
- 1135, -4, 1137, -4, 1139, -4,
- 1141, -4, 1143, -4, 1145, -4,
- 1147, -4, 1149, -4, // NOLINT
- 1151, -4, 1153, -4, 1163, -4,
- 1165, -4, 1167, -4, 1169, -4,
- 1171, -4, 1173, -4, // NOLINT
+ 1131, -4, 1133, -4, 1135, -4,
+ 1137, -4, 1139, -4, 1141, -4,
+ 1143, -4, 1145, -4, 1147, -4,
+ 1149, -4, 1151, -4, 1153, -4,
+ 1163, -4, 1165, -4, 1167, -4,
+ 1169, -4, 1171, -4, 1173, -4,
1175, -4, 1177, -4, 1179, -4,
1181, -4, 1183, -4, 1185, -4,
- 1187, -4, 1189, -4, // NOLINT
- 1191, -4, 1193, -4, 1195, -4,
- 1197, -4, 1199, -4, 1201, -4,
- 1203, -4, 1205, -4, // NOLINT
- 1207, -4, 1209, -4, 1211, -4,
- 1213, -4, 1215, -4, 1218, -4,
- 1220, -4, 1222, -4, // NOLINT
+ 1187, -4, 1189, -4, 1191, -4,
+ 1193, -4, 1195, -4, 1197, -4,
+ 1199, -4, 1201, -4, 1203, -4,
+ 1205, -4, 1207, -4, 1209, -4,
+ 1211, -4, 1213, -4, 1215, -4,
+ 1218, -4, 1220, -4, 1222, -4,
1224, -4, 1226, -4, 1228, -4,
1230, -4, 1231, -60, 1233, -4,
- 1235, -4, 1237, -4, // NOLINT
- 1239, -4, 1241, -4, 1243, -4,
- 1245, -4, 1247, -4, 1249, -4,
- 1251, -4, 1253, -4, // NOLINT
- 1255, -4, 1257, -4, 1259, -4,
- 1261, -4, 1263, -4, 1265, -4,
- 1267, -4, 1269, -4, // NOLINT
+ 1235, -4, 1237, -4, 1239, -4,
+ 1241, -4, 1243, -4, 1245, -4,
+ 1247, -4, 1249, -4, 1251, -4,
+ 1253, -4, 1255, -4, 1257, -4,
+ 1259, -4, 1261, -4, 1263, -4,
+ 1265, -4, 1267, -4, 1269, -4,
1271, -4, 1273, -4, 1275, -4,
1277, -4, 1279, -4, 1281, -4,
- 1283, -4, 1285, -4, // NOLINT
- 1287, -4, 1289, -4, 1291, -4,
- 1293, -4, 1295, -4, 1297, -4,
- 1299, -4, 1301, -4, // NOLINT
- 1303, -4, 1305, -4, 1307, -4,
- 1309, -4, 1311, -4, 1313, -4,
- 1315, -4, 1317, -4, // NOLINT
+ 1283, -4, 1285, -4, 1287, -4,
+ 1289, -4, 1291, -4, 1293, -4,
+ 1295, -4, 1297, -4, 1299, -4,
+ 1301, -4, 1303, -4, 1305, -4,
+ 1307, -4, 1309, -4, 1311, -4,
+ 1313, -4, 1315, -4, 1317, -4,
1319, -4, 1321, -4, 1323, -4,
1325, -4, 1327, -4, 1073743201, -192,
- 1414, -192, 1415, 21, // NOLINT
- 7545, 141328, 7549, 15256, 7681, -4,
- 7683, -4, 7685, -4, 7687, -4,
- 7689, -4, 7691, -4, // NOLINT
- 7693, -4, 7695, -4, 7697, -4,
- 7699, -4, 7701, -4, 7703, -4,
- 7705, -4, 7707, -4, // NOLINT
+ 1414, -192, 1415, 21, 7545, 141328,
+ 7549, 15256, 7681, -4, 7683, -4,
+ 7685, -4, 7687, -4, 7689, -4,
+ 7691, -4, 7693, -4, 7695, -4,
+ 7697, -4, 7699, -4, 7701, -4,
+ 7703, -4, 7705, -4, 7707, -4,
7709, -4, 7711, -4, 7713, -4,
7715, -4, 7717, -4, 7719, -4,
- 7721, -4, 7723, -4, // NOLINT
- 7725, -4, 7727, -4, 7729, -4,
- 7731, -4, 7733, -4, 7735, -4,
- 7737, -4, 7739, -4, // NOLINT
- 7741, -4, 7743, -4, 7745, -4,
- 7747, -4, 7749, -4, 7751, -4,
- 7753, -4, 7755, -4, // NOLINT
+ 7721, -4, 7723, -4, 7725, -4,
+ 7727, -4, 7729, -4, 7731, -4,
+ 7733, -4, 7735, -4, 7737, -4,
+ 7739, -4, 7741, -4, 7743, -4,
+ 7745, -4, 7747, -4, 7749, -4,
+ 7751, -4, 7753, -4, 7755, -4,
7757, -4, 7759, -4, 7761, -4,
7763, -4, 7765, -4, 7767, -4,
- 7769, -4, 7771, -4, // NOLINT
- 7773, -4, 7775, -4, 7777, -4,
- 7779, -4, 7781, -4, 7783, -4,
- 7785, -4, 7787, -4, // NOLINT
- 7789, -4, 7791, -4, 7793, -4,
- 7795, -4, 7797, -4, 7799, -4,
- 7801, -4, 7803, -4, // NOLINT
+ 7769, -4, 7771, -4, 7773, -4,
+ 7775, -4, 7777, -4, 7779, -4,
+ 7781, -4, 7783, -4, 7785, -4,
+ 7787, -4, 7789, -4, 7791, -4,
+ 7793, -4, 7795, -4, 7797, -4,
+ 7799, -4, 7801, -4, 7803, -4,
7805, -4, 7807, -4, 7809, -4,
7811, -4, 7813, -4, 7815, -4,
- 7817, -4, 7819, -4, // NOLINT
- 7821, -4, 7823, -4, 7825, -4,
- 7827, -4, 7829, -4, 7830, 25,
- 7831, 29, 7832, 33, // NOLINT
- 7833, 37, 7834, 41, 7835, -236,
- 7841, -4, 7843, -4, 7845, -4,
- 7847, -4, 7849, -4, // NOLINT
+ 7817, -4, 7819, -4, 7821, -4,
+ 7823, -4, 7825, -4, 7827, -4,
+ 7829, -4, 7830, 25, 7831, 29,
+ 7832, 33, 7833, 37, 7834, 41,
+ 7835, -236, 7841, -4, 7843, -4,
+ 7845, -4, 7847, -4, 7849, -4,
7851, -4, 7853, -4, 7855, -4,
7857, -4, 7859, -4, 7861, -4,
- 7863, -4, 7865, -4, // NOLINT
- 7867, -4, 7869, -4, 7871, -4,
- 7873, -4, 7875, -4, 7877, -4,
- 7879, -4, 7881, -4, // NOLINT
- 7883, -4, 7885, -4, 7887, -4,
- 7889, -4, 7891, -4, 7893, -4,
- 7895, -4, 7897, -4, // NOLINT
+ 7863, -4, 7865, -4, 7867, -4,
+ 7869, -4, 7871, -4, 7873, -4,
+ 7875, -4, 7877, -4, 7879, -4,
+ 7881, -4, 7883, -4, 7885, -4,
+ 7887, -4, 7889, -4, 7891, -4,
+ 7893, -4, 7895, -4, 7897, -4,
7899, -4, 7901, -4, 7903, -4,
7905, -4, 7907, -4, 7909, -4,
- 7911, -4, 7913, -4, // NOLINT
- 7915, -4, 7917, -4, 7919, -4,
- 7921, -4, 7923, -4, 7925, -4,
- 7927, -4, 7929, -4, // NOLINT
- 7931, -4, 7933, -4, 7935, -4,
- 1073749760, 32, 7943, 32, 1073749776, 32,
- 7957, 32, 1073749792, 32, // NOLINT
+ 7911, -4, 7913, -4, 7915, -4,
+ 7917, -4, 7919, -4, 7921, -4,
+ 7923, -4, 7925, -4, 7927, -4,
+ 7929, -4, 7931, -4, 7933, -4,
+ 7935, -4, 1073749760, 32, 7943, 32,
+ 1073749776, 32, 7957, 32, 1073749792, 32,
7975, 32, 1073749808, 32, 7991, 32,
1073749824, 32, 8005, 32, 8016, 45,
- 8017, 32, 8018, 49, // NOLINT
- 8019, 32, 8020, 53, 8021, 32,
- 8022, 57, 8023, 32, 1073749856, 32,
- 8039, 32, 1073749872, 296, // NOLINT
- 8049, 296, 1073749874, 344, 8053, 344,
- 1073749878, 400, 8055, 400, 1073749880, 512,
- 8057, 512, 1073749882, 448, // NOLINT
+ 8017, 32, 8018, 49, 8019, 32,
+ 8020, 53, 8021, 32, 8022, 57,
+ 8023, 32, 1073749856, 32, 8039, 32,
+ 1073749872, 296, 8049, 296, 1073749874, 344,
+ 8053, 344, 1073749878, 400, 8055, 400,
+ 1073749880, 512, 8057, 512, 1073749882, 448,
8059, 448, 1073749884, 504, 8061, 504,
8064, 61, 8065, 65, 8066, 69,
- 8067, 73, 8068, 77, // NOLINT
- 8069, 81, 8070, 85, 8071, 89,
- 8072, 61, 8073, 65, 8074, 69,
- 8075, 73, 8076, 77, // NOLINT
- 8077, 81, 8078, 85, 8079, 89,
- 8080, 93, 8081, 97, 8082, 101,
- 8083, 105, 8084, 109, // NOLINT
+ 8067, 73, 8068, 77, 8069, 81,
+ 8070, 85, 8071, 89, 8072, 61,
+ 8073, 65, 8074, 69, 8075, 73,
+ 8076, 77, 8077, 81, 8078, 85,
+ 8079, 89, 8080, 93, 8081, 97,
+ 8082, 101, 8083, 105, 8084, 109,
8085, 113, 8086, 117, 8087, 121,
8088, 93, 8089, 97, 8090, 101,
- 8091, 105, 8092, 109, // NOLINT
- 8093, 113, 8094, 117, 8095, 121,
- 8096, 125, 8097, 129, 8098, 133,
- 8099, 137, 8100, 141, // NOLINT
- 8101, 145, 8102, 149, 8103, 153,
- 8104, 125, 8105, 129, 8106, 133,
- 8107, 137, 8108, 141, // NOLINT
+ 8091, 105, 8092, 109, 8093, 113,
+ 8094, 117, 8095, 121, 8096, 125,
+ 8097, 129, 8098, 133, 8099, 137,
+ 8100, 141, 8101, 145, 8102, 149,
+ 8103, 153, 8104, 125, 8105, 129,
+ 8106, 133, 8107, 137, 8108, 141,
8109, 145, 8110, 149, 8111, 153,
1073749936, 32, 8113, 32, 8114, 157,
- 8115, 161, 8116, 165, // NOLINT
- 8118, 169, 8119, 173, 8124, 161,
- 8126, -28820, 8130, 177, 8131, 181,
- 8132, 185, 8134, 189, // NOLINT
- 8135, 193, 8140, 181, 1073749968, 32,
- 8145, 32, 8146, 197, 8147, 13,
- 8150, 201, 8151, 205, // NOLINT
+ 8115, 161, 8116, 165, 8118, 169,
+ 8119, 173, 8124, 161, 8126, -28820,
+ 8130, 177, 8131, 181, 8132, 185,
+ 8134, 189, 8135, 193, 8140, 181,
+ 1073749968, 32, 8145, 32, 8146, 197,
+ 8147, 13, 8150, 201, 8151, 205,
1073749984, 32, 8161, 32, 8162, 209,
8163, 17, 8164, 213, 8165, 28,
- 8166, 217, 8167, 221, // NOLINT
- 8178, 225, 8179, 229, 8180, 233,
- 8182, 237, 8183, 241, 8188, 229}; // NOLINT
-static const uint16_t kToUppercaseMultiStrings0Size = 62; // NOLINT
-static const MultiCharacterSpecialCase<1> kToUppercaseMultiStrings1[1] =
- { // NOLINT
- {{kSentinel}}}; // NOLINT
-static const uint16_t kToUppercaseTable1Size = 73; // NOLINT
+ 8166, 217, 8167, 221, 8178, 225,
+ 8179, 229, 8180, 233, 8182, 237,
+ 8183, 241, 8188, 229};
+static const uint16_t kToUppercaseMultiStrings0Size = 62;
+static const MultiCharacterSpecialCase<1> kToUppercaseMultiStrings1[1] = {
+ {{kSentinel}}};
+static const uint16_t kToUppercaseTable1Size = 73;
static const int32_t kToUppercaseTable1[146] = {
- 334, -112, 1073742192, -64, 383, -64,
- 388, -4, 1073743056, -104, 1257, -104,
- 1073744944, -192, 3166, -192, // NOLINT
- 3169, -4, 3173, -43180, 3174, -43168,
- 3176, -4, 3178, -4, 3180, -4,
- 3187, -4, 3190, -4, // NOLINT
- 3201, -4, 3203, -4, 3205, -4,
- 3207, -4, 3209, -4, 3211, -4,
- 3213, -4, 3215, -4, // NOLINT
- 3217, -4, 3219, -4, 3221, -4,
- 3223, -4, 3225, -4, 3227, -4,
- 3229, -4, 3231, -4, // NOLINT
- 3233, -4, 3235, -4, 3237, -4,
- 3239, -4, 3241, -4, 3243, -4,
- 3245, -4, 3247, -4, // NOLINT
- 3249, -4, 3251, -4, 3253, -4,
- 3255, -4, 3257, -4, 3259, -4,
- 3261, -4, 3263, -4, // NOLINT
- 3265, -4, 3267, -4, 3269, -4,
- 3271, -4, 3273, -4, 3275, -4,
- 3277, -4, 3279, -4, // NOLINT
- 3281, -4, 3283, -4, 3285, -4,
- 3287, -4, 3289, -4, 3291, -4,
- 3293, -4, 3295, -4, // NOLINT
- 3297, -4, 3299, -4, 3308, -4,
- 3310, -4, 3315, -4, 1073745152, -29056,
- 3365, -29056, 3367, -29056, // NOLINT
- 3373, -29056}; // NOLINT
-static const uint16_t kToUppercaseMultiStrings1Size = 1; // NOLINT
-static const MultiCharacterSpecialCase<1> kToUppercaseMultiStrings5[1] =
- { // NOLINT
- {{kSentinel}}}; // NOLINT
-static const uint16_t kToUppercaseTable5Size = 95; // NOLINT
-static const int32_t
- kToUppercaseTable5[190] = {1601, -4, 1603, -4, 1605, -4, 1607, -4, 1609, -4,
- 1611, -4, 1613, -4, 1615, -4, // NOLINT
- 1617, -4, 1619, -4, 1621, -4, 1623, -4, 1625, -4,
- 1627, -4, 1629, -4, 1631, -4, // NOLINT
- 1633, -4, 1635, -4, 1637, -4, 1639, -4, 1641, -4,
- 1643, -4, 1645, -4, 1665, -4, // NOLINT
- 1667, -4, 1669, -4, 1671, -4, 1673, -4, 1675, -4,
- 1677, -4, 1679, -4, 1681, -4, // NOLINT
- 1683, -4, 1685, -4, 1687, -4, 1689, -4, 1691, -4,
- 1827, -4, 1829, -4, 1831, -4, // NOLINT
- 1833, -4, 1835, -4, 1837, -4, 1839, -4, 1843, -4,
- 1845, -4, 1847, -4, 1849, -4, // NOLINT
- 1851, -4, 1853, -4, 1855, -4, 1857, -4, 1859, -4,
- 1861, -4, 1863, -4, 1865, -4, // NOLINT
- 1867, -4, 1869, -4, 1871, -4, 1873, -4, 1875, -4,
- 1877, -4, 1879, -4, 1881, -4, // NOLINT
- 1883, -4, 1885, -4, 1887, -4, 1889, -4, 1891, -4,
- 1893, -4, 1895, -4, 1897, -4, // NOLINT
- 1899, -4, 1901, -4, 1903, -4, 1914, -4, 1916, -4,
- 1919, -4, 1921, -4, 1923, -4, // NOLINT
- 1925, -4, 1927, -4, 1932, -4, 1937, -4, 1939, -4,
- 1943, -4, 1945, -4, 1947, -4, // NOLINT
- 1949, -4, 1951, -4, 1953, -4, 1955, -4, 1957, -4,
- 1959, -4, 1961, -4}; // NOLINT
-static const uint16_t kToUppercaseMultiStrings5Size = 1; // NOLINT
-static const MultiCharacterSpecialCase<3> kToUppercaseMultiStrings7[12] =
- { // NOLINT
- {{70, 70, kSentinel}},
- {{70, 73, kSentinel}},
- {{70, 76, kSentinel}},
- {{70, 70, 73}}, // NOLINT
- {{70, 70, 76}},
- {{83, 84, kSentinel}},
- {{1348, 1350, kSentinel}},
- {{1348, 1333, kSentinel}}, // NOLINT
- {{1348, 1339, kSentinel}},
- {{1358, 1350, kSentinel}},
- {{1348, 1341, kSentinel}},
- {{kSentinel}}}; // NOLINT
-static const uint16_t kToUppercaseTable7Size = 14; // NOLINT
-static const int32_t kToUppercaseTable7[28] =
- {6912, 1, 6913, 5, 6914, 9, 6915, 13,
- 6916, 17, 6917, 21, 6918, 21, 6931, 25, // NOLINT
- 6932, 29, 6933, 33, 6934, 37, 6935, 41,
- 1073749825, -128, 8026, -128}; // NOLINT
-static const uint16_t kToUppercaseMultiStrings7Size = 12; // NOLINT
+ 334, -112, 1073742192, -64, 383, -64, 388, -4, 1073743056, -104,
+ 1257, -104, 1073744944, -192, 3166, -192, 3169, -4, 3173, -43180,
+ 3174, -43168, 3176, -4, 3178, -4, 3180, -4, 3187, -4,
+ 3190, -4, 3201, -4, 3203, -4, 3205, -4, 3207, -4,
+ 3209, -4, 3211, -4, 3213, -4, 3215, -4, 3217, -4,
+ 3219, -4, 3221, -4, 3223, -4, 3225, -4, 3227, -4,
+ 3229, -4, 3231, -4, 3233, -4, 3235, -4, 3237, -4,
+ 3239, -4, 3241, -4, 3243, -4, 3245, -4, 3247, -4,
+ 3249, -4, 3251, -4, 3253, -4, 3255, -4, 3257, -4,
+ 3259, -4, 3261, -4, 3263, -4, 3265, -4, 3267, -4,
+ 3269, -4, 3271, -4, 3273, -4, 3275, -4, 3277, -4,
+ 3279, -4, 3281, -4, 3283, -4, 3285, -4, 3287, -4,
+ 3289, -4, 3291, -4, 3293, -4, 3295, -4, 3297, -4,
+ 3299, -4, 3308, -4, 3310, -4, 3315, -4, 1073745152, -29056,
+ 3365, -29056, 3367, -29056, 3373, -29056};
+static const uint16_t kToUppercaseMultiStrings1Size = 1;
+static const MultiCharacterSpecialCase<1> kToUppercaseMultiStrings5[1] = {
+ {{kSentinel}}};
+static const uint16_t kToUppercaseTable5Size = 95;
+static const int32_t kToUppercaseTable5[190] = {
+ 1601, -4, 1603, -4, 1605, -4, 1607, -4, 1609, -4, 1611, -4, 1613, -4,
+ 1615, -4, 1617, -4, 1619, -4, 1621, -4, 1623, -4, 1625, -4, 1627, -4,
+ 1629, -4, 1631, -4, 1633, -4, 1635, -4, 1637, -4, 1639, -4, 1641, -4,
+ 1643, -4, 1645, -4, 1665, -4, 1667, -4, 1669, -4, 1671, -4, 1673, -4,
+ 1675, -4, 1677, -4, 1679, -4, 1681, -4, 1683, -4, 1685, -4, 1687, -4,
+ 1689, -4, 1691, -4, 1827, -4, 1829, -4, 1831, -4, 1833, -4, 1835, -4,
+ 1837, -4, 1839, -4, 1843, -4, 1845, -4, 1847, -4, 1849, -4, 1851, -4,
+ 1853, -4, 1855, -4, 1857, -4, 1859, -4, 1861, -4, 1863, -4, 1865, -4,
+ 1867, -4, 1869, -4, 1871, -4, 1873, -4, 1875, -4, 1877, -4, 1879, -4,
+ 1881, -4, 1883, -4, 1885, -4, 1887, -4, 1889, -4, 1891, -4, 1893, -4,
+ 1895, -4, 1897, -4, 1899, -4, 1901, -4, 1903, -4, 1914, -4, 1916, -4,
+ 1919, -4, 1921, -4, 1923, -4, 1925, -4, 1927, -4, 1932, -4, 1937, -4,
+ 1939, -4, 1943, -4, 1945, -4, 1947, -4, 1949, -4, 1951, -4, 1953, -4,
+ 1955, -4, 1957, -4, 1959, -4, 1961, -4};
+static const uint16_t kToUppercaseMultiStrings5Size = 1;
+static const MultiCharacterSpecialCase<3> kToUppercaseMultiStrings7[12] = {
+ {{70, 70, kSentinel}},
+ {{70, 73, kSentinel}},
+ {{70, 76, kSentinel}},
+ {{70, 70, 73}},
+ {{70, 70, 76}},
+ {{83, 84, kSentinel}},
+ {{1348, 1350, kSentinel}},
+ {{1348, 1333, kSentinel}},
+ {{1348, 1339, kSentinel}},
+ {{1358, 1350, kSentinel}},
+ {{1348, 1341, kSentinel}},
+ {{kSentinel}}};
+static const uint16_t kToUppercaseTable7Size = 14;
+static const int32_t kToUppercaseTable7[28] = {
+ 6912, 1, 6913, 5, 6914, 9, 6915, 13, 6916, 17,
+ 6917, 21, 6918, 21, 6931, 25, 6932, 29, 6933, 33,
+ 6934, 37, 6935, 41, 1073749825, -128, 8026, -128};
+static const uint16_t kToUppercaseMultiStrings7Size = 12;
int ToUppercase::Convert(uchar c, uchar n, uchar* result,
bool* allow_caching_ptr) {
int chunk_index = c >> 13;
@@ -1653,269 +1325,221 @@ int ToUppercase::Convert(uchar c, uchar n, uchar* result,
}
static const MultiCharacterSpecialCase<1> kEcma262CanonicalizeMultiStrings0[1] =
- { // NOLINT
- {{kSentinel}}}; // NOLINT
-static const uint16_t kEcma262CanonicalizeTable0Size = 498; // NOLINT
+ {{{kSentinel}}};
+static const uint16_t kEcma262CanonicalizeTable0Size = 498;
static const int32_t kEcma262CanonicalizeTable0[996] = {
1073741921, -128, 122, -128, 181, 2972,
1073742048, -128, 246, -128, 1073742072, -128,
- 254, -128, 255, 484, // NOLINT
- 257, -4, 259, -4, 261, -4,
- 263, -4, 265, -4, 267, -4,
- 269, -4, 271, -4, // NOLINT
- 273, -4, 275, -4, 277, -4,
- 279, -4, 281, -4, 283, -4,
- 285, -4, 287, -4, // NOLINT
+ 254, -128, 255, 484, 257, -4,
+ 259, -4, 261, -4, 263, -4,
+ 265, -4, 267, -4, 269, -4,
+ 271, -4, 273, -4, 275, -4,
+ 277, -4, 279, -4, 281, -4,
+ 283, -4, 285, -4, 287, -4,
289, -4, 291, -4, 293, -4,
295, -4, 297, -4, 299, -4,
- 301, -4, 303, -4, // NOLINT
- 307, -4, 309, -4, 311, -4,
- 314, -4, 316, -4, 318, -4,
- 320, -4, 322, -4, // NOLINT
- 324, -4, 326, -4, 328, -4,
- 331, -4, 333, -4, 335, -4,
- 337, -4, 339, -4, // NOLINT
+ 301, -4, 303, -4, 307, -4,
+ 309, -4, 311, -4, 314, -4,
+ 316, -4, 318, -4, 320, -4,
+ 322, -4, 324, -4, 326, -4,
+ 328, -4, 331, -4, 333, -4,
+ 335, -4, 337, -4, 339, -4,
341, -4, 343, -4, 345, -4,
347, -4, 349, -4, 351, -4,
- 353, -4, 355, -4, // NOLINT
- 357, -4, 359, -4, 361, -4,
- 363, -4, 365, -4, 367, -4,
- 369, -4, 371, -4, // NOLINT
- 373, -4, 375, -4, 378, -4,
- 380, -4, 382, -4, 384, 780,
- 387, -4, 389, -4, // NOLINT
+ 353, -4, 355, -4, 357, -4,
+ 359, -4, 361, -4, 363, -4,
+ 365, -4, 367, -4, 369, -4,
+ 371, -4, 373, -4, 375, -4,
+ 378, -4, 380, -4, 382, -4,
+ 384, 780, 387, -4, 389, -4,
392, -4, 396, -4, 402, -4,
405, 388, 409, -4, 410, 652,
- 414, 520, 417, -4, // NOLINT
- 419, -4, 421, -4, 424, -4,
- 429, -4, 432, -4, 436, -4,
- 438, -4, 441, -4, // NOLINT
- 445, -4, 447, 224, 453, -4,
- 454, -8, 456, -4, 457, -8,
- 459, -4, 460, -8, // NOLINT
+ 414, 520, 417, -4, 419, -4,
+ 421, -4, 424, -4, 429, -4,
+ 432, -4, 436, -4, 438, -4,
+ 441, -4, 445, -4, 447, 224,
+ 453, -4, 454, -8, 456, -4,
+ 457, -8, 459, -4, 460, -8,
462, -4, 464, -4, 466, -4,
468, -4, 470, -4, 472, -4,
- 474, -4, 476, -4, // NOLINT
- 477, -316, 479, -4, 481, -4,
- 483, -4, 485, -4, 487, -4,
- 489, -4, 491, -4, // NOLINT
- 493, -4, 495, -4, 498, -4,
- 499, -8, 501, -4, 505, -4,
- 507, -4, 509, -4, // NOLINT
+ 474, -4, 476, -4, 477, -316,
+ 479, -4, 481, -4, 483, -4,
+ 485, -4, 487, -4, 489, -4,
+ 491, -4, 493, -4, 495, -4,
+ 498, -4, 499, -8, 501, -4,
+ 505, -4, 507, -4, 509, -4,
511, -4, 513, -4, 515, -4,
517, -4, 519, -4, 521, -4,
- 523, -4, 525, -4, // NOLINT
- 527, -4, 529, -4, 531, -4,
- 533, -4, 535, -4, 537, -4,
- 539, -4, 541, -4, // NOLINT
- 543, -4, 547, -4, 549, -4,
- 551, -4, 553, -4, 555, -4,
- 557, -4, 559, -4, // NOLINT
+ 523, -4, 525, -4, 527, -4,
+ 529, -4, 531, -4, 533, -4,
+ 535, -4, 537, -4, 539, -4,
+ 541, -4, 543, -4, 547, -4,
+ 549, -4, 551, -4, 553, -4,
+ 555, -4, 557, -4, 559, -4,
561, -4, 563, -4, 572, -4,
1073742399, 43260, 576, 43260, 578, -4,
- 583, -4, 585, -4, // NOLINT
- 587, -4, 589, -4, 591, -4,
- 592, 43132, 593, 43120, 594, 43128,
- 595, -840, 596, -824, // NOLINT
- 1073742422, -820, 599, -820, 601, -808,
- 603, -812, 604, 169276, 608, -820,
- 609, 169260, 611, -828, // NOLINT
+ 583, -4, 585, -4, 587, -4,
+ 589, -4, 591, -4, 592, 43132,
+ 593, 43120, 594, 43128, 595, -840,
+ 596, -824, 1073742422, -820, 599, -820,
+ 601, -808, 603, -812, 604, 169276,
+ 608, -820, 609, 169260, 611, -828,
613, 169120, 614, 169232, 616, -836,
617, -844, 619, 42972, 620, 169220,
- 623, -844, 625, 42996, // NOLINT
- 626, -852, 629, -856, 637, 42908,
- 640, -872, 643, -872, 647, 169128,
- 648, -872, 649, -276, // NOLINT
- 1073742474, -868, 651, -868, 652, -284,
- 658, -876, 670, 169032, 837, 336,
- 881, -4, 883, -4, // NOLINT
+ 623, -844, 625, 42996, 626, -852,
+ 629, -856, 637, 42908, 640, -872,
+ 643, -872, 647, 169128, 648, -872,
+ 649, -276, 1073742474, -868, 651, -868,
+ 652, -284, 658, -876, 670, 169032,
+ 837, 336, 881, -4, 883, -4,
887, -4, 1073742715, 520, 893, 520,
940, -152, 1073742765, -148, 943, -148,
- 1073742769, -128, 961, -128, // NOLINT
- 962, -124, 1073742787, -128, 971, -128,
- 972, -256, 1073742797, -252, 974, -252,
- 976, -248, 977, -228, // NOLINT
- 981, -188, 982, -216, 983, -32,
- 985, -4, 987, -4, 989, -4,
- 991, -4, 993, -4, // NOLINT
+ 1073742769, -128, 961, -128, 962, -124,
+ 1073742787, -128, 971, -128, 972, -256,
+ 1073742797, -252, 974, -252, 976, -248,
+ 977, -228, 981, -188, 982, -216,
+ 983, -32, 985, -4, 987, -4,
+ 989, -4, 991, -4, 993, -4,
995, -4, 997, -4, 999, -4,
1001, -4, 1003, -4, 1005, -4,
- 1007, -4, 1008, -344, // NOLINT
- 1009, -320, 1010, 28, 1011, -464,
- 1013, -384, 1016, -4, 1019, -4,
- 1073742896, -128, 1103, -128, // NOLINT
- 1073742928, -320, 1119, -320, 1121, -4,
- 1123, -4, 1125, -4, 1127, -4,
- 1129, -4, 1131, -4, // NOLINT
+ 1007, -4, 1008, -344, 1009, -320,
+ 1010, 28, 1011, -464, 1013, -384,
+ 1016, -4, 1019, -4, 1073742896, -128,
+ 1103, -128, 1073742928, -320, 1119, -320,
+ 1121, -4, 1123, -4, 1125, -4,
+ 1127, -4, 1129, -4, 1131, -4,
1133, -4, 1135, -4, 1137, -4,
1139, -4, 1141, -4, 1143, -4,
- 1145, -4, 1147, -4, // NOLINT
- 1149, -4, 1151, -4, 1153, -4,
- 1163, -4, 1165, -4, 1167, -4,
- 1169, -4, 1171, -4, // NOLINT
- 1173, -4, 1175, -4, 1177, -4,
- 1179, -4, 1181, -4, 1183, -4,
- 1185, -4, 1187, -4, // NOLINT
+ 1145, -4, 1147, -4, 1149, -4,
+ 1151, -4, 1153, -4, 1163, -4,
+ 1165, -4, 1167, -4, 1169, -4,
+ 1171, -4, 1173, -4, 1175, -4,
+ 1177, -4, 1179, -4, 1181, -4,
+ 1183, -4, 1185, -4, 1187, -4,
1189, -4, 1191, -4, 1193, -4,
1195, -4, 1197, -4, 1199, -4,
- 1201, -4, 1203, -4, // NOLINT
- 1205, -4, 1207, -4, 1209, -4,
- 1211, -4, 1213, -4, 1215, -4,
- 1218, -4, 1220, -4, // NOLINT
- 1222, -4, 1224, -4, 1226, -4,
- 1228, -4, 1230, -4, 1231, -60,
- 1233, -4, 1235, -4, // NOLINT
+ 1201, -4, 1203, -4, 1205, -4,
+ 1207, -4, 1209, -4, 1211, -4,
+ 1213, -4, 1215, -4, 1218, -4,
+ 1220, -4, 1222, -4, 1224, -4,
+ 1226, -4, 1228, -4, 1230, -4,
+ 1231, -60, 1233, -4, 1235, -4,
1237, -4, 1239, -4, 1241, -4,
1243, -4, 1245, -4, 1247, -4,
- 1249, -4, 1251, -4, // NOLINT
- 1253, -4, 1255, -4, 1257, -4,
- 1259, -4, 1261, -4, 1263, -4,
- 1265, -4, 1267, -4, // NOLINT
- 1269, -4, 1271, -4, 1273, -4,
- 1275, -4, 1277, -4, 1279, -4,
- 1281, -4, 1283, -4, // NOLINT
+ 1249, -4, 1251, -4, 1253, -4,
+ 1255, -4, 1257, -4, 1259, -4,
+ 1261, -4, 1263, -4, 1265, -4,
+ 1267, -4, 1269, -4, 1271, -4,
+ 1273, -4, 1275, -4, 1277, -4,
+ 1279, -4, 1281, -4, 1283, -4,
1285, -4, 1287, -4, 1289, -4,
1291, -4, 1293, -4, 1295, -4,
- 1297, -4, 1299, -4, // NOLINT
- 1301, -4, 1303, -4, 1305, -4,
- 1307, -4, 1309, -4, 1311, -4,
- 1313, -4, 1315, -4, // NOLINT
- 1317, -4, 1319, -4, 1321, -4,
- 1323, -4, 1325, -4, 1327, -4,
- 1073743201, -192, 1414, -192, // NOLINT
+ 1297, -4, 1299, -4, 1301, -4,
+ 1303, -4, 1305, -4, 1307, -4,
+ 1309, -4, 1311, -4, 1313, -4,
+ 1315, -4, 1317, -4, 1319, -4,
+ 1321, -4, 1323, -4, 1325, -4,
+ 1327, -4, 1073743201, -192, 1414, -192,
7545, 141328, 7549, 15256, 7681, -4,
7683, -4, 7685, -4, 7687, -4,
- 7689, -4, 7691, -4, // NOLINT
- 7693, -4, 7695, -4, 7697, -4,
- 7699, -4, 7701, -4, 7703, -4,
- 7705, -4, 7707, -4, // NOLINT
- 7709, -4, 7711, -4, 7713, -4,
- 7715, -4, 7717, -4, 7719, -4,
- 7721, -4, 7723, -4, // NOLINT
+ 7689, -4, 7691, -4, 7693, -4,
+ 7695, -4, 7697, -4, 7699, -4,
+ 7701, -4, 7703, -4, 7705, -4,
+ 7707, -4, 7709, -4, 7711, -4,
+ 7713, -4, 7715, -4, 7717, -4,
+ 7719, -4, 7721, -4, 7723, -4,
7725, -4, 7727, -4, 7729, -4,
7731, -4, 7733, -4, 7735, -4,
- 7737, -4, 7739, -4, // NOLINT
- 7741, -4, 7743, -4, 7745, -4,
- 7747, -4, 7749, -4, 7751, -4,
- 7753, -4, 7755, -4, // NOLINT
- 7757, -4, 7759, -4, 7761, -4,
- 7763, -4, 7765, -4, 7767, -4,
- 7769, -4, 7771, -4, // NOLINT
+ 7737, -4, 7739, -4, 7741, -4,
+ 7743, -4, 7745, -4, 7747, -4,
+ 7749, -4, 7751, -4, 7753, -4,
+ 7755, -4, 7757, -4, 7759, -4,
+ 7761, -4, 7763, -4, 7765, -4,
+ 7767, -4, 7769, -4, 7771, -4,
7773, -4, 7775, -4, 7777, -4,
7779, -4, 7781, -4, 7783, -4,
- 7785, -4, 7787, -4, // NOLINT
- 7789, -4, 7791, -4, 7793, -4,
- 7795, -4, 7797, -4, 7799, -4,
- 7801, -4, 7803, -4, // NOLINT
- 7805, -4, 7807, -4, 7809, -4,
- 7811, -4, 7813, -4, 7815, -4,
- 7817, -4, 7819, -4, // NOLINT
+ 7785, -4, 7787, -4, 7789, -4,
+ 7791, -4, 7793, -4, 7795, -4,
+ 7797, -4, 7799, -4, 7801, -4,
+ 7803, -4, 7805, -4, 7807, -4,
+ 7809, -4, 7811, -4, 7813, -4,
+ 7815, -4, 7817, -4, 7819, -4,
7821, -4, 7823, -4, 7825, -4,
7827, -4, 7829, -4, 7835, -236,
- 7841, -4, 7843, -4, // NOLINT
- 7845, -4, 7847, -4, 7849, -4,
- 7851, -4, 7853, -4, 7855, -4,
- 7857, -4, 7859, -4, // NOLINT
- 7861, -4, 7863, -4, 7865, -4,
- 7867, -4, 7869, -4, 7871, -4,
- 7873, -4, 7875, -4, // NOLINT
+ 7841, -4, 7843, -4, 7845, -4,
+ 7847, -4, 7849, -4, 7851, -4,
+ 7853, -4, 7855, -4, 7857, -4,
+ 7859, -4, 7861, -4, 7863, -4,
+ 7865, -4, 7867, -4, 7869, -4,
+ 7871, -4, 7873, -4, 7875, -4,
7877, -4, 7879, -4, 7881, -4,
7883, -4, 7885, -4, 7887, -4,
- 7889, -4, 7891, -4, // NOLINT
- 7893, -4, 7895, -4, 7897, -4,
- 7899, -4, 7901, -4, 7903, -4,
- 7905, -4, 7907, -4, // NOLINT
- 7909, -4, 7911, -4, 7913, -4,
- 7915, -4, 7917, -4, 7919, -4,
- 7921, -4, 7923, -4, // NOLINT
+ 7889, -4, 7891, -4, 7893, -4,
+ 7895, -4, 7897, -4, 7899, -4,
+ 7901, -4, 7903, -4, 7905, -4,
+ 7907, -4, 7909, -4, 7911, -4,
+ 7913, -4, 7915, -4, 7917, -4,
+ 7919, -4, 7921, -4, 7923, -4,
7925, -4, 7927, -4, 7929, -4,
7931, -4, 7933, -4, 7935, -4,
- 1073749760, 32, 7943, 32, // NOLINT
- 1073749776, 32, 7957, 32, 1073749792, 32,
- 7975, 32, 1073749808, 32, 7991, 32,
- 1073749824, 32, 8005, 32, // NOLINT
- 8017, 32, 8019, 32, 8021, 32,
- 8023, 32, 1073749856, 32, 8039, 32,
- 1073749872, 296, 8049, 296, // NOLINT
+ 1073749760, 32, 7943, 32, 1073749776, 32,
+ 7957, 32, 1073749792, 32, 7975, 32,
+ 1073749808, 32, 7991, 32, 1073749824, 32,
+ 8005, 32, 8017, 32, 8019, 32,
+ 8021, 32, 8023, 32, 1073749856, 32,
+ 8039, 32, 1073749872, 296, 8049, 296,
1073749874, 344, 8053, 344, 1073749878, 400,
8055, 400, 1073749880, 512, 8057, 512,
- 1073749882, 448, 8059, 448, // NOLINT
- 1073749884, 504, 8061, 504, 1073749936, 32,
- 8113, 32, 8126, -28820, 1073749968, 32,
- 8145, 32, 1073749984, 32, // NOLINT
- 8161, 32, 8165, 28}; // NOLINT
-static const uint16_t kEcma262CanonicalizeMultiStrings0Size = 1; // NOLINT
+ 1073749882, 448, 8059, 448, 1073749884, 504,
+ 8061, 504, 1073749936, 32, 8113, 32,
+ 8126, -28820, 1073749968, 32, 8145, 32,
+ 1073749984, 32, 8161, 32, 8165, 28};
+static const uint16_t kEcma262CanonicalizeMultiStrings0Size = 1;
static const MultiCharacterSpecialCase<1> kEcma262CanonicalizeMultiStrings1[1] =
- { // NOLINT
- {{kSentinel}}}; // NOLINT
-static const uint16_t kEcma262CanonicalizeTable1Size = 73; // NOLINT
+ {{{kSentinel}}};
+static const uint16_t kEcma262CanonicalizeTable1Size = 73;
static const int32_t kEcma262CanonicalizeTable1[146] = {
- 334, -112, 1073742192, -64, 383, -64,
- 388, -4, 1073743056, -104, 1257, -104,
- 1073744944, -192, 3166, -192, // NOLINT
- 3169, -4, 3173, -43180, 3174, -43168,
- 3176, -4, 3178, -4, 3180, -4,
- 3187, -4, 3190, -4, // NOLINT
- 3201, -4, 3203, -4, 3205, -4,
- 3207, -4, 3209, -4, 3211, -4,
- 3213, -4, 3215, -4, // NOLINT
- 3217, -4, 3219, -4, 3221, -4,
- 3223, -4, 3225, -4, 3227, -4,
- 3229, -4, 3231, -4, // NOLINT
- 3233, -4, 3235, -4, 3237, -4,
- 3239, -4, 3241, -4, 3243, -4,
- 3245, -4, 3247, -4, // NOLINT
- 3249, -4, 3251, -4, 3253, -4,
- 3255, -4, 3257, -4, 3259, -4,
- 3261, -4, 3263, -4, // NOLINT
- 3265, -4, 3267, -4, 3269, -4,
- 3271, -4, 3273, -4, 3275, -4,
- 3277, -4, 3279, -4, // NOLINT
- 3281, -4, 3283, -4, 3285, -4,
- 3287, -4, 3289, -4, 3291, -4,
- 3293, -4, 3295, -4, // NOLINT
- 3297, -4, 3299, -4, 3308, -4,
- 3310, -4, 3315, -4, 1073745152, -29056,
- 3365, -29056, 3367, -29056, // NOLINT
- 3373, -29056}; // NOLINT
-static const uint16_t kEcma262CanonicalizeMultiStrings1Size = 1; // NOLINT
+ 334, -112, 1073742192, -64, 383, -64, 388, -4, 1073743056, -104,
+ 1257, -104, 1073744944, -192, 3166, -192, 3169, -4, 3173, -43180,
+ 3174, -43168, 3176, -4, 3178, -4, 3180, -4, 3187, -4,
+ 3190, -4, 3201, -4, 3203, -4, 3205, -4, 3207, -4,
+ 3209, -4, 3211, -4, 3213, -4, 3215, -4, 3217, -4,
+ 3219, -4, 3221, -4, 3223, -4, 3225, -4, 3227, -4,
+ 3229, -4, 3231, -4, 3233, -4, 3235, -4, 3237, -4,
+ 3239, -4, 3241, -4, 3243, -4, 3245, -4, 3247, -4,
+ 3249, -4, 3251, -4, 3253, -4, 3255, -4, 3257, -4,
+ 3259, -4, 3261, -4, 3263, -4, 3265, -4, 3267, -4,
+ 3269, -4, 3271, -4, 3273, -4, 3275, -4, 3277, -4,
+ 3279, -4, 3281, -4, 3283, -4, 3285, -4, 3287, -4,
+ 3289, -4, 3291, -4, 3293, -4, 3295, -4, 3297, -4,
+ 3299, -4, 3308, -4, 3310, -4, 3315, -4, 1073745152, -29056,
+ 3365, -29056, 3367, -29056, 3373, -29056};
+static const uint16_t kEcma262CanonicalizeMultiStrings1Size = 1;
static const MultiCharacterSpecialCase<1> kEcma262CanonicalizeMultiStrings5[1] =
- { // NOLINT
- {{kSentinel}}}; // NOLINT
-static const uint16_t kEcma262CanonicalizeTable5Size = 95; // NOLINT
+ {{{kSentinel}}};
+static const uint16_t kEcma262CanonicalizeTable5Size = 95;
static const int32_t kEcma262CanonicalizeTable5[190] = {
- 1601, -4, 1603, -4, 1605, -4, 1607, -4,
- 1609, -4, 1611, -4, 1613, -4, 1615, -4, // NOLINT
- 1617, -4, 1619, -4, 1621, -4, 1623, -4,
- 1625, -4, 1627, -4, 1629, -4, 1631, -4, // NOLINT
- 1633, -4, 1635, -4, 1637, -4, 1639, -4,
- 1641, -4, 1643, -4, 1645, -4, 1665, -4, // NOLINT
- 1667, -4, 1669, -4, 1671, -4, 1673, -4,
- 1675, -4, 1677, -4, 1679, -4, 1681, -4, // NOLINT
- 1683, -4, 1685, -4, 1687, -4, 1689, -4,
- 1691, -4, 1827, -4, 1829, -4, 1831, -4, // NOLINT
- 1833, -4, 1835, -4, 1837, -4, 1839, -4,
- 1843, -4, 1845, -4, 1847, -4, 1849, -4, // NOLINT
- 1851, -4, 1853, -4, 1855, -4, 1857, -4,
- 1859, -4, 1861, -4, 1863, -4, 1865, -4, // NOLINT
- 1867, -4, 1869, -4, 1871, -4, 1873, -4,
- 1875, -4, 1877, -4, 1879, -4, 1881, -4, // NOLINT
- 1883, -4, 1885, -4, 1887, -4, 1889, -4,
- 1891, -4, 1893, -4, 1895, -4, 1897, -4, // NOLINT
- 1899, -4, 1901, -4, 1903, -4, 1914, -4,
- 1916, -4, 1919, -4, 1921, -4, 1923, -4, // NOLINT
- 1925, -4, 1927, -4, 1932, -4, 1937, -4,
- 1939, -4, 1943, -4, 1945, -4, 1947, -4, // NOLINT
- 1949, -4, 1951, -4, 1953, -4, 1955, -4,
- 1957, -4, 1959, -4, 1961, -4}; // NOLINT
-static const uint16_t kEcma262CanonicalizeMultiStrings5Size = 1; // NOLINT
+ 1601, -4, 1603, -4, 1605, -4, 1607, -4, 1609, -4, 1611, -4, 1613, -4,
+ 1615, -4, 1617, -4, 1619, -4, 1621, -4, 1623, -4, 1625, -4, 1627, -4,
+ 1629, -4, 1631, -4, 1633, -4, 1635, -4, 1637, -4, 1639, -4, 1641, -4,
+ 1643, -4, 1645, -4, 1665, -4, 1667, -4, 1669, -4, 1671, -4, 1673, -4,
+ 1675, -4, 1677, -4, 1679, -4, 1681, -4, 1683, -4, 1685, -4, 1687, -4,
+ 1689, -4, 1691, -4, 1827, -4, 1829, -4, 1831, -4, 1833, -4, 1835, -4,
+ 1837, -4, 1839, -4, 1843, -4, 1845, -4, 1847, -4, 1849, -4, 1851, -4,
+ 1853, -4, 1855, -4, 1857, -4, 1859, -4, 1861, -4, 1863, -4, 1865, -4,
+ 1867, -4, 1869, -4, 1871, -4, 1873, -4, 1875, -4, 1877, -4, 1879, -4,
+ 1881, -4, 1883, -4, 1885, -4, 1887, -4, 1889, -4, 1891, -4, 1893, -4,
+ 1895, -4, 1897, -4, 1899, -4, 1901, -4, 1903, -4, 1914, -4, 1916, -4,
+ 1919, -4, 1921, -4, 1923, -4, 1925, -4, 1927, -4, 1932, -4, 1937, -4,
+ 1939, -4, 1943, -4, 1945, -4, 1947, -4, 1949, -4, 1951, -4, 1953, -4,
+ 1955, -4, 1957, -4, 1959, -4, 1961, -4};
+static const uint16_t kEcma262CanonicalizeMultiStrings5Size = 1;
static const MultiCharacterSpecialCase<1> kEcma262CanonicalizeMultiStrings7[1] =
- { // NOLINT
- {{kSentinel}}}; // NOLINT
-static const uint16_t kEcma262CanonicalizeTable7Size = 2; // NOLINT
+ {{{kSentinel}}};
+static const uint16_t kEcma262CanonicalizeTable7Size = 2;
static const int32_t kEcma262CanonicalizeTable7[4] = {1073749825, -128, 8026,
- -128}; // NOLINT
-static const uint16_t kEcma262CanonicalizeMultiStrings7Size = 1; // NOLINT
+ -128};
+static const uint16_t kEcma262CanonicalizeMultiStrings7Size = 1;
int Ecma262Canonicalize::Convert(uchar c, uchar n, uchar* result,
bool* allow_caching_ptr) {
int chunk_index = c >> 13;
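
(For orientation while reading the reflowed tables in this hunk: each table is a flat list of (key, value) pairs. Keys with bit 30 set (1073741824) appear to mark the start of a contiguous range that ends at the next entry's key, and the stored value looks like the signed delta to the canonical code point, pre-multiplied by 4; Convert() above dispatches on c >> 13 to pick the per-chunk table. Below is a minimal, self-contained sketch of that reading, not the V8 implementation; kRangeStartBit, LookupCanonical and the excerpt array are illustrative names only.)

// Minimal sketch of the apparent table encoding; not V8's lookup code.
#include <cstdint>
#include <cstdio>

namespace {

// Assumed: bit 30 marks the first key of a range ending at the next entry's key.
constexpr int32_t kRangeStartBit = 1 << 30;

// Returns the canonical code point for c, or c itself if no entry applies.
// `table` holds `pairs` (key, value) pairs ordered by key (ignoring kRangeStartBit).
int32_t LookupCanonical(const int32_t* table, int pairs, int32_t c) {
  for (int i = 0; i < pairs; ++i) {
    int32_t key = table[2 * i] & ~kRangeStartBit;
    int32_t value = table[2 * i + 1];
    if (table[2 * i] & kRangeStartBit) {
      // Range entry: covers key up to the following entry's key, inclusive.
      int32_t end = (i + 1 < pairs) ? (table[2 * (i + 1)] & ~kRangeStartBit) : key;
      if (c >= key && c <= end) return c + value / 4;  // value is delta * 4
    } else if (c == key) {
      return c + value / 4;
    }
  }
  return c;
}

}  // namespace

int main() {
  // Excerpt in the same shape as kEcma262CanonicalizeTable0: 940 maps with
  // delta -38 (stored -152), and the range 941..943 maps with delta -37 (stored -148).
  static const int32_t kExcerpt[] = {940, -152, 1073742765, -148, 943, -148};
  std::printf("%d -> %d\n", 940, LookupCanonical(kExcerpt, 3, 940));  // 902
  std::printf("%d -> %d\n", 942, LookupCanonical(kExcerpt, 3, 942));  // 905
  return 0;
}

(The MultiStrings arrays interleaved above suggest that multi-character mappings go through a separate indexed path, and the real lookup is presumably search-based rather than a linear scan; the sketch ignores both.)
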
@@ -1942,1095 +1566,899 @@ int Ecma262Canonicalize::Convert(uchar c, uchar n, uchar* result,
}
static const MultiCharacterSpecialCase<4>
- kEcma262UnCanonicalizeMultiStrings0[507] = { // NOLINT
- {{65, 97, kSentinel}},
- {{90, 122, kSentinel}},
- {{181, 924, 956, kSentinel}},
- {{192, 224, kSentinel}}, // NOLINT
- {{214, 246, kSentinel}},
- {{216, 248, kSentinel}},
- {{222, 254, kSentinel}},
- {{255, 376, kSentinel}}, // NOLINT
- {{256, 257, kSentinel}},
- {{258, 259, kSentinel}},
- {{260, 261, kSentinel}},
- {{262, 263, kSentinel}}, // NOLINT
- {{264, 265, kSentinel}},
- {{266, 267, kSentinel}},
- {{268, 269, kSentinel}},
- {{270, 271, kSentinel}}, // NOLINT
- {{272, 273, kSentinel}},
- {{274, 275, kSentinel}},
- {{276, 277, kSentinel}},
- {{278, 279, kSentinel}}, // NOLINT
- {{280, 281, kSentinel}},
- {{282, 283, kSentinel}},
- {{284, 285, kSentinel}},
- {{286, 287, kSentinel}}, // NOLINT
- {{288, 289, kSentinel}},
- {{290, 291, kSentinel}},
- {{292, 293, kSentinel}},
- {{294, 295, kSentinel}}, // NOLINT
- {{296, 297, kSentinel}},
- {{298, 299, kSentinel}},
- {{300, 301, kSentinel}},
- {{302, 303, kSentinel}}, // NOLINT
- {{306, 307, kSentinel}},
- {{308, 309, kSentinel}},
- {{310, 311, kSentinel}},
- {{313, 314, kSentinel}}, // NOLINT
- {{315, 316, kSentinel}},
- {{317, 318, kSentinel}},
- {{319, 320, kSentinel}},
- {{321, 322, kSentinel}}, // NOLINT
- {{323, 324, kSentinel}},
- {{325, 326, kSentinel}},
- {{327, 328, kSentinel}},
- {{330, 331, kSentinel}}, // NOLINT
- {{332, 333, kSentinel}},
- {{334, 335, kSentinel}},
- {{336, 337, kSentinel}},
- {{338, 339, kSentinel}}, // NOLINT
- {{340, 341, kSentinel}},
- {{342, 343, kSentinel}},
- {{344, 345, kSentinel}},
- {{346, 347, kSentinel}}, // NOLINT
- {{348, 349, kSentinel}},
- {{350, 351, kSentinel}},
- {{352, 353, kSentinel}},
- {{354, 355, kSentinel}}, // NOLINT
- {{356, 357, kSentinel}},
- {{358, 359, kSentinel}},
- {{360, 361, kSentinel}},
- {{362, 363, kSentinel}}, // NOLINT
- {{364, 365, kSentinel}},
- {{366, 367, kSentinel}},
- {{368, 369, kSentinel}},
- {{370, 371, kSentinel}}, // NOLINT
- {{372, 373, kSentinel}},
- {{374, 375, kSentinel}},
- {{377, 378, kSentinel}},
- {{379, 380, kSentinel}}, // NOLINT
- {{381, 382, kSentinel}},
- {{384, 579, kSentinel}},
- {{385, 595, kSentinel}},
- {{386, 387, kSentinel}}, // NOLINT
- {{388, 389, kSentinel}},
- {{390, 596, kSentinel}},
- {{391, 392, kSentinel}},
- {{393, 598, kSentinel}}, // NOLINT
- {{394, 599, kSentinel}},
- {{395, 396, kSentinel}},
- {{398, 477, kSentinel}},
- {{399, 601, kSentinel}}, // NOLINT
- {{400, 603, kSentinel}},
- {{401, 402, kSentinel}},
- {{403, 608, kSentinel}},
- {{404, 611, kSentinel}}, // NOLINT
- {{405, 502, kSentinel}},
- {{406, 617, kSentinel}},
- {{407, 616, kSentinel}},
- {{408, 409, kSentinel}}, // NOLINT
- {{410, 573, kSentinel}},
- {{412, 623, kSentinel}},
- {{413, 626, kSentinel}},
- {{414, 544, kSentinel}}, // NOLINT
- {{415, 629, kSentinel}},
- {{416, 417, kSentinel}},
- {{418, 419, kSentinel}},
- {{420, 421, kSentinel}}, // NOLINT
- {{422, 640, kSentinel}},
- {{423, 424, kSentinel}},
- {{425, 643, kSentinel}},
- {{428, 429, kSentinel}}, // NOLINT
- {{430, 648, kSentinel}},
- {{431, 432, kSentinel}},
- {{433, 650, kSentinel}},
- {{434, 651, kSentinel}}, // NOLINT
- {{435, 436, kSentinel}},
- {{437, 438, kSentinel}},
- {{439, 658, kSentinel}},
- {{440, 441, kSentinel}}, // NOLINT
- {{444, 445, kSentinel}},
- {{447, 503, kSentinel}},
- {{452, 453, 454, kSentinel}},
- {{455, 456, 457, kSentinel}}, // NOLINT
- {{458, 459, 460, kSentinel}},
- {{461, 462, kSentinel}},
- {{463, 464, kSentinel}},
- {{465, 466, kSentinel}}, // NOLINT
- {{467, 468, kSentinel}},
- {{469, 470, kSentinel}},
- {{471, 472, kSentinel}},
- {{473, 474, kSentinel}}, // NOLINT
- {{475, 476, kSentinel}},
- {{478, 479, kSentinel}},
- {{480, 481, kSentinel}},
- {{482, 483, kSentinel}}, // NOLINT
- {{484, 485, kSentinel}},
- {{486, 487, kSentinel}},
- {{488, 489, kSentinel}},
- {{490, 491, kSentinel}}, // NOLINT
- {{492, 493, kSentinel}},
- {{494, 495, kSentinel}},
- {{497, 498, 499, kSentinel}},
- {{500, 501, kSentinel}}, // NOLINT
- {{504, 505, kSentinel}},
- {{506, 507, kSentinel}},
- {{508, 509, kSentinel}},
- {{510, 511, kSentinel}}, // NOLINT
- {{512, 513, kSentinel}},
- {{514, 515, kSentinel}},
- {{516, 517, kSentinel}},
- {{518, 519, kSentinel}}, // NOLINT
- {{520, 521, kSentinel}},
- {{522, 523, kSentinel}},
- {{524, 525, kSentinel}},
- {{526, 527, kSentinel}}, // NOLINT
- {{528, 529, kSentinel}},
- {{530, 531, kSentinel}},
- {{532, 533, kSentinel}},
- {{534, 535, kSentinel}}, // NOLINT
- {{536, 537, kSentinel}},
- {{538, 539, kSentinel}},
- {{540, 541, kSentinel}},
- {{542, 543, kSentinel}}, // NOLINT
- {{546, 547, kSentinel}},
- {{548, 549, kSentinel}},
- {{550, 551, kSentinel}},
- {{552, 553, kSentinel}}, // NOLINT
- {{554, 555, kSentinel}},
- {{556, 557, kSentinel}},
- {{558, 559, kSentinel}},
- {{560, 561, kSentinel}}, // NOLINT
- {{562, 563, kSentinel}},
- {{570, 11365, kSentinel}},
- {{571, 572, kSentinel}},
- {{574, 11366, kSentinel}}, // NOLINT
- {{575, 11390, kSentinel}},
- {{576, 11391, kSentinel}},
- {{577, 578, kSentinel}},
- {{580, 649, kSentinel}}, // NOLINT
- {{581, 652, kSentinel}},
- {{582, 583, kSentinel}},
- {{584, 585, kSentinel}},
- {{586, 587, kSentinel}}, // NOLINT
- {{588, 589, kSentinel}},
- {{590, 591, kSentinel}},
- {{592, 11375, kSentinel}},
- {{593, 11373, kSentinel}}, // NOLINT
- {{594, 11376, kSentinel}},
- {{604, 42923, kSentinel}},
- {{609, 42924, kSentinel}},
- {{613, 42893, kSentinel}}, // NOLINT
- {{614, 42922, kSentinel}},
- {{619, 11362, kSentinel}},
- {{620, 42925, kSentinel}},
- {{625, 11374, kSentinel}}, // NOLINT
- {{637, 11364, kSentinel}},
- {{647, 42929, kSentinel}},
- {{670, 42928, kSentinel}},
- {{837, 921, 953, 8126}}, // NOLINT
- {{880, 881, kSentinel}},
- {{882, 883, kSentinel}},
- {{886, 887, kSentinel}},
- {{891, 1021, kSentinel}}, // NOLINT
- {{893, 1023, kSentinel}},
- {{895, 1011, kSentinel}},
- {{902, 940, kSentinel}},
- {{904, 941, kSentinel}}, // NOLINT
- {{906, 943, kSentinel}},
- {{908, 972, kSentinel}},
- {{910, 973, kSentinel}},
- {{911, 974, kSentinel}}, // NOLINT
- {{913, 945, kSentinel}},
- {{914, 946, 976, kSentinel}},
- {{915, 947, kSentinel}},
- {{916, 948, kSentinel}}, // NOLINT
- {{917, 949, 1013, kSentinel}},
- {{918, 950, kSentinel}},
- {{919, 951, kSentinel}},
- {{920, 952, 977, kSentinel}}, // NOLINT
- {{922, 954, 1008, kSentinel}},
- {{923, 955, kSentinel}},
- {{925, 957, kSentinel}},
- {{927, 959, kSentinel}}, // NOLINT
- {{928, 960, 982, kSentinel}},
- {{929, 961, 1009, kSentinel}},
- {{931, 962, 963, kSentinel}},
- {{932, 964, kSentinel}}, // NOLINT
- {{933, 965, kSentinel}},
- {{934, 966, 981, kSentinel}},
- {{935, 967, kSentinel}},
- {{939, 971, kSentinel}}, // NOLINT
- {{975, 983, kSentinel}},
- {{984, 985, kSentinel}},
- {{986, 987, kSentinel}},
- {{988, 989, kSentinel}}, // NOLINT
- {{990, 991, kSentinel}},
- {{992, 993, kSentinel}},
- {{994, 995, kSentinel}},
- {{996, 997, kSentinel}}, // NOLINT
- {{998, 999, kSentinel}},
- {{1000, 1001, kSentinel}},
- {{1002, 1003, kSentinel}},
- {{1004, 1005, kSentinel}}, // NOLINT
- {{1006, 1007, kSentinel}},
- {{1010, 1017, kSentinel}},
- {{1015, 1016, kSentinel}},
- {{1018, 1019, kSentinel}}, // NOLINT
- {{1024, 1104, kSentinel}},
- {{1039, 1119, kSentinel}},
- {{1040, 1072, kSentinel}},
- {{1071, 1103, kSentinel}}, // NOLINT
- {{1120, 1121, kSentinel}},
- {{1122, 1123, kSentinel}},
- {{1124, 1125, kSentinel}},
- {{1126, 1127, kSentinel}}, // NOLINT
- {{1128, 1129, kSentinel}},
- {{1130, 1131, kSentinel}},
- {{1132, 1133, kSentinel}},
- {{1134, 1135, kSentinel}}, // NOLINT
- {{1136, 1137, kSentinel}},
- {{1138, 1139, kSentinel}},
- {{1140, 1141, kSentinel}},
- {{1142, 1143, kSentinel}}, // NOLINT
- {{1144, 1145, kSentinel}},
- {{1146, 1147, kSentinel}},
- {{1148, 1149, kSentinel}},
- {{1150, 1151, kSentinel}}, // NOLINT
- {{1152, 1153, kSentinel}},
- {{1162, 1163, kSentinel}},
- {{1164, 1165, kSentinel}},
- {{1166, 1167, kSentinel}}, // NOLINT
- {{1168, 1169, kSentinel}},
- {{1170, 1171, kSentinel}},
- {{1172, 1173, kSentinel}},
- {{1174, 1175, kSentinel}}, // NOLINT
- {{1176, 1177, kSentinel}},
- {{1178, 1179, kSentinel}},
- {{1180, 1181, kSentinel}},
- {{1182, 1183, kSentinel}}, // NOLINT
- {{1184, 1185, kSentinel}},
- {{1186, 1187, kSentinel}},
- {{1188, 1189, kSentinel}},
- {{1190, 1191, kSentinel}}, // NOLINT
- {{1192, 1193, kSentinel}},
- {{1194, 1195, kSentinel}},
- {{1196, 1197, kSentinel}},
- {{1198, 1199, kSentinel}}, // NOLINT
- {{1200, 1201, kSentinel}},
- {{1202, 1203, kSentinel}},
- {{1204, 1205, kSentinel}},
- {{1206, 1207, kSentinel}}, // NOLINT
- {{1208, 1209, kSentinel}},
- {{1210, 1211, kSentinel}},
- {{1212, 1213, kSentinel}},
- {{1214, 1215, kSentinel}}, // NOLINT
- {{1216, 1231, kSentinel}},
- {{1217, 1218, kSentinel}},
- {{1219, 1220, kSentinel}},
- {{1221, 1222, kSentinel}}, // NOLINT
- {{1223, 1224, kSentinel}},
- {{1225, 1226, kSentinel}},
- {{1227, 1228, kSentinel}},
- {{1229, 1230, kSentinel}}, // NOLINT
- {{1232, 1233, kSentinel}},
- {{1234, 1235, kSentinel}},
- {{1236, 1237, kSentinel}},
- {{1238, 1239, kSentinel}}, // NOLINT
- {{1240, 1241, kSentinel}},
- {{1242, 1243, kSentinel}},
- {{1244, 1245, kSentinel}},
- {{1246, 1247, kSentinel}}, // NOLINT
- {{1248, 1249, kSentinel}},
- {{1250, 1251, kSentinel}},
- {{1252, 1253, kSentinel}},
- {{1254, 1255, kSentinel}}, // NOLINT
- {{1256, 1257, kSentinel}},
- {{1258, 1259, kSentinel}},
- {{1260, 1261, kSentinel}},
- {{1262, 1263, kSentinel}}, // NOLINT
- {{1264, 1265, kSentinel}},
- {{1266, 1267, kSentinel}},
- {{1268, 1269, kSentinel}},
- {{1270, 1271, kSentinel}}, // NOLINT
- {{1272, 1273, kSentinel}},
- {{1274, 1275, kSentinel}},
- {{1276, 1277, kSentinel}},
- {{1278, 1279, kSentinel}}, // NOLINT
- {{1280, 1281, kSentinel}},
- {{1282, 1283, kSentinel}},
- {{1284, 1285, kSentinel}},
- {{1286, 1287, kSentinel}}, // NOLINT
- {{1288, 1289, kSentinel}},
- {{1290, 1291, kSentinel}},
- {{1292, 1293, kSentinel}},
- {{1294, 1295, kSentinel}}, // NOLINT
- {{1296, 1297, kSentinel}},
- {{1298, 1299, kSentinel}},
- {{1300, 1301, kSentinel}},
- {{1302, 1303, kSentinel}}, // NOLINT
- {{1304, 1305, kSentinel}},
- {{1306, 1307, kSentinel}},
- {{1308, 1309, kSentinel}},
- {{1310, 1311, kSentinel}}, // NOLINT
- {{1312, 1313, kSentinel}},
- {{1314, 1315, kSentinel}},
- {{1316, 1317, kSentinel}},
- {{1318, 1319, kSentinel}}, // NOLINT
- {{1320, 1321, kSentinel}},
- {{1322, 1323, kSentinel}},
- {{1324, 1325, kSentinel}},
- {{1326, 1327, kSentinel}}, // NOLINT
- {{1329, 1377, kSentinel}},
- {{1366, 1414, kSentinel}},
- {{4256, 11520, kSentinel}},
- {{4293, 11557, kSentinel}}, // NOLINT
- {{4295, 11559, kSentinel}},
- {{4301, 11565, kSentinel}},
- {{7545, 42877, kSentinel}},
- {{7549, 11363, kSentinel}}, // NOLINT
- {{7680, 7681, kSentinel}},
- {{7682, 7683, kSentinel}},
- {{7684, 7685, kSentinel}},
- {{7686, 7687, kSentinel}}, // NOLINT
- {{7688, 7689, kSentinel}},
- {{7690, 7691, kSentinel}},
- {{7692, 7693, kSentinel}},
- {{7694, 7695, kSentinel}}, // NOLINT
- {{7696, 7697, kSentinel}},
- {{7698, 7699, kSentinel}},
- {{7700, 7701, kSentinel}},
- {{7702, 7703, kSentinel}}, // NOLINT
- {{7704, 7705, kSentinel}},
- {{7706, 7707, kSentinel}},
- {{7708, 7709, kSentinel}},
- {{7710, 7711, kSentinel}}, // NOLINT
- {{7712, 7713, kSentinel}},
- {{7714, 7715, kSentinel}},
- {{7716, 7717, kSentinel}},
- {{7718, 7719, kSentinel}}, // NOLINT
- {{7720, 7721, kSentinel}},
- {{7722, 7723, kSentinel}},
- {{7724, 7725, kSentinel}},
- {{7726, 7727, kSentinel}}, // NOLINT
- {{7728, 7729, kSentinel}},
- {{7730, 7731, kSentinel}},
- {{7732, 7733, kSentinel}},
- {{7734, 7735, kSentinel}}, // NOLINT
- {{7736, 7737, kSentinel}},
- {{7738, 7739, kSentinel}},
- {{7740, 7741, kSentinel}},
- {{7742, 7743, kSentinel}}, // NOLINT
- {{7744, 7745, kSentinel}},
- {{7746, 7747, kSentinel}},
- {{7748, 7749, kSentinel}},
- {{7750, 7751, kSentinel}}, // NOLINT
- {{7752, 7753, kSentinel}},
- {{7754, 7755, kSentinel}},
- {{7756, 7757, kSentinel}},
- {{7758, 7759, kSentinel}}, // NOLINT
- {{7760, 7761, kSentinel}},
- {{7762, 7763, kSentinel}},
- {{7764, 7765, kSentinel}},
- {{7766, 7767, kSentinel}}, // NOLINT
- {{7768, 7769, kSentinel}},
- {{7770, 7771, kSentinel}},
- {{7772, 7773, kSentinel}},
- {{7774, 7775, kSentinel}}, // NOLINT
- {{7776, 7777, 7835, kSentinel}},
- {{7778, 7779, kSentinel}},
- {{7780, 7781, kSentinel}},
- {{7782, 7783, kSentinel}}, // NOLINT
- {{7784, 7785, kSentinel}},
- {{7786, 7787, kSentinel}},
- {{7788, 7789, kSentinel}},
- {{7790, 7791, kSentinel}}, // NOLINT
- {{7792, 7793, kSentinel}},
- {{7794, 7795, kSentinel}},
- {{7796, 7797, kSentinel}},
- {{7798, 7799, kSentinel}}, // NOLINT
- {{7800, 7801, kSentinel}},
- {{7802, 7803, kSentinel}},
- {{7804, 7805, kSentinel}},
- {{7806, 7807, kSentinel}}, // NOLINT
- {{7808, 7809, kSentinel}},
- {{7810, 7811, kSentinel}},
- {{7812, 7813, kSentinel}},
- {{7814, 7815, kSentinel}}, // NOLINT
- {{7816, 7817, kSentinel}},
- {{7818, 7819, kSentinel}},
- {{7820, 7821, kSentinel}},
- {{7822, 7823, kSentinel}}, // NOLINT
- {{7824, 7825, kSentinel}},
- {{7826, 7827, kSentinel}},
- {{7828, 7829, kSentinel}},
- {{7840, 7841, kSentinel}}, // NOLINT
- {{7842, 7843, kSentinel}},
- {{7844, 7845, kSentinel}},
- {{7846, 7847, kSentinel}},
- {{7848, 7849, kSentinel}}, // NOLINT
- {{7850, 7851, kSentinel}},
- {{7852, 7853, kSentinel}},
- {{7854, 7855, kSentinel}},
- {{7856, 7857, kSentinel}}, // NOLINT
- {{7858, 7859, kSentinel}},
- {{7860, 7861, kSentinel}},
- {{7862, 7863, kSentinel}},
- {{7864, 7865, kSentinel}}, // NOLINT
- {{7866, 7867, kSentinel}},
- {{7868, 7869, kSentinel}},
- {{7870, 7871, kSentinel}},
- {{7872, 7873, kSentinel}}, // NOLINT
- {{7874, 7875, kSentinel}},
- {{7876, 7877, kSentinel}},
- {{7878, 7879, kSentinel}},
- {{7880, 7881, kSentinel}}, // NOLINT
- {{7882, 7883, kSentinel}},
- {{7884, 7885, kSentinel}},
- {{7886, 7887, kSentinel}},
- {{7888, 7889, kSentinel}}, // NOLINT
- {{7890, 7891, kSentinel}},
- {{7892, 7893, kSentinel}},
- {{7894, 7895, kSentinel}},
- {{7896, 7897, kSentinel}}, // NOLINT
- {{7898, 7899, kSentinel}},
- {{7900, 7901, kSentinel}},
- {{7902, 7903, kSentinel}},
- {{7904, 7905, kSentinel}}, // NOLINT
- {{7906, 7907, kSentinel}},
- {{7908, 7909, kSentinel}},
- {{7910, 7911, kSentinel}},
- {{7912, 7913, kSentinel}}, // NOLINT
- {{7914, 7915, kSentinel}},
- {{7916, 7917, kSentinel}},
- {{7918, 7919, kSentinel}},
- {{7920, 7921, kSentinel}}, // NOLINT
- {{7922, 7923, kSentinel}},
- {{7924, 7925, kSentinel}},
- {{7926, 7927, kSentinel}},
- {{7928, 7929, kSentinel}}, // NOLINT
- {{7930, 7931, kSentinel}},
- {{7932, 7933, kSentinel}},
- {{7934, 7935, kSentinel}},
- {{7936, 7944, kSentinel}}, // NOLINT
- {{7943, 7951, kSentinel}},
- {{7952, 7960, kSentinel}},
- {{7957, 7965, kSentinel}},
- {{7968, 7976, kSentinel}}, // NOLINT
- {{7975, 7983, kSentinel}},
- {{7984, 7992, kSentinel}},
- {{7991, 7999, kSentinel}},
- {{8000, 8008, kSentinel}}, // NOLINT
- {{8005, 8013, kSentinel}},
- {{8017, 8025, kSentinel}},
- {{8019, 8027, kSentinel}},
- {{8021, 8029, kSentinel}}, // NOLINT
- {{8023, 8031, kSentinel}},
- {{8032, 8040, kSentinel}},
- {{8039, 8047, kSentinel}},
- {{8048, 8122, kSentinel}}, // NOLINT
- {{8049, 8123, kSentinel}},
- {{8050, 8136, kSentinel}},
- {{8053, 8139, kSentinel}},
- {{8054, 8154, kSentinel}}, // NOLINT
- {{8055, 8155, kSentinel}},
- {{8056, 8184, kSentinel}},
- {{8057, 8185, kSentinel}},
- {{8058, 8170, kSentinel}}, // NOLINT
- {{8059, 8171, kSentinel}},
- {{8060, 8186, kSentinel}},
- {{8061, 8187, kSentinel}},
- {{8112, 8120, kSentinel}}, // NOLINT
- {{8113, 8121, kSentinel}},
- {{8144, 8152, kSentinel}},
- {{8145, 8153, kSentinel}},
- {{8160, 8168, kSentinel}}, // NOLINT
- {{8161, 8169, kSentinel}},
- {{8165, 8172, kSentinel}},
- {{kSentinel}}}; // NOLINT
-static const uint16_t kEcma262UnCanonicalizeTable0Size = 1005; // NOLINT
+ kEcma262UnCanonicalizeMultiStrings0[507] = {{{65, 97, kSentinel}},
+ {{90, 122, kSentinel}},
+ {{181, 924, 956, kSentinel}},
+ {{192, 224, kSentinel}},
+ {{214, 246, kSentinel}},
+ {{216, 248, kSentinel}},
+ {{222, 254, kSentinel}},
+ {{255, 376, kSentinel}},
+ {{256, 257, kSentinel}},
+ {{258, 259, kSentinel}},
+ {{260, 261, kSentinel}},
+ {{262, 263, kSentinel}},
+ {{264, 265, kSentinel}},
+ {{266, 267, kSentinel}},
+ {{268, 269, kSentinel}},
+ {{270, 271, kSentinel}},
+ {{272, 273, kSentinel}},
+ {{274, 275, kSentinel}},
+ {{276, 277, kSentinel}},
+ {{278, 279, kSentinel}},
+ {{280, 281, kSentinel}},
+ {{282, 283, kSentinel}},
+ {{284, 285, kSentinel}},
+ {{286, 287, kSentinel}},
+ {{288, 289, kSentinel}},
+ {{290, 291, kSentinel}},
+ {{292, 293, kSentinel}},
+ {{294, 295, kSentinel}},
+ {{296, 297, kSentinel}},
+ {{298, 299, kSentinel}},
+ {{300, 301, kSentinel}},
+ {{302, 303, kSentinel}},
+ {{306, 307, kSentinel}},
+ {{308, 309, kSentinel}},
+ {{310, 311, kSentinel}},
+ {{313, 314, kSentinel}},
+ {{315, 316, kSentinel}},
+ {{317, 318, kSentinel}},
+ {{319, 320, kSentinel}},
+ {{321, 322, kSentinel}},
+ {{323, 324, kSentinel}},
+ {{325, 326, kSentinel}},
+ {{327, 328, kSentinel}},
+ {{330, 331, kSentinel}},
+ {{332, 333, kSentinel}},
+ {{334, 335, kSentinel}},
+ {{336, 337, kSentinel}},
+ {{338, 339, kSentinel}},
+ {{340, 341, kSentinel}},
+ {{342, 343, kSentinel}},
+ {{344, 345, kSentinel}},
+ {{346, 347, kSentinel}},
+ {{348, 349, kSentinel}},
+ {{350, 351, kSentinel}},
+ {{352, 353, kSentinel}},
+ {{354, 355, kSentinel}},
+ {{356, 357, kSentinel}},
+ {{358, 359, kSentinel}},
+ {{360, 361, kSentinel}},
+ {{362, 363, kSentinel}},
+ {{364, 365, kSentinel}},
+ {{366, 367, kSentinel}},
+ {{368, 369, kSentinel}},
+ {{370, 371, kSentinel}},
+ {{372, 373, kSentinel}},
+ {{374, 375, kSentinel}},
+ {{377, 378, kSentinel}},
+ {{379, 380, kSentinel}},
+ {{381, 382, kSentinel}},
+ {{384, 579, kSentinel}},
+ {{385, 595, kSentinel}},
+ {{386, 387, kSentinel}},
+ {{388, 389, kSentinel}},
+ {{390, 596, kSentinel}},
+ {{391, 392, kSentinel}},
+ {{393, 598, kSentinel}},
+ {{394, 599, kSentinel}},
+ {{395, 396, kSentinel}},
+ {{398, 477, kSentinel}},
+ {{399, 601, kSentinel}},
+ {{400, 603, kSentinel}},
+ {{401, 402, kSentinel}},
+ {{403, 608, kSentinel}},
+ {{404, 611, kSentinel}},
+ {{405, 502, kSentinel}},
+ {{406, 617, kSentinel}},
+ {{407, 616, kSentinel}},
+ {{408, 409, kSentinel}},
+ {{410, 573, kSentinel}},
+ {{412, 623, kSentinel}},
+ {{413, 626, kSentinel}},
+ {{414, 544, kSentinel}},
+ {{415, 629, kSentinel}},
+ {{416, 417, kSentinel}},
+ {{418, 419, kSentinel}},
+ {{420, 421, kSentinel}},
+ {{422, 640, kSentinel}},
+ {{423, 424, kSentinel}},
+ {{425, 643, kSentinel}},
+ {{428, 429, kSentinel}},
+ {{430, 648, kSentinel}},
+ {{431, 432, kSentinel}},
+ {{433, 650, kSentinel}},
+ {{434, 651, kSentinel}},
+ {{435, 436, kSentinel}},
+ {{437, 438, kSentinel}},
+ {{439, 658, kSentinel}},
+ {{440, 441, kSentinel}},
+ {{444, 445, kSentinel}},
+ {{447, 503, kSentinel}},
+ {{452, 453, 454, kSentinel}},
+ {{455, 456, 457, kSentinel}},
+ {{458, 459, 460, kSentinel}},
+ {{461, 462, kSentinel}},
+ {{463, 464, kSentinel}},
+ {{465, 466, kSentinel}},
+ {{467, 468, kSentinel}},
+ {{469, 470, kSentinel}},
+ {{471, 472, kSentinel}},
+ {{473, 474, kSentinel}},
+ {{475, 476, kSentinel}},
+ {{478, 479, kSentinel}},
+ {{480, 481, kSentinel}},
+ {{482, 483, kSentinel}},
+ {{484, 485, kSentinel}},
+ {{486, 487, kSentinel}},
+ {{488, 489, kSentinel}},
+ {{490, 491, kSentinel}},
+ {{492, 493, kSentinel}},
+ {{494, 495, kSentinel}},
+ {{497, 498, 499, kSentinel}},
+ {{500, 501, kSentinel}},
+ {{504, 505, kSentinel}},
+ {{506, 507, kSentinel}},
+ {{508, 509, kSentinel}},
+ {{510, 511, kSentinel}},
+ {{512, 513, kSentinel}},
+ {{514, 515, kSentinel}},
+ {{516, 517, kSentinel}},
+ {{518, 519, kSentinel}},
+ {{520, 521, kSentinel}},
+ {{522, 523, kSentinel}},
+ {{524, 525, kSentinel}},
+ {{526, 527, kSentinel}},
+ {{528, 529, kSentinel}},
+ {{530, 531, kSentinel}},
+ {{532, 533, kSentinel}},
+ {{534, 535, kSentinel}},
+ {{536, 537, kSentinel}},
+ {{538, 539, kSentinel}},
+ {{540, 541, kSentinel}},
+ {{542, 543, kSentinel}},
+ {{546, 547, kSentinel}},
+ {{548, 549, kSentinel}},
+ {{550, 551, kSentinel}},
+ {{552, 553, kSentinel}},
+ {{554, 555, kSentinel}},
+ {{556, 557, kSentinel}},
+ {{558, 559, kSentinel}},
+ {{560, 561, kSentinel}},
+ {{562, 563, kSentinel}},
+ {{570, 11365, kSentinel}},
+ {{571, 572, kSentinel}},
+ {{574, 11366, kSentinel}},
+ {{575, 11390, kSentinel}},
+ {{576, 11391, kSentinel}},
+ {{577, 578, kSentinel}},
+ {{580, 649, kSentinel}},
+ {{581, 652, kSentinel}},
+ {{582, 583, kSentinel}},
+ {{584, 585, kSentinel}},
+ {{586, 587, kSentinel}},
+ {{588, 589, kSentinel}},
+ {{590, 591, kSentinel}},
+ {{592, 11375, kSentinel}},
+ {{593, 11373, kSentinel}},
+ {{594, 11376, kSentinel}},
+ {{604, 42923, kSentinel}},
+ {{609, 42924, kSentinel}},
+ {{613, 42893, kSentinel}},
+ {{614, 42922, kSentinel}},
+ {{619, 11362, kSentinel}},
+ {{620, 42925, kSentinel}},
+ {{625, 11374, kSentinel}},
+ {{637, 11364, kSentinel}},
+ {{647, 42929, kSentinel}},
+ {{670, 42928, kSentinel}},
+ {{837, 921, 953, 8126}},
+ {{880, 881, kSentinel}},
+ {{882, 883, kSentinel}},
+ {{886, 887, kSentinel}},
+ {{891, 1021, kSentinel}},
+ {{893, 1023, kSentinel}},
+ {{895, 1011, kSentinel}},
+ {{902, 940, kSentinel}},
+ {{904, 941, kSentinel}},
+ {{906, 943, kSentinel}},
+ {{908, 972, kSentinel}},
+ {{910, 973, kSentinel}},
+ {{911, 974, kSentinel}},
+ {{913, 945, kSentinel}},
+ {{914, 946, 976, kSentinel}},
+ {{915, 947, kSentinel}},
+ {{916, 948, kSentinel}},
+ {{917, 949, 1013, kSentinel}},
+ {{918, 950, kSentinel}},
+ {{919, 951, kSentinel}},
+ {{920, 952, 977, kSentinel}},
+ {{922, 954, 1008, kSentinel}},
+ {{923, 955, kSentinel}},
+ {{925, 957, kSentinel}},
+ {{927, 959, kSentinel}},
+ {{928, 960, 982, kSentinel}},
+ {{929, 961, 1009, kSentinel}},
+ {{931, 962, 963, kSentinel}},
+ {{932, 964, kSentinel}},
+ {{933, 965, kSentinel}},
+ {{934, 966, 981, kSentinel}},
+ {{935, 967, kSentinel}},
+ {{939, 971, kSentinel}},
+ {{975, 983, kSentinel}},
+ {{984, 985, kSentinel}},
+ {{986, 987, kSentinel}},
+ {{988, 989, kSentinel}},
+ {{990, 991, kSentinel}},
+ {{992, 993, kSentinel}},
+ {{994, 995, kSentinel}},
+ {{996, 997, kSentinel}},
+ {{998, 999, kSentinel}},
+ {{1000, 1001, kSentinel}},
+ {{1002, 1003, kSentinel}},
+ {{1004, 1005, kSentinel}},
+ {{1006, 1007, kSentinel}},
+ {{1010, 1017, kSentinel}},
+ {{1015, 1016, kSentinel}},
+ {{1018, 1019, kSentinel}},
+ {{1024, 1104, kSentinel}},
+ {{1039, 1119, kSentinel}},
+ {{1040, 1072, kSentinel}},
+ {{1071, 1103, kSentinel}},
+ {{1120, 1121, kSentinel}},
+ {{1122, 1123, kSentinel}},
+ {{1124, 1125, kSentinel}},
+ {{1126, 1127, kSentinel}},
+ {{1128, 1129, kSentinel}},
+ {{1130, 1131, kSentinel}},
+ {{1132, 1133, kSentinel}},
+ {{1134, 1135, kSentinel}},
+ {{1136, 1137, kSentinel}},
+ {{1138, 1139, kSentinel}},
+ {{1140, 1141, kSentinel}},
+ {{1142, 1143, kSentinel}},
+ {{1144, 1145, kSentinel}},
+ {{1146, 1147, kSentinel}},
+ {{1148, 1149, kSentinel}},
+ {{1150, 1151, kSentinel}},
+ {{1152, 1153, kSentinel}},
+ {{1162, 1163, kSentinel}},
+ {{1164, 1165, kSentinel}},
+ {{1166, 1167, kSentinel}},
+ {{1168, 1169, kSentinel}},
+ {{1170, 1171, kSentinel}},
+ {{1172, 1173, kSentinel}},
+ {{1174, 1175, kSentinel}},
+ {{1176, 1177, kSentinel}},
+ {{1178, 1179, kSentinel}},
+ {{1180, 1181, kSentinel}},
+ {{1182, 1183, kSentinel}},
+ {{1184, 1185, kSentinel}},
+ {{1186, 1187, kSentinel}},
+ {{1188, 1189, kSentinel}},
+ {{1190, 1191, kSentinel}},
+ {{1192, 1193, kSentinel}},
+ {{1194, 1195, kSentinel}},
+ {{1196, 1197, kSentinel}},
+ {{1198, 1199, kSentinel}},
+ {{1200, 1201, kSentinel}},
+ {{1202, 1203, kSentinel}},
+ {{1204, 1205, kSentinel}},
+ {{1206, 1207, kSentinel}},
+ {{1208, 1209, kSentinel}},
+ {{1210, 1211, kSentinel}},
+ {{1212, 1213, kSentinel}},
+ {{1214, 1215, kSentinel}},
+ {{1216, 1231, kSentinel}},
+ {{1217, 1218, kSentinel}},
+ {{1219, 1220, kSentinel}},
+ {{1221, 1222, kSentinel}},
+ {{1223, 1224, kSentinel}},
+ {{1225, 1226, kSentinel}},
+ {{1227, 1228, kSentinel}},
+ {{1229, 1230, kSentinel}},
+ {{1232, 1233, kSentinel}},
+ {{1234, 1235, kSentinel}},
+ {{1236, 1237, kSentinel}},
+ {{1238, 1239, kSentinel}},
+ {{1240, 1241, kSentinel}},
+ {{1242, 1243, kSentinel}},
+ {{1244, 1245, kSentinel}},
+ {{1246, 1247, kSentinel}},
+ {{1248, 1249, kSentinel}},
+ {{1250, 1251, kSentinel}},
+ {{1252, 1253, kSentinel}},
+ {{1254, 1255, kSentinel}},
+ {{1256, 1257, kSentinel}},
+ {{1258, 1259, kSentinel}},
+ {{1260, 1261, kSentinel}},
+ {{1262, 1263, kSentinel}},
+ {{1264, 1265, kSentinel}},
+ {{1266, 1267, kSentinel}},
+ {{1268, 1269, kSentinel}},
+ {{1270, 1271, kSentinel}},
+ {{1272, 1273, kSentinel}},
+ {{1274, 1275, kSentinel}},
+ {{1276, 1277, kSentinel}},
+ {{1278, 1279, kSentinel}},
+ {{1280, 1281, kSentinel}},
+ {{1282, 1283, kSentinel}},
+ {{1284, 1285, kSentinel}},
+ {{1286, 1287, kSentinel}},
+ {{1288, 1289, kSentinel}},
+ {{1290, 1291, kSentinel}},
+ {{1292, 1293, kSentinel}},
+ {{1294, 1295, kSentinel}},
+ {{1296, 1297, kSentinel}},
+ {{1298, 1299, kSentinel}},
+ {{1300, 1301, kSentinel}},
+ {{1302, 1303, kSentinel}},
+ {{1304, 1305, kSentinel}},
+ {{1306, 1307, kSentinel}},
+ {{1308, 1309, kSentinel}},
+ {{1310, 1311, kSentinel}},
+ {{1312, 1313, kSentinel}},
+ {{1314, 1315, kSentinel}},
+ {{1316, 1317, kSentinel}},
+ {{1318, 1319, kSentinel}},
+ {{1320, 1321, kSentinel}},
+ {{1322, 1323, kSentinel}},
+ {{1324, 1325, kSentinel}},
+ {{1326, 1327, kSentinel}},
+ {{1329, 1377, kSentinel}},
+ {{1366, 1414, kSentinel}},
+ {{4256, 11520, kSentinel}},
+ {{4293, 11557, kSentinel}},
+ {{4295, 11559, kSentinel}},
+ {{4301, 11565, kSentinel}},
+ {{7545, 42877, kSentinel}},
+ {{7549, 11363, kSentinel}},
+ {{7680, 7681, kSentinel}},
+ {{7682, 7683, kSentinel}},
+ {{7684, 7685, kSentinel}},
+ {{7686, 7687, kSentinel}},
+ {{7688, 7689, kSentinel}},
+ {{7690, 7691, kSentinel}},
+ {{7692, 7693, kSentinel}},
+ {{7694, 7695, kSentinel}},
+ {{7696, 7697, kSentinel}},
+ {{7698, 7699, kSentinel}},
+ {{7700, 7701, kSentinel}},
+ {{7702, 7703, kSentinel}},
+ {{7704, 7705, kSentinel}},
+ {{7706, 7707, kSentinel}},
+ {{7708, 7709, kSentinel}},
+ {{7710, 7711, kSentinel}},
+ {{7712, 7713, kSentinel}},
+ {{7714, 7715, kSentinel}},
+ {{7716, 7717, kSentinel}},
+ {{7718, 7719, kSentinel}},
+ {{7720, 7721, kSentinel}},
+ {{7722, 7723, kSentinel}},
+ {{7724, 7725, kSentinel}},
+ {{7726, 7727, kSentinel}},
+ {{7728, 7729, kSentinel}},
+ {{7730, 7731, kSentinel}},
+ {{7732, 7733, kSentinel}},
+ {{7734, 7735, kSentinel}},
+ {{7736, 7737, kSentinel}},
+ {{7738, 7739, kSentinel}},
+ {{7740, 7741, kSentinel}},
+ {{7742, 7743, kSentinel}},
+ {{7744, 7745, kSentinel}},
+ {{7746, 7747, kSentinel}},
+ {{7748, 7749, kSentinel}},
+ {{7750, 7751, kSentinel}},
+ {{7752, 7753, kSentinel}},
+ {{7754, 7755, kSentinel}},
+ {{7756, 7757, kSentinel}},
+ {{7758, 7759, kSentinel}},
+ {{7760, 7761, kSentinel}},
+ {{7762, 7763, kSentinel}},
+ {{7764, 7765, kSentinel}},
+ {{7766, 7767, kSentinel}},
+ {{7768, 7769, kSentinel}},
+ {{7770, 7771, kSentinel}},
+ {{7772, 7773, kSentinel}},
+ {{7774, 7775, kSentinel}},
+ {{7776, 7777, 7835, kSentinel}},
+ {{7778, 7779, kSentinel}},
+ {{7780, 7781, kSentinel}},
+ {{7782, 7783, kSentinel}},
+ {{7784, 7785, kSentinel}},
+ {{7786, 7787, kSentinel}},
+ {{7788, 7789, kSentinel}},
+ {{7790, 7791, kSentinel}},
+ {{7792, 7793, kSentinel}},
+ {{7794, 7795, kSentinel}},
+ {{7796, 7797, kSentinel}},
+ {{7798, 7799, kSentinel}},
+ {{7800, 7801, kSentinel}},
+ {{7802, 7803, kSentinel}},
+ {{7804, 7805, kSentinel}},
+ {{7806, 7807, kSentinel}},
+ {{7808, 7809, kSentinel}},
+ {{7810, 7811, kSentinel}},
+ {{7812, 7813, kSentinel}},
+ {{7814, 7815, kSentinel}},
+ {{7816, 7817, kSentinel}},
+ {{7818, 7819, kSentinel}},
+ {{7820, 7821, kSentinel}},
+ {{7822, 7823, kSentinel}},
+ {{7824, 7825, kSentinel}},
+ {{7826, 7827, kSentinel}},
+ {{7828, 7829, kSentinel}},
+ {{7840, 7841, kSentinel}},
+ {{7842, 7843, kSentinel}},
+ {{7844, 7845, kSentinel}},
+ {{7846, 7847, kSentinel}},
+ {{7848, 7849, kSentinel}},
+ {{7850, 7851, kSentinel}},
+ {{7852, 7853, kSentinel}},
+ {{7854, 7855, kSentinel}},
+ {{7856, 7857, kSentinel}},
+ {{7858, 7859, kSentinel}},
+ {{7860, 7861, kSentinel}},
+ {{7862, 7863, kSentinel}},
+ {{7864, 7865, kSentinel}},
+ {{7866, 7867, kSentinel}},
+ {{7868, 7869, kSentinel}},
+ {{7870, 7871, kSentinel}},
+ {{7872, 7873, kSentinel}},
+ {{7874, 7875, kSentinel}},
+ {{7876, 7877, kSentinel}},
+ {{7878, 7879, kSentinel}},
+ {{7880, 7881, kSentinel}},
+ {{7882, 7883, kSentinel}},
+ {{7884, 7885, kSentinel}},
+ {{7886, 7887, kSentinel}},
+ {{7888, 7889, kSentinel}},
+ {{7890, 7891, kSentinel}},
+ {{7892, 7893, kSentinel}},
+ {{7894, 7895, kSentinel}},
+ {{7896, 7897, kSentinel}},
+ {{7898, 7899, kSentinel}},
+ {{7900, 7901, kSentinel}},
+ {{7902, 7903, kSentinel}},
+ {{7904, 7905, kSentinel}},
+ {{7906, 7907, kSentinel}},
+ {{7908, 7909, kSentinel}},
+ {{7910, 7911, kSentinel}},
+ {{7912, 7913, kSentinel}},
+ {{7914, 7915, kSentinel}},
+ {{7916, 7917, kSentinel}},
+ {{7918, 7919, kSentinel}},
+ {{7920, 7921, kSentinel}},
+ {{7922, 7923, kSentinel}},
+ {{7924, 7925, kSentinel}},
+ {{7926, 7927, kSentinel}},
+ {{7928, 7929, kSentinel}},
+ {{7930, 7931, kSentinel}},
+ {{7932, 7933, kSentinel}},
+ {{7934, 7935, kSentinel}},
+ {{7936, 7944, kSentinel}},
+ {{7943, 7951, kSentinel}},
+ {{7952, 7960, kSentinel}},
+ {{7957, 7965, kSentinel}},
+ {{7968, 7976, kSentinel}},
+ {{7975, 7983, kSentinel}},
+ {{7984, 7992, kSentinel}},
+ {{7991, 7999, kSentinel}},
+ {{8000, 8008, kSentinel}},
+ {{8005, 8013, kSentinel}},
+ {{8017, 8025, kSentinel}},
+ {{8019, 8027, kSentinel}},
+ {{8021, 8029, kSentinel}},
+ {{8023, 8031, kSentinel}},
+ {{8032, 8040, kSentinel}},
+ {{8039, 8047, kSentinel}},
+ {{8048, 8122, kSentinel}},
+ {{8049, 8123, kSentinel}},
+ {{8050, 8136, kSentinel}},
+ {{8053, 8139, kSentinel}},
+ {{8054, 8154, kSentinel}},
+ {{8055, 8155, kSentinel}},
+ {{8056, 8184, kSentinel}},
+ {{8057, 8185, kSentinel}},
+ {{8058, 8170, kSentinel}},
+ {{8059, 8171, kSentinel}},
+ {{8060, 8186, kSentinel}},
+ {{8061, 8187, kSentinel}},
+ {{8112, 8120, kSentinel}},
+ {{8113, 8121, kSentinel}},
+ {{8144, 8152, kSentinel}},
+ {{8145, 8153, kSentinel}},
+ {{8160, 8168, kSentinel}},
+ {{8161, 8169, kSentinel}},
+ {{8165, 8172, kSentinel}},
+ {{kSentinel}}};
+static const uint16_t kEcma262UnCanonicalizeTable0Size = 1005;
static const int32_t kEcma262UnCanonicalizeTable0[2010] = {
- 1073741889, 1, 90, 5, 1073741921, 1,
- 122, 5, 181, 9, 1073742016, 13,
- 214, 17, 1073742040, 21, // NOLINT
- 222, 25, 1073742048, 13, 246, 17,
- 1073742072, 21, 254, 25, 255, 29,
- 256, 33, 257, 33, // NOLINT
- 258, 37, 259, 37, 260, 41,
- 261, 41, 262, 45, 263, 45,
- 264, 49, 265, 49, // NOLINT
- 266, 53, 267, 53, 268, 57,
- 269, 57, 270, 61, 271, 61,
- 272, 65, 273, 65, // NOLINT
- 274, 69, 275, 69, 276, 73,
- 277, 73, 278, 77, 279, 77,
- 280, 81, 281, 81, // NOLINT
- 282, 85, 283, 85, 284, 89,
- 285, 89, 286, 93, 287, 93,
- 288, 97, 289, 97, // NOLINT
- 290, 101, 291, 101, 292, 105,
- 293, 105, 294, 109, 295, 109,
- 296, 113, 297, 113, // NOLINT
- 298, 117, 299, 117, 300, 121,
- 301, 121, 302, 125, 303, 125,
- 306, 129, 307, 129, // NOLINT
- 308, 133, 309, 133, 310, 137,
- 311, 137, 313, 141, 314, 141,
- 315, 145, 316, 145, // NOLINT
- 317, 149, 318, 149, 319, 153,
- 320, 153, 321, 157, 322, 157,
- 323, 161, 324, 161, // NOLINT
- 325, 165, 326, 165, 327, 169,
- 328, 169, 330, 173, 331, 173,
- 332, 177, 333, 177, // NOLINT
- 334, 181, 335, 181, 336, 185,
- 337, 185, 338, 189, 339, 189,
- 340, 193, 341, 193, // NOLINT
- 342, 197, 343, 197, 344, 201,
- 345, 201, 346, 205, 347, 205,
- 348, 209, 349, 209, // NOLINT
- 350, 213, 351, 213, 352, 217,
- 353, 217, 354, 221, 355, 221,
- 356, 225, 357, 225, // NOLINT
- 358, 229, 359, 229, 360, 233,
- 361, 233, 362, 237, 363, 237,
- 364, 241, 365, 241, // NOLINT
- 366, 245, 367, 245, 368, 249,
- 369, 249, 370, 253, 371, 253,
- 372, 257, 373, 257, // NOLINT
- 374, 261, 375, 261, 376, 29,
- 377, 265, 378, 265, 379, 269,
- 380, 269, 381, 273, // NOLINT
- 382, 273, 384, 277, 385, 281,
- 386, 285, 387, 285, 388, 289,
- 389, 289, 390, 293, // NOLINT
- 391, 297, 392, 297, 1073742217, 301,
- 394, 305, 395, 309, 396, 309,
- 398, 313, 399, 317, // NOLINT
- 400, 321, 401, 325, 402, 325,
- 403, 329, 404, 333, 405, 337,
- 406, 341, 407, 345, // NOLINT
- 408, 349, 409, 349, 410, 353,
- 412, 357, 413, 361, 414, 365,
- 415, 369, 416, 373, // NOLINT
- 417, 373, 418, 377, 419, 377,
- 420, 381, 421, 381, 422, 385,
- 423, 389, 424, 389, // NOLINT
- 425, 393, 428, 397, 429, 397,
- 430, 401, 431, 405, 432, 405,
- 1073742257, 409, 434, 413, // NOLINT
- 435, 417, 436, 417, 437, 421,
- 438, 421, 439, 425, 440, 429,
- 441, 429, 444, 433, // NOLINT
- 445, 433, 447, 437, 452, 441,
- 453, 441, 454, 441, 455, 445,
- 456, 445, 457, 445, // NOLINT
- 458, 449, 459, 449, 460, 449,
- 461, 453, 462, 453, 463, 457,
- 464, 457, 465, 461, // NOLINT
- 466, 461, 467, 465, 468, 465,
- 469, 469, 470, 469, 471, 473,
- 472, 473, 473, 477, // NOLINT
- 474, 477, 475, 481, 476, 481,
- 477, 313, 478, 485, 479, 485,
- 480, 489, 481, 489, // NOLINT
- 482, 493, 483, 493, 484, 497,
- 485, 497, 486, 501, 487, 501,
- 488, 505, 489, 505, // NOLINT
- 490, 509, 491, 509, 492, 513,
- 493, 513, 494, 517, 495, 517,
- 497, 521, 498, 521, // NOLINT
- 499, 521, 500, 525, 501, 525,
- 502, 337, 503, 437, 504, 529,
- 505, 529, 506, 533, // NOLINT
- 507, 533, 508, 537, 509, 537,
- 510, 541, 511, 541, 512, 545,
- 513, 545, 514, 549, // NOLINT
- 515, 549, 516, 553, 517, 553,
- 518, 557, 519, 557, 520, 561,
- 521, 561, 522, 565, // NOLINT
- 523, 565, 524, 569, 525, 569,
- 526, 573, 527, 573, 528, 577,
- 529, 577, 530, 581, // NOLINT
- 531, 581, 532, 585, 533, 585,
- 534, 589, 535, 589, 536, 593,
- 537, 593, 538, 597, // NOLINT
- 539, 597, 540, 601, 541, 601,
- 542, 605, 543, 605, 544, 365,
- 546, 609, 547, 609, // NOLINT
- 548, 613, 549, 613, 550, 617,
- 551, 617, 552, 621, 553, 621,
- 554, 625, 555, 625, // NOLINT
- 556, 629, 557, 629, 558, 633,
- 559, 633, 560, 637, 561, 637,
- 562, 641, 563, 641, // NOLINT
- 570, 645, 571, 649, 572, 649,
- 573, 353, 574, 653, 1073742399, 657,
- 576, 661, 577, 665, // NOLINT
- 578, 665, 579, 277, 580, 669,
- 581, 673, 582, 677, 583, 677,
- 584, 681, 585, 681, // NOLINT
- 586, 685, 587, 685, 588, 689,
- 589, 689, 590, 693, 591, 693,
- 592, 697, 593, 701, // NOLINT
- 594, 705, 595, 281, 596, 293,
- 1073742422, 301, 599, 305, 601, 317,
- 603, 321, 604, 709, // NOLINT
- 608, 329, 609, 713, 611, 333,
- 613, 717, 614, 721, 616, 345,
- 617, 341, 619, 725, // NOLINT
- 620, 729, 623, 357, 625, 733,
- 626, 361, 629, 369, 637, 737,
- 640, 385, 643, 393, // NOLINT
- 647, 741, 648, 401, 649, 669,
- 1073742474, 409, 651, 413, 652, 673,
- 658, 425, 670, 745, // NOLINT
- 837, 749, 880, 753, 881, 753,
- 882, 757, 883, 757, 886, 761,
- 887, 761, 1073742715, 765, // NOLINT
- 893, 769, 895, 773, 902, 777,
- 1073742728, 781, 906, 785, 908, 789,
- 1073742734, 793, 911, 797, // NOLINT
- 913, 801, 914, 805, 1073742739, 809,
- 916, 813, 917, 817, 1073742742, 821,
- 919, 825, 920, 829, // NOLINT
- 921, 749, 922, 833, 923, 837,
- 924, 9, 1073742749, 841, 927, 845,
- 928, 849, 929, 853, // NOLINT
- 931, 857, 1073742756, 861, 933, 865,
- 934, 869, 1073742759, 873, 939, 877,
- 940, 777, 1073742765, 781, // NOLINT
- 943, 785, 945, 801, 946, 805,
- 1073742771, 809, 948, 813, 949, 817,
- 1073742774, 821, 951, 825, // NOLINT
- 952, 829, 953, 749, 954, 833,
- 955, 837, 956, 9, 1073742781, 841,
- 959, 845, 960, 849, // NOLINT
- 961, 853, 962, 857, 963, 857,
- 1073742788, 861, 965, 865, 966, 869,
- 1073742791, 873, 971, 877, // NOLINT
- 972, 789, 1073742797, 793, 974, 797,
- 975, 881, 976, 805, 977, 829,
- 981, 869, 982, 849, // NOLINT
- 983, 881, 984, 885, 985, 885,
- 986, 889, 987, 889, 988, 893,
- 989, 893, 990, 897, // NOLINT
- 991, 897, 992, 901, 993, 901,
- 994, 905, 995, 905, 996, 909,
- 997, 909, 998, 913, // NOLINT
- 999, 913, 1000, 917, 1001, 917,
- 1002, 921, 1003, 921, 1004, 925,
- 1005, 925, 1006, 929, // NOLINT
- 1007, 929, 1008, 833, 1009, 853,
- 1010, 933, 1011, 773, 1013, 817,
- 1015, 937, 1016, 937, // NOLINT
- 1017, 933, 1018, 941, 1019, 941,
- 1073742845, 765, 1023, 769, 1073742848, 945,
- 1039, 949, 1073742864, 953, // NOLINT
- 1071, 957, 1073742896, 953, 1103, 957,
- 1073742928, 945, 1119, 949, 1120, 961,
- 1121, 961, 1122, 965, // NOLINT
- 1123, 965, 1124, 969, 1125, 969,
- 1126, 973, 1127, 973, 1128, 977,
- 1129, 977, 1130, 981, // NOLINT
- 1131, 981, 1132, 985, 1133, 985,
- 1134, 989, 1135, 989, 1136, 993,
- 1137, 993, 1138, 997, // NOLINT
- 1139, 997, 1140, 1001, 1141, 1001,
- 1142, 1005, 1143, 1005, 1144, 1009,
- 1145, 1009, 1146, 1013, // NOLINT
- 1147, 1013, 1148, 1017, 1149, 1017,
- 1150, 1021, 1151, 1021, 1152, 1025,
- 1153, 1025, 1162, 1029, // NOLINT
- 1163, 1029, 1164, 1033, 1165, 1033,
- 1166, 1037, 1167, 1037, 1168, 1041,
- 1169, 1041, 1170, 1045, // NOLINT
- 1171, 1045, 1172, 1049, 1173, 1049,
- 1174, 1053, 1175, 1053, 1176, 1057,
- 1177, 1057, 1178, 1061, // NOLINT
- 1179, 1061, 1180, 1065, 1181, 1065,
- 1182, 1069, 1183, 1069, 1184, 1073,
- 1185, 1073, 1186, 1077, // NOLINT
- 1187, 1077, 1188, 1081, 1189, 1081,
- 1190, 1085, 1191, 1085, 1192, 1089,
- 1193, 1089, 1194, 1093, // NOLINT
- 1195, 1093, 1196, 1097, 1197, 1097,
- 1198, 1101, 1199, 1101, 1200, 1105,
- 1201, 1105, 1202, 1109, // NOLINT
- 1203, 1109, 1204, 1113, 1205, 1113,
- 1206, 1117, 1207, 1117, 1208, 1121,
- 1209, 1121, 1210, 1125, // NOLINT
- 1211, 1125, 1212, 1129, 1213, 1129,
- 1214, 1133, 1215, 1133, 1216, 1137,
- 1217, 1141, 1218, 1141, // NOLINT
- 1219, 1145, 1220, 1145, 1221, 1149,
- 1222, 1149, 1223, 1153, 1224, 1153,
- 1225, 1157, 1226, 1157, // NOLINT
- 1227, 1161, 1228, 1161, 1229, 1165,
- 1230, 1165, 1231, 1137, 1232, 1169,
- 1233, 1169, 1234, 1173, // NOLINT
- 1235, 1173, 1236, 1177, 1237, 1177,
- 1238, 1181, 1239, 1181, 1240, 1185,
- 1241, 1185, 1242, 1189, // NOLINT
- 1243, 1189, 1244, 1193, 1245, 1193,
- 1246, 1197, 1247, 1197, 1248, 1201,
- 1249, 1201, 1250, 1205, // NOLINT
- 1251, 1205, 1252, 1209, 1253, 1209,
- 1254, 1213, 1255, 1213, 1256, 1217,
- 1257, 1217, 1258, 1221, // NOLINT
- 1259, 1221, 1260, 1225, 1261, 1225,
- 1262, 1229, 1263, 1229, 1264, 1233,
- 1265, 1233, 1266, 1237, // NOLINT
- 1267, 1237, 1268, 1241, 1269, 1241,
- 1270, 1245, 1271, 1245, 1272, 1249,
- 1273, 1249, 1274, 1253, // NOLINT
- 1275, 1253, 1276, 1257, 1277, 1257,
- 1278, 1261, 1279, 1261, 1280, 1265,
- 1281, 1265, 1282, 1269, // NOLINT
- 1283, 1269, 1284, 1273, 1285, 1273,
- 1286, 1277, 1287, 1277, 1288, 1281,
- 1289, 1281, 1290, 1285, // NOLINT
- 1291, 1285, 1292, 1289, 1293, 1289,
- 1294, 1293, 1295, 1293, 1296, 1297,
- 1297, 1297, 1298, 1301, // NOLINT
- 1299, 1301, 1300, 1305, 1301, 1305,
- 1302, 1309, 1303, 1309, 1304, 1313,
- 1305, 1313, 1306, 1317, // NOLINT
- 1307, 1317, 1308, 1321, 1309, 1321,
- 1310, 1325, 1311, 1325, 1312, 1329,
- 1313, 1329, 1314, 1333, // NOLINT
- 1315, 1333, 1316, 1337, 1317, 1337,
- 1318, 1341, 1319, 1341, 1320, 1345,
- 1321, 1345, 1322, 1349, // NOLINT
- 1323, 1349, 1324, 1353, 1325, 1353,
- 1326, 1357, 1327, 1357, 1073743153, 1361,
- 1366, 1365, 1073743201, 1361, // NOLINT
- 1414, 1365, 1073746080, 1369, 4293, 1373,
- 4295, 1377, 4301, 1381, 7545, 1385,
- 7549, 1389, 7680, 1393, // NOLINT
- 7681, 1393, 7682, 1397, 7683, 1397,
- 7684, 1401, 7685, 1401, 7686, 1405,
- 7687, 1405, 7688, 1409, // NOLINT
- 7689, 1409, 7690, 1413, 7691, 1413,
- 7692, 1417, 7693, 1417, 7694, 1421,
- 7695, 1421, 7696, 1425, // NOLINT
- 7697, 1425, 7698, 1429, 7699, 1429,
- 7700, 1433, 7701, 1433, 7702, 1437,
- 7703, 1437, 7704, 1441, // NOLINT
- 7705, 1441, 7706, 1445, 7707, 1445,
- 7708, 1449, 7709, 1449, 7710, 1453,
- 7711, 1453, 7712, 1457, // NOLINT
- 7713, 1457, 7714, 1461, 7715, 1461,
- 7716, 1465, 7717, 1465, 7718, 1469,
- 7719, 1469, 7720, 1473, // NOLINT
- 7721, 1473, 7722, 1477, 7723, 1477,
- 7724, 1481, 7725, 1481, 7726, 1485,
- 7727, 1485, 7728, 1489, // NOLINT
- 7729, 1489, 7730, 1493, 7731, 1493,
- 7732, 1497, 7733, 1497, 7734, 1501,
- 7735, 1501, 7736, 1505, // NOLINT
- 7737, 1505, 7738, 1509, 7739, 1509,
- 7740, 1513, 7741, 1513, 7742, 1517,
- 7743, 1517, 7744, 1521, // NOLINT
- 7745, 1521, 7746, 1525, 7747, 1525,
- 7748, 1529, 7749, 1529, 7750, 1533,
- 7751, 1533, 7752, 1537, // NOLINT
- 7753, 1537, 7754, 1541, 7755, 1541,
- 7756, 1545, 7757, 1545, 7758, 1549,
- 7759, 1549, 7760, 1553, // NOLINT
- 7761, 1553, 7762, 1557, 7763, 1557,
- 7764, 1561, 7765, 1561, 7766, 1565,
- 7767, 1565, 7768, 1569, // NOLINT
- 7769, 1569, 7770, 1573, 7771, 1573,
- 7772, 1577, 7773, 1577, 7774, 1581,
- 7775, 1581, 7776, 1585, // NOLINT
- 7777, 1585, 7778, 1589, 7779, 1589,
- 7780, 1593, 7781, 1593, 7782, 1597,
- 7783, 1597, 7784, 1601, // NOLINT
- 7785, 1601, 7786, 1605, 7787, 1605,
- 7788, 1609, 7789, 1609, 7790, 1613,
- 7791, 1613, 7792, 1617, // NOLINT
- 7793, 1617, 7794, 1621, 7795, 1621,
- 7796, 1625, 7797, 1625, 7798, 1629,
- 7799, 1629, 7800, 1633, // NOLINT
- 7801, 1633, 7802, 1637, 7803, 1637,
- 7804, 1641, 7805, 1641, 7806, 1645,
- 7807, 1645, 7808, 1649, // NOLINT
- 7809, 1649, 7810, 1653, 7811, 1653,
- 7812, 1657, 7813, 1657, 7814, 1661,
- 7815, 1661, 7816, 1665, // NOLINT
- 7817, 1665, 7818, 1669, 7819, 1669,
- 7820, 1673, 7821, 1673, 7822, 1677,
- 7823, 1677, 7824, 1681, // NOLINT
- 7825, 1681, 7826, 1685, 7827, 1685,
- 7828, 1689, 7829, 1689, 7835, 1585,
- 7840, 1693, 7841, 1693, // NOLINT
- 7842, 1697, 7843, 1697, 7844, 1701,
- 7845, 1701, 7846, 1705, 7847, 1705,
- 7848, 1709, 7849, 1709, // NOLINT
- 7850, 1713, 7851, 1713, 7852, 1717,
- 7853, 1717, 7854, 1721, 7855, 1721,
- 7856, 1725, 7857, 1725, // NOLINT
- 7858, 1729, 7859, 1729, 7860, 1733,
- 7861, 1733, 7862, 1737, 7863, 1737,
- 7864, 1741, 7865, 1741, // NOLINT
- 7866, 1745, 7867, 1745, 7868, 1749,
- 7869, 1749, 7870, 1753, 7871, 1753,
- 7872, 1757, 7873, 1757, // NOLINT
- 7874, 1761, 7875, 1761, 7876, 1765,
- 7877, 1765, 7878, 1769, 7879, 1769,
- 7880, 1773, 7881, 1773, // NOLINT
- 7882, 1777, 7883, 1777, 7884, 1781,
- 7885, 1781, 7886, 1785, 7887, 1785,
- 7888, 1789, 7889, 1789, // NOLINT
- 7890, 1793, 7891, 1793, 7892, 1797,
- 7893, 1797, 7894, 1801, 7895, 1801,
- 7896, 1805, 7897, 1805, // NOLINT
- 7898, 1809, 7899, 1809, 7900, 1813,
- 7901, 1813, 7902, 1817, 7903, 1817,
- 7904, 1821, 7905, 1821, // NOLINT
- 7906, 1825, 7907, 1825, 7908, 1829,
- 7909, 1829, 7910, 1833, 7911, 1833,
- 7912, 1837, 7913, 1837, // NOLINT
- 7914, 1841, 7915, 1841, 7916, 1845,
- 7917, 1845, 7918, 1849, 7919, 1849,
- 7920, 1853, 7921, 1853, // NOLINT
- 7922, 1857, 7923, 1857, 7924, 1861,
- 7925, 1861, 7926, 1865, 7927, 1865,
- 7928, 1869, 7929, 1869, // NOLINT
- 7930, 1873, 7931, 1873, 7932, 1877,
- 7933, 1877, 7934, 1881, 7935, 1881,
- 1073749760, 1885, 7943, 1889, // NOLINT
- 1073749768, 1885, 7951, 1889, 1073749776, 1893,
- 7957, 1897, 1073749784, 1893, 7965, 1897,
- 1073749792, 1901, 7975, 1905, // NOLINT
- 1073749800, 1901, 7983, 1905, 1073749808, 1909,
- 7991, 1913, 1073749816, 1909, 7999, 1913,
- 1073749824, 1917, 8005, 1921, // NOLINT
- 1073749832, 1917, 8013, 1921, 8017, 1925,
- 8019, 1929, 8021, 1933, 8023, 1937,
- 8025, 1925, 8027, 1929, // NOLINT
- 8029, 1933, 8031, 1937, 1073749856, 1941,
- 8039, 1945, 1073749864, 1941, 8047, 1945,
- 1073749872, 1949, 8049, 1953, // NOLINT
- 1073749874, 1957, 8053, 1961, 1073749878, 1965,
- 8055, 1969, 1073749880, 1973, 8057, 1977,
- 1073749882, 1981, 8059, 1985, // NOLINT
- 1073749884, 1989, 8061, 1993, 1073749936, 1997,
- 8113, 2001, 1073749944, 1997, 8121, 2001,
- 1073749946, 1949, 8123, 1953, // NOLINT
- 8126, 749, 1073749960, 1957, 8139, 1961,
- 1073749968, 2005, 8145, 2009, 1073749976, 2005,
- 8153, 2009, 1073749978, 1965, // NOLINT
- 8155, 1969, 1073749984, 2013, 8161, 2017,
- 8165, 2021, 1073749992, 2013, 8169, 2017,
- 1073749994, 1981, 8171, 1985, // NOLINT
- 8172, 2021, 1073750008, 1973, 8185, 1977,
- 1073750010, 1989, 8187, 1993}; // NOLINT
-static const uint16_t kEcma262UnCanonicalizeMultiStrings0Size = 507; // NOLINT
+ 1073741889, 1, 90, 5, 1073741921, 1, 122, 5,
+ 181, 9, 1073742016, 13, 214, 17, 1073742040, 21,
+ 222, 25, 1073742048, 13, 246, 17, 1073742072, 21,
+ 254, 25, 255, 29, 256, 33, 257, 33,
+ 258, 37, 259, 37, 260, 41, 261, 41,
+ 262, 45, 263, 45, 264, 49, 265, 49,
+ 266, 53, 267, 53, 268, 57, 269, 57,
+ 270, 61, 271, 61, 272, 65, 273, 65,
+ 274, 69, 275, 69, 276, 73, 277, 73,
+ 278, 77, 279, 77, 280, 81, 281, 81,
+ 282, 85, 283, 85, 284, 89, 285, 89,
+ 286, 93, 287, 93, 288, 97, 289, 97,
+ 290, 101, 291, 101, 292, 105, 293, 105,
+ 294, 109, 295, 109, 296, 113, 297, 113,
+ 298, 117, 299, 117, 300, 121, 301, 121,
+ 302, 125, 303, 125, 306, 129, 307, 129,
+ 308, 133, 309, 133, 310, 137, 311, 137,
+ 313, 141, 314, 141, 315, 145, 316, 145,
+ 317, 149, 318, 149, 319, 153, 320, 153,
+ 321, 157, 322, 157, 323, 161, 324, 161,
+ 325, 165, 326, 165, 327, 169, 328, 169,
+ 330, 173, 331, 173, 332, 177, 333, 177,
+ 334, 181, 335, 181, 336, 185, 337, 185,
+ 338, 189, 339, 189, 340, 193, 341, 193,
+ 342, 197, 343, 197, 344, 201, 345, 201,
+ 346, 205, 347, 205, 348, 209, 349, 209,
+ 350, 213, 351, 213, 352, 217, 353, 217,
+ 354, 221, 355, 221, 356, 225, 357, 225,
+ 358, 229, 359, 229, 360, 233, 361, 233,
+ 362, 237, 363, 237, 364, 241, 365, 241,
+ 366, 245, 367, 245, 368, 249, 369, 249,
+ 370, 253, 371, 253, 372, 257, 373, 257,
+ 374, 261, 375, 261, 376, 29, 377, 265,
+ 378, 265, 379, 269, 380, 269, 381, 273,
+ 382, 273, 384, 277, 385, 281, 386, 285,
+ 387, 285, 388, 289, 389, 289, 390, 293,
+ 391, 297, 392, 297, 1073742217, 301, 394, 305,
+ 395, 309, 396, 309, 398, 313, 399, 317,
+ 400, 321, 401, 325, 402, 325, 403, 329,
+ 404, 333, 405, 337, 406, 341, 407, 345,
+ 408, 349, 409, 349, 410, 353, 412, 357,
+ 413, 361, 414, 365, 415, 369, 416, 373,
+ 417, 373, 418, 377, 419, 377, 420, 381,
+ 421, 381, 422, 385, 423, 389, 424, 389,
+ 425, 393, 428, 397, 429, 397, 430, 401,
+ 431, 405, 432, 405, 1073742257, 409, 434, 413,
+ 435, 417, 436, 417, 437, 421, 438, 421,
+ 439, 425, 440, 429, 441, 429, 444, 433,
+ 445, 433, 447, 437, 452, 441, 453, 441,
+ 454, 441, 455, 445, 456, 445, 457, 445,
+ 458, 449, 459, 449, 460, 449, 461, 453,
+ 462, 453, 463, 457, 464, 457, 465, 461,
+ 466, 461, 467, 465, 468, 465, 469, 469,
+ 470, 469, 471, 473, 472, 473, 473, 477,
+ 474, 477, 475, 481, 476, 481, 477, 313,
+ 478, 485, 479, 485, 480, 489, 481, 489,
+ 482, 493, 483, 493, 484, 497, 485, 497,
+ 486, 501, 487, 501, 488, 505, 489, 505,
+ 490, 509, 491, 509, 492, 513, 493, 513,
+ 494, 517, 495, 517, 497, 521, 498, 521,
+ 499, 521, 500, 525, 501, 525, 502, 337,
+ 503, 437, 504, 529, 505, 529, 506, 533,
+ 507, 533, 508, 537, 509, 537, 510, 541,
+ 511, 541, 512, 545, 513, 545, 514, 549,
+ 515, 549, 516, 553, 517, 553, 518, 557,
+ 519, 557, 520, 561, 521, 561, 522, 565,
+ 523, 565, 524, 569, 525, 569, 526, 573,
+ 527, 573, 528, 577, 529, 577, 530, 581,
+ 531, 581, 532, 585, 533, 585, 534, 589,
+ 535, 589, 536, 593, 537, 593, 538, 597,
+ 539, 597, 540, 601, 541, 601, 542, 605,
+ 543, 605, 544, 365, 546, 609, 547, 609,
+ 548, 613, 549, 613, 550, 617, 551, 617,
+ 552, 621, 553, 621, 554, 625, 555, 625,
+ 556, 629, 557, 629, 558, 633, 559, 633,
+ 560, 637, 561, 637, 562, 641, 563, 641,
+ 570, 645, 571, 649, 572, 649, 573, 353,
+ 574, 653, 1073742399, 657, 576, 661, 577, 665,
+ 578, 665, 579, 277, 580, 669, 581, 673,
+ 582, 677, 583, 677, 584, 681, 585, 681,
+ 586, 685, 587, 685, 588, 689, 589, 689,
+ 590, 693, 591, 693, 592, 697, 593, 701,
+ 594, 705, 595, 281, 596, 293, 1073742422, 301,
+ 599, 305, 601, 317, 603, 321, 604, 709,
+ 608, 329, 609, 713, 611, 333, 613, 717,
+ 614, 721, 616, 345, 617, 341, 619, 725,
+ 620, 729, 623, 357, 625, 733, 626, 361,
+ 629, 369, 637, 737, 640, 385, 643, 393,
+ 647, 741, 648, 401, 649, 669, 1073742474, 409,
+ 651, 413, 652, 673, 658, 425, 670, 745,
+ 837, 749, 880, 753, 881, 753, 882, 757,
+ 883, 757, 886, 761, 887, 761, 1073742715, 765,
+ 893, 769, 895, 773, 902, 777, 1073742728, 781,
+ 906, 785, 908, 789, 1073742734, 793, 911, 797,
+ 913, 801, 914, 805, 1073742739, 809, 916, 813,
+ 917, 817, 1073742742, 821, 919, 825, 920, 829,
+ 921, 749, 922, 833, 923, 837, 924, 9,
+ 1073742749, 841, 927, 845, 928, 849, 929, 853,
+ 931, 857, 1073742756, 861, 933, 865, 934, 869,
+ 1073742759, 873, 939, 877, 940, 777, 1073742765, 781,
+ 943, 785, 945, 801, 946, 805, 1073742771, 809,
+ 948, 813, 949, 817, 1073742774, 821, 951, 825,
+ 952, 829, 953, 749, 954, 833, 955, 837,
+ 956, 9, 1073742781, 841, 959, 845, 960, 849,
+ 961, 853, 962, 857, 963, 857, 1073742788, 861,
+ 965, 865, 966, 869, 1073742791, 873, 971, 877,
+ 972, 789, 1073742797, 793, 974, 797, 975, 881,
+ 976, 805, 977, 829, 981, 869, 982, 849,
+ 983, 881, 984, 885, 985, 885, 986, 889,
+ 987, 889, 988, 893, 989, 893, 990, 897,
+ 991, 897, 992, 901, 993, 901, 994, 905,
+ 995, 905, 996, 909, 997, 909, 998, 913,
+ 999, 913, 1000, 917, 1001, 917, 1002, 921,
+ 1003, 921, 1004, 925, 1005, 925, 1006, 929,
+ 1007, 929, 1008, 833, 1009, 853, 1010, 933,
+ 1011, 773, 1013, 817, 1015, 937, 1016, 937,
+ 1017, 933, 1018, 941, 1019, 941, 1073742845, 765,
+ 1023, 769, 1073742848, 945, 1039, 949, 1073742864, 953,
+ 1071, 957, 1073742896, 953, 1103, 957, 1073742928, 945,
+ 1119, 949, 1120, 961, 1121, 961, 1122, 965,
+ 1123, 965, 1124, 969, 1125, 969, 1126, 973,
+ 1127, 973, 1128, 977, 1129, 977, 1130, 981,
+ 1131, 981, 1132, 985, 1133, 985, 1134, 989,
+ 1135, 989, 1136, 993, 1137, 993, 1138, 997,
+ 1139, 997, 1140, 1001, 1141, 1001, 1142, 1005,
+ 1143, 1005, 1144, 1009, 1145, 1009, 1146, 1013,
+ 1147, 1013, 1148, 1017, 1149, 1017, 1150, 1021,
+ 1151, 1021, 1152, 1025, 1153, 1025, 1162, 1029,
+ 1163, 1029, 1164, 1033, 1165, 1033, 1166, 1037,
+ 1167, 1037, 1168, 1041, 1169, 1041, 1170, 1045,
+ 1171, 1045, 1172, 1049, 1173, 1049, 1174, 1053,
+ 1175, 1053, 1176, 1057, 1177, 1057, 1178, 1061,
+ 1179, 1061, 1180, 1065, 1181, 1065, 1182, 1069,
+ 1183, 1069, 1184, 1073, 1185, 1073, 1186, 1077,
+ 1187, 1077, 1188, 1081, 1189, 1081, 1190, 1085,
+ 1191, 1085, 1192, 1089, 1193, 1089, 1194, 1093,
+ 1195, 1093, 1196, 1097, 1197, 1097, 1198, 1101,
+ 1199, 1101, 1200, 1105, 1201, 1105, 1202, 1109,
+ 1203, 1109, 1204, 1113, 1205, 1113, 1206, 1117,
+ 1207, 1117, 1208, 1121, 1209, 1121, 1210, 1125,
+ 1211, 1125, 1212, 1129, 1213, 1129, 1214, 1133,
+ 1215, 1133, 1216, 1137, 1217, 1141, 1218, 1141,
+ 1219, 1145, 1220, 1145, 1221, 1149, 1222, 1149,
+ 1223, 1153, 1224, 1153, 1225, 1157, 1226, 1157,
+ 1227, 1161, 1228, 1161, 1229, 1165, 1230, 1165,
+ 1231, 1137, 1232, 1169, 1233, 1169, 1234, 1173,
+ 1235, 1173, 1236, 1177, 1237, 1177, 1238, 1181,
+ 1239, 1181, 1240, 1185, 1241, 1185, 1242, 1189,
+ 1243, 1189, 1244, 1193, 1245, 1193, 1246, 1197,
+ 1247, 1197, 1248, 1201, 1249, 1201, 1250, 1205,
+ 1251, 1205, 1252, 1209, 1253, 1209, 1254, 1213,
+ 1255, 1213, 1256, 1217, 1257, 1217, 1258, 1221,
+ 1259, 1221, 1260, 1225, 1261, 1225, 1262, 1229,
+ 1263, 1229, 1264, 1233, 1265, 1233, 1266, 1237,
+ 1267, 1237, 1268, 1241, 1269, 1241, 1270, 1245,
+ 1271, 1245, 1272, 1249, 1273, 1249, 1274, 1253,
+ 1275, 1253, 1276, 1257, 1277, 1257, 1278, 1261,
+ 1279, 1261, 1280, 1265, 1281, 1265, 1282, 1269,
+ 1283, 1269, 1284, 1273, 1285, 1273, 1286, 1277,
+ 1287, 1277, 1288, 1281, 1289, 1281, 1290, 1285,
+ 1291, 1285, 1292, 1289, 1293, 1289, 1294, 1293,
+ 1295, 1293, 1296, 1297, 1297, 1297, 1298, 1301,
+ 1299, 1301, 1300, 1305, 1301, 1305, 1302, 1309,
+ 1303, 1309, 1304, 1313, 1305, 1313, 1306, 1317,
+ 1307, 1317, 1308, 1321, 1309, 1321, 1310, 1325,
+ 1311, 1325, 1312, 1329, 1313, 1329, 1314, 1333,
+ 1315, 1333, 1316, 1337, 1317, 1337, 1318, 1341,
+ 1319, 1341, 1320, 1345, 1321, 1345, 1322, 1349,
+ 1323, 1349, 1324, 1353, 1325, 1353, 1326, 1357,
+ 1327, 1357, 1073743153, 1361, 1366, 1365, 1073743201, 1361,
+ 1414, 1365, 1073746080, 1369, 4293, 1373, 4295, 1377,
+ 4301, 1381, 7545, 1385, 7549, 1389, 7680, 1393,
+ 7681, 1393, 7682, 1397, 7683, 1397, 7684, 1401,
+ 7685, 1401, 7686, 1405, 7687, 1405, 7688, 1409,
+ 7689, 1409, 7690, 1413, 7691, 1413, 7692, 1417,
+ 7693, 1417, 7694, 1421, 7695, 1421, 7696, 1425,
+ 7697, 1425, 7698, 1429, 7699, 1429, 7700, 1433,
+ 7701, 1433, 7702, 1437, 7703, 1437, 7704, 1441,
+ 7705, 1441, 7706, 1445, 7707, 1445, 7708, 1449,
+ 7709, 1449, 7710, 1453, 7711, 1453, 7712, 1457,
+ 7713, 1457, 7714, 1461, 7715, 1461, 7716, 1465,
+ 7717, 1465, 7718, 1469, 7719, 1469, 7720, 1473,
+ 7721, 1473, 7722, 1477, 7723, 1477, 7724, 1481,
+ 7725, 1481, 7726, 1485, 7727, 1485, 7728, 1489,
+ 7729, 1489, 7730, 1493, 7731, 1493, 7732, 1497,
+ 7733, 1497, 7734, 1501, 7735, 1501, 7736, 1505,
+ 7737, 1505, 7738, 1509, 7739, 1509, 7740, 1513,
+ 7741, 1513, 7742, 1517, 7743, 1517, 7744, 1521,
+ 7745, 1521, 7746, 1525, 7747, 1525, 7748, 1529,
+ 7749, 1529, 7750, 1533, 7751, 1533, 7752, 1537,
+ 7753, 1537, 7754, 1541, 7755, 1541, 7756, 1545,
+ 7757, 1545, 7758, 1549, 7759, 1549, 7760, 1553,
+ 7761, 1553, 7762, 1557, 7763, 1557, 7764, 1561,
+ 7765, 1561, 7766, 1565, 7767, 1565, 7768, 1569,
+ 7769, 1569, 7770, 1573, 7771, 1573, 7772, 1577,
+ 7773, 1577, 7774, 1581, 7775, 1581, 7776, 1585,
+ 7777, 1585, 7778, 1589, 7779, 1589, 7780, 1593,
+ 7781, 1593, 7782, 1597, 7783, 1597, 7784, 1601,
+ 7785, 1601, 7786, 1605, 7787, 1605, 7788, 1609,
+ 7789, 1609, 7790, 1613, 7791, 1613, 7792, 1617,
+ 7793, 1617, 7794, 1621, 7795, 1621, 7796, 1625,
+ 7797, 1625, 7798, 1629, 7799, 1629, 7800, 1633,
+ 7801, 1633, 7802, 1637, 7803, 1637, 7804, 1641,
+ 7805, 1641, 7806, 1645, 7807, 1645, 7808, 1649,
+ 7809, 1649, 7810, 1653, 7811, 1653, 7812, 1657,
+ 7813, 1657, 7814, 1661, 7815, 1661, 7816, 1665,
+ 7817, 1665, 7818, 1669, 7819, 1669, 7820, 1673,
+ 7821, 1673, 7822, 1677, 7823, 1677, 7824, 1681,
+ 7825, 1681, 7826, 1685, 7827, 1685, 7828, 1689,
+ 7829, 1689, 7835, 1585, 7840, 1693, 7841, 1693,
+ 7842, 1697, 7843, 1697, 7844, 1701, 7845, 1701,
+ 7846, 1705, 7847, 1705, 7848, 1709, 7849, 1709,
+ 7850, 1713, 7851, 1713, 7852, 1717, 7853, 1717,
+ 7854, 1721, 7855, 1721, 7856, 1725, 7857, 1725,
+ 7858, 1729, 7859, 1729, 7860, 1733, 7861, 1733,
+ 7862, 1737, 7863, 1737, 7864, 1741, 7865, 1741,
+ 7866, 1745, 7867, 1745, 7868, 1749, 7869, 1749,
+ 7870, 1753, 7871, 1753, 7872, 1757, 7873, 1757,
+ 7874, 1761, 7875, 1761, 7876, 1765, 7877, 1765,
+ 7878, 1769, 7879, 1769, 7880, 1773, 7881, 1773,
+ 7882, 1777, 7883, 1777, 7884, 1781, 7885, 1781,
+ 7886, 1785, 7887, 1785, 7888, 1789, 7889, 1789,
+ 7890, 1793, 7891, 1793, 7892, 1797, 7893, 1797,
+ 7894, 1801, 7895, 1801, 7896, 1805, 7897, 1805,
+ 7898, 1809, 7899, 1809, 7900, 1813, 7901, 1813,
+ 7902, 1817, 7903, 1817, 7904, 1821, 7905, 1821,
+ 7906, 1825, 7907, 1825, 7908, 1829, 7909, 1829,
+ 7910, 1833, 7911, 1833, 7912, 1837, 7913, 1837,
+ 7914, 1841, 7915, 1841, 7916, 1845, 7917, 1845,
+ 7918, 1849, 7919, 1849, 7920, 1853, 7921, 1853,
+ 7922, 1857, 7923, 1857, 7924, 1861, 7925, 1861,
+ 7926, 1865, 7927, 1865, 7928, 1869, 7929, 1869,
+ 7930, 1873, 7931, 1873, 7932, 1877, 7933, 1877,
+ 7934, 1881, 7935, 1881, 1073749760, 1885, 7943, 1889,
+ 1073749768, 1885, 7951, 1889, 1073749776, 1893, 7957, 1897,
+ 1073749784, 1893, 7965, 1897, 1073749792, 1901, 7975, 1905,
+ 1073749800, 1901, 7983, 1905, 1073749808, 1909, 7991, 1913,
+ 1073749816, 1909, 7999, 1913, 1073749824, 1917, 8005, 1921,
+ 1073749832, 1917, 8013, 1921, 8017, 1925, 8019, 1929,
+ 8021, 1933, 8023, 1937, 8025, 1925, 8027, 1929,
+ 8029, 1933, 8031, 1937, 1073749856, 1941, 8039, 1945,
+ 1073749864, 1941, 8047, 1945, 1073749872, 1949, 8049, 1953,
+ 1073749874, 1957, 8053, 1961, 1073749878, 1965, 8055, 1969,
+ 1073749880, 1973, 8057, 1977, 1073749882, 1981, 8059, 1985,
+ 1073749884, 1989, 8061, 1993, 1073749936, 1997, 8113, 2001,
+ 1073749944, 1997, 8121, 2001, 1073749946, 1949, 8123, 1953,
+ 8126, 749, 1073749960, 1957, 8139, 1961, 1073749968, 2005,
+ 8145, 2009, 1073749976, 2005, 8153, 2009, 1073749978, 1965,
+ 8155, 1969, 1073749984, 2013, 8161, 2017, 8165, 2021,
+ 1073749992, 2013, 8169, 2017, 1073749994, 1981, 8171, 1985,
+ 8172, 2021, 1073750008, 1973, 8185, 1977, 1073750010, 1989,
+ 8187, 1993};
+static const uint16_t kEcma262UnCanonicalizeMultiStrings0Size = 507;
static const MultiCharacterSpecialCase<2>
- kEcma262UnCanonicalizeMultiStrings1[83] = { // NOLINT
- {{8498, 8526}}, {{8544, 8560}}, {{8559, 8575}},
- {{8579, 8580}}, // NOLINT
- {{9398, 9424}}, {{9423, 9449}}, {{11264, 11312}},
- {{11310, 11358}}, // NOLINT
- {{11360, 11361}}, {{619, 11362}}, {{7549, 11363}},
- {{637, 11364}}, // NOLINT
- {{570, 11365}}, {{574, 11366}}, {{11367, 11368}},
- {{11369, 11370}}, // NOLINT
- {{11371, 11372}}, {{593, 11373}}, {{625, 11374}},
- {{592, 11375}}, // NOLINT
- {{594, 11376}}, {{11378, 11379}}, {{11381, 11382}},
- {{575, 11390}}, // NOLINT
- {{576, 11391}}, {{11392, 11393}}, {{11394, 11395}},
- {{11396, 11397}}, // NOLINT
- {{11398, 11399}}, {{11400, 11401}}, {{11402, 11403}},
- {{11404, 11405}}, // NOLINT
- {{11406, 11407}}, {{11408, 11409}}, {{11410, 11411}},
- {{11412, 11413}}, // NOLINT
- {{11414, 11415}}, {{11416, 11417}}, {{11418, 11419}},
- {{11420, 11421}}, // NOLINT
- {{11422, 11423}}, {{11424, 11425}}, {{11426, 11427}},
- {{11428, 11429}}, // NOLINT
- {{11430, 11431}}, {{11432, 11433}}, {{11434, 11435}},
- {{11436, 11437}}, // NOLINT
- {{11438, 11439}}, {{11440, 11441}}, {{11442, 11443}},
- {{11444, 11445}}, // NOLINT
- {{11446, 11447}}, {{11448, 11449}}, {{11450, 11451}},
- {{11452, 11453}}, // NOLINT
- {{11454, 11455}}, {{11456, 11457}}, {{11458, 11459}},
- {{11460, 11461}}, // NOLINT
- {{11462, 11463}}, {{11464, 11465}}, {{11466, 11467}},
- {{11468, 11469}}, // NOLINT
- {{11470, 11471}}, {{11472, 11473}}, {{11474, 11475}},
- {{11476, 11477}}, // NOLINT
- {{11478, 11479}}, {{11480, 11481}}, {{11482, 11483}},
- {{11484, 11485}}, // NOLINT
- {{11486, 11487}}, {{11488, 11489}}, {{11490, 11491}},
- {{11499, 11500}}, // NOLINT
- {{11501, 11502}}, {{11506, 11507}}, {{4256, 11520}},
- {{4293, 11557}}, // NOLINT
- {{4295, 11559}}, {{4301, 11565}}, {{kSentinel}}}; // NOLINT
-static const uint16_t kEcma262UnCanonicalizeTable1Size = 149; // NOLINT
+ kEcma262UnCanonicalizeMultiStrings1[83] = {
+ {{8498, 8526}}, {{8544, 8560}}, {{8559, 8575}}, {{8579, 8580}},
+ {{9398, 9424}}, {{9423, 9449}}, {{11264, 11312}}, {{11310, 11358}},
+ {{11360, 11361}}, {{619, 11362}}, {{7549, 11363}}, {{637, 11364}},
+ {{570, 11365}}, {{574, 11366}}, {{11367, 11368}}, {{11369, 11370}},
+ {{11371, 11372}}, {{593, 11373}}, {{625, 11374}}, {{592, 11375}},
+ {{594, 11376}}, {{11378, 11379}}, {{11381, 11382}}, {{575, 11390}},
+ {{576, 11391}}, {{11392, 11393}}, {{11394, 11395}}, {{11396, 11397}},
+ {{11398, 11399}}, {{11400, 11401}}, {{11402, 11403}}, {{11404, 11405}},
+ {{11406, 11407}}, {{11408, 11409}}, {{11410, 11411}}, {{11412, 11413}},
+ {{11414, 11415}}, {{11416, 11417}}, {{11418, 11419}}, {{11420, 11421}},
+ {{11422, 11423}}, {{11424, 11425}}, {{11426, 11427}}, {{11428, 11429}},
+ {{11430, 11431}}, {{11432, 11433}}, {{11434, 11435}}, {{11436, 11437}},
+ {{11438, 11439}}, {{11440, 11441}}, {{11442, 11443}}, {{11444, 11445}},
+ {{11446, 11447}}, {{11448, 11449}}, {{11450, 11451}}, {{11452, 11453}},
+ {{11454, 11455}}, {{11456, 11457}}, {{11458, 11459}}, {{11460, 11461}},
+ {{11462, 11463}}, {{11464, 11465}}, {{11466, 11467}}, {{11468, 11469}},
+ {{11470, 11471}}, {{11472, 11473}}, {{11474, 11475}}, {{11476, 11477}},
+ {{11478, 11479}}, {{11480, 11481}}, {{11482, 11483}}, {{11484, 11485}},
+ {{11486, 11487}}, {{11488, 11489}}, {{11490, 11491}}, {{11499, 11500}},
+ {{11501, 11502}}, {{11506, 11507}}, {{4256, 11520}}, {{4293, 11557}},
+ {{4295, 11559}}, {{4301, 11565}}, {{kSentinel}}};
+static const uint16_t kEcma262UnCanonicalizeTable1Size = 149;
static const int32_t kEcma262UnCanonicalizeTable1[298] = {
306, 1, 334, 1, 1073742176, 5, 367, 9,
- 1073742192, 5, 383, 9, 387, 13, 388, 13, // NOLINT
+ 1073742192, 5, 383, 9, 387, 13, 388, 13,
1073743030, 17, 1231, 21, 1073743056, 17, 1257, 21,
- 1073744896, 25, 3118, 29, 1073744944, 25, 3166, 29, // NOLINT
+ 1073744896, 25, 3118, 29, 1073744944, 25, 3166, 29,
3168, 33, 3169, 33, 3170, 37, 3171, 41,
- 3172, 45, 3173, 49, 3174, 53, 3175, 57, // NOLINT
+ 3172, 45, 3173, 49, 3174, 53, 3175, 57,
3176, 57, 3177, 61, 3178, 61, 3179, 65,
- 3180, 65, 3181, 69, 3182, 73, 3183, 77, // NOLINT
+ 3180, 65, 3181, 69, 3182, 73, 3183, 77,
3184, 81, 3186, 85, 3187, 85, 3189, 89,
- 3190, 89, 1073745022, 93, 3199, 97, 3200, 101, // NOLINT
+ 3190, 89, 1073745022, 93, 3199, 97, 3200, 101,
3201, 101, 3202, 105, 3203, 105, 3204, 109,
- 3205, 109, 3206, 113, 3207, 113, 3208, 117, // NOLINT
+ 3205, 109, 3206, 113, 3207, 113, 3208, 117,
3209, 117, 3210, 121, 3211, 121, 3212, 125,
- 3213, 125, 3214, 129, 3215, 129, 3216, 133, // NOLINT
+ 3213, 125, 3214, 129, 3215, 129, 3216, 133,
3217, 133, 3218, 137, 3219, 137, 3220, 141,
- 3221, 141, 3222, 145, 3223, 145, 3224, 149, // NOLINT
+ 3221, 141, 3222, 145, 3223, 145, 3224, 149,
3225, 149, 3226, 153, 3227, 153, 3228, 157,
- 3229, 157, 3230, 161, 3231, 161, 3232, 165, // NOLINT
+ 3229, 157, 3230, 161, 3231, 161, 3232, 165,
3233, 165, 3234, 169, 3235, 169, 3236, 173,
- 3237, 173, 3238, 177, 3239, 177, 3240, 181, // NOLINT
+ 3237, 173, 3238, 177, 3239, 177, 3240, 181,
3241, 181, 3242, 185, 3243, 185, 3244, 189,
- 3245, 189, 3246, 193, 3247, 193, 3248, 197, // NOLINT
+ 3245, 189, 3246, 193, 3247, 193, 3248, 197,
3249, 197, 3250, 201, 3251, 201, 3252, 205,
- 3253, 205, 3254, 209, 3255, 209, 3256, 213, // NOLINT
+ 3253, 205, 3254, 209, 3255, 209, 3256, 213,
3257, 213, 3258, 217, 3259, 217, 3260, 221,
- 3261, 221, 3262, 225, 3263, 225, 3264, 229, // NOLINT
+ 3261, 221, 3262, 225, 3263, 225, 3264, 229,
3265, 229, 3266, 233, 3267, 233, 3268, 237,
- 3269, 237, 3270, 241, 3271, 241, 3272, 245, // NOLINT
+ 3269, 237, 3270, 241, 3271, 241, 3272, 245,
3273, 245, 3274, 249, 3275, 249, 3276, 253,
- 3277, 253, 3278, 257, 3279, 257, 3280, 261, // NOLINT
+ 3277, 253, 3278, 257, 3279, 257, 3280, 261,
3281, 261, 3282, 265, 3283, 265, 3284, 269,
- 3285, 269, 3286, 273, 3287, 273, 3288, 277, // NOLINT
+ 3285, 269, 3286, 273, 3287, 273, 3288, 277,
3289, 277, 3290, 281, 3291, 281, 3292, 285,
- 3293, 285, 3294, 289, 3295, 289, 3296, 293, // NOLINT
+ 3293, 285, 3294, 289, 3295, 289, 3296, 293,
3297, 293, 3298, 297, 3299, 297, 3307, 301,
- 3308, 301, 3309, 305, 3310, 305, 3314, 309, // NOLINT
+ 3308, 301, 3309, 305, 3310, 305, 3314, 309,
3315, 309, 1073745152, 313, 3365, 317, 3367, 321,
- 3373, 325}; // NOLINT
-static const uint16_t kEcma262UnCanonicalizeMultiStrings1Size = 83; // NOLINT
+ 3373, 325};
+static const uint16_t kEcma262UnCanonicalizeMultiStrings1Size = 83;
static const MultiCharacterSpecialCase<2>
- kEcma262UnCanonicalizeMultiStrings5[104] = { // NOLINT
- {{42560, 42561}}, {{42562, 42563}},
- {{42564, 42565}}, {{42566, 42567}}, // NOLINT
- {{42568, 42569}}, {{42570, 42571}},
- {{42572, 42573}}, {{42574, 42575}}, // NOLINT
- {{42576, 42577}}, {{42578, 42579}},
- {{42580, 42581}}, {{42582, 42583}}, // NOLINT
- {{42584, 42585}}, {{42586, 42587}},
- {{42588, 42589}}, {{42590, 42591}}, // NOLINT
- {{42592, 42593}}, {{42594, 42595}},
- {{42596, 42597}}, {{42598, 42599}}, // NOLINT
- {{42600, 42601}}, {{42602, 42603}},
- {{42604, 42605}}, {{42624, 42625}}, // NOLINT
- {{42626, 42627}}, {{42628, 42629}},
- {{42630, 42631}}, {{42632, 42633}}, // NOLINT
- {{42634, 42635}}, {{42636, 42637}},
- {{42638, 42639}}, {{42640, 42641}}, // NOLINT
- {{42642, 42643}}, {{42644, 42645}},
- {{42646, 42647}}, {{42648, 42649}}, // NOLINT
- {{42650, 42651}}, {{42786, 42787}},
- {{42788, 42789}}, {{42790, 42791}}, // NOLINT
- {{42792, 42793}}, {{42794, 42795}},
- {{42796, 42797}}, {{42798, 42799}}, // NOLINT
- {{42802, 42803}}, {{42804, 42805}},
- {{42806, 42807}}, {{42808, 42809}}, // NOLINT
- {{42810, 42811}}, {{42812, 42813}},
- {{42814, 42815}}, {{42816, 42817}}, // NOLINT
- {{42818, 42819}}, {{42820, 42821}},
- {{42822, 42823}}, {{42824, 42825}}, // NOLINT
- {{42826, 42827}}, {{42828, 42829}},
- {{42830, 42831}}, {{42832, 42833}}, // NOLINT
- {{42834, 42835}}, {{42836, 42837}},
- {{42838, 42839}}, {{42840, 42841}}, // NOLINT
- {{42842, 42843}}, {{42844, 42845}},
- {{42846, 42847}}, {{42848, 42849}}, // NOLINT
- {{42850, 42851}}, {{42852, 42853}},
- {{42854, 42855}}, {{42856, 42857}}, // NOLINT
- {{42858, 42859}}, {{42860, 42861}},
- {{42862, 42863}}, {{42873, 42874}}, // NOLINT
- {{42875, 42876}}, {{7545, 42877}},
- {{42878, 42879}}, {{42880, 42881}}, // NOLINT
- {{42882, 42883}}, {{42884, 42885}},
- {{42886, 42887}}, {{42891, 42892}}, // NOLINT
- {{613, 42893}}, {{42896, 42897}},
- {{42898, 42899}}, {{42902, 42903}}, // NOLINT
- {{42904, 42905}}, {{42906, 42907}},
- {{42908, 42909}}, {{42910, 42911}}, // NOLINT
- {{42912, 42913}}, {{42914, 42915}},
- {{42916, 42917}}, {{42918, 42919}}, // NOLINT
- {{42920, 42921}}, {{614, 42922}},
- {{604, 42923}}, {{609, 42924}}, // NOLINT
- {{620, 42925}}, {{670, 42928}},
- {{647, 42929}}, {{kSentinel}}}; // NOLINT
-static const uint16_t kEcma262UnCanonicalizeTable5Size = 198; // NOLINT
-static const int32_t
- kEcma262UnCanonicalizeTable5[396] =
- {1600, 1, 1601, 1, 1602, 5, 1603, 5,
- 1604, 9, 1605, 9, 1606, 13, 1607, 13, // NOLINT
- 1608, 17, 1609, 17, 1610, 21, 1611, 21,
- 1612, 25, 1613, 25, 1614, 29, 1615, 29, // NOLINT
- 1616, 33, 1617, 33, 1618, 37, 1619, 37,
- 1620, 41, 1621, 41, 1622, 45, 1623, 45, // NOLINT
- 1624, 49, 1625, 49, 1626, 53, 1627, 53,
- 1628, 57, 1629, 57, 1630, 61, 1631, 61, // NOLINT
- 1632, 65, 1633, 65, 1634, 69, 1635, 69,
- 1636, 73, 1637, 73, 1638, 77, 1639, 77, // NOLINT
- 1640, 81, 1641, 81, 1642, 85, 1643, 85,
- 1644, 89, 1645, 89, 1664, 93, 1665, 93, // NOLINT
- 1666, 97, 1667, 97, 1668, 101, 1669, 101,
- 1670, 105, 1671, 105, 1672, 109, 1673, 109, // NOLINT
- 1674, 113, 1675, 113, 1676, 117, 1677, 117,
- 1678, 121, 1679, 121, 1680, 125, 1681, 125, // NOLINT
- 1682, 129, 1683, 129, 1684, 133, 1685, 133,
- 1686, 137, 1687, 137, 1688, 141, 1689, 141, // NOLINT
- 1690, 145, 1691, 145, 1826, 149, 1827, 149,
- 1828, 153, 1829, 153, 1830, 157, 1831, 157, // NOLINT
- 1832, 161, 1833, 161, 1834, 165, 1835, 165,
- 1836, 169, 1837, 169, 1838, 173, 1839, 173, // NOLINT
- 1842, 177, 1843, 177, 1844, 181, 1845, 181,
- 1846, 185, 1847, 185, 1848, 189, 1849, 189, // NOLINT
- 1850, 193, 1851, 193, 1852, 197, 1853, 197,
- 1854, 201, 1855, 201, 1856, 205, 1857, 205, // NOLINT
- 1858, 209, 1859, 209, 1860, 213, 1861, 213,
- 1862, 217, 1863, 217, 1864, 221, 1865, 221, // NOLINT
- 1866, 225, 1867, 225, 1868, 229, 1869, 229,
- 1870, 233, 1871, 233, 1872, 237, 1873, 237, // NOLINT
- 1874, 241, 1875, 241, 1876, 245, 1877, 245,
- 1878, 249, 1879, 249, 1880, 253, 1881, 253, // NOLINT
- 1882, 257, 1883, 257, 1884, 261, 1885, 261,
- 1886, 265, 1887, 265, 1888, 269, 1889, 269, // NOLINT
- 1890, 273, 1891, 273, 1892, 277, 1893, 277,
- 1894, 281, 1895, 281, 1896, 285, 1897, 285, // NOLINT
- 1898, 289, 1899, 289, 1900, 293, 1901, 293,
- 1902, 297, 1903, 297, 1913, 301, 1914, 301, // NOLINT
- 1915, 305, 1916, 305, 1917, 309, 1918, 313,
- 1919, 313, 1920, 317, 1921, 317, 1922, 321, // NOLINT
- 1923, 321, 1924, 325, 1925, 325, 1926, 329,
- 1927, 329, 1931, 333, 1932, 333, 1933, 337, // NOLINT
- 1936, 341, 1937, 341, 1938, 345, 1939, 345,
- 1942, 349, 1943, 349, 1944, 353, 1945, 353, // NOLINT
- 1946, 357, 1947, 357, 1948, 361, 1949, 361,
- 1950, 365, 1951, 365, 1952, 369, 1953, 369, // NOLINT
- 1954, 373, 1955, 373, 1956, 377, 1957, 377,
- 1958, 381, 1959, 381, 1960, 385, 1961, 385, // NOLINT
- 1962, 389, 1963, 393, 1964, 397, 1965, 401,
- 1968, 405, 1969, 409}; // NOLINT
-static const uint16_t kEcma262UnCanonicalizeMultiStrings5Size = 104; // NOLINT
+ kEcma262UnCanonicalizeMultiStrings5[104] = {
+ {{42560, 42561}}, {{42562, 42563}}, {{42564, 42565}}, {{42566, 42567}},
+ {{42568, 42569}}, {{42570, 42571}}, {{42572, 42573}}, {{42574, 42575}},
+ {{42576, 42577}}, {{42578, 42579}}, {{42580, 42581}}, {{42582, 42583}},
+ {{42584, 42585}}, {{42586, 42587}}, {{42588, 42589}}, {{42590, 42591}},
+ {{42592, 42593}}, {{42594, 42595}}, {{42596, 42597}}, {{42598, 42599}},
+ {{42600, 42601}}, {{42602, 42603}}, {{42604, 42605}}, {{42624, 42625}},
+ {{42626, 42627}}, {{42628, 42629}}, {{42630, 42631}}, {{42632, 42633}},
+ {{42634, 42635}}, {{42636, 42637}}, {{42638, 42639}}, {{42640, 42641}},
+ {{42642, 42643}}, {{42644, 42645}}, {{42646, 42647}}, {{42648, 42649}},
+ {{42650, 42651}}, {{42786, 42787}}, {{42788, 42789}}, {{42790, 42791}},
+ {{42792, 42793}}, {{42794, 42795}}, {{42796, 42797}}, {{42798, 42799}},
+ {{42802, 42803}}, {{42804, 42805}}, {{42806, 42807}}, {{42808, 42809}},
+ {{42810, 42811}}, {{42812, 42813}}, {{42814, 42815}}, {{42816, 42817}},
+ {{42818, 42819}}, {{42820, 42821}}, {{42822, 42823}}, {{42824, 42825}},
+ {{42826, 42827}}, {{42828, 42829}}, {{42830, 42831}}, {{42832, 42833}},
+ {{42834, 42835}}, {{42836, 42837}}, {{42838, 42839}}, {{42840, 42841}},
+ {{42842, 42843}}, {{42844, 42845}}, {{42846, 42847}}, {{42848, 42849}},
+ {{42850, 42851}}, {{42852, 42853}}, {{42854, 42855}}, {{42856, 42857}},
+ {{42858, 42859}}, {{42860, 42861}}, {{42862, 42863}}, {{42873, 42874}},
+ {{42875, 42876}}, {{7545, 42877}}, {{42878, 42879}}, {{42880, 42881}},
+ {{42882, 42883}}, {{42884, 42885}}, {{42886, 42887}}, {{42891, 42892}},
+ {{613, 42893}}, {{42896, 42897}}, {{42898, 42899}}, {{42902, 42903}},
+ {{42904, 42905}}, {{42906, 42907}}, {{42908, 42909}}, {{42910, 42911}},
+ {{42912, 42913}}, {{42914, 42915}}, {{42916, 42917}}, {{42918, 42919}},
+ {{42920, 42921}}, {{614, 42922}}, {{604, 42923}}, {{609, 42924}},
+ {{620, 42925}}, {{670, 42928}}, {{647, 42929}}, {{kSentinel}}};
+static const uint16_t kEcma262UnCanonicalizeTable5Size = 198;
+static const int32_t kEcma262UnCanonicalizeTable5[396] = {
+ 1600, 1, 1601, 1, 1602, 5, 1603, 5, 1604, 9, 1605, 9, 1606, 13,
+ 1607, 13, 1608, 17, 1609, 17, 1610, 21, 1611, 21, 1612, 25, 1613, 25,
+ 1614, 29, 1615, 29, 1616, 33, 1617, 33, 1618, 37, 1619, 37, 1620, 41,
+ 1621, 41, 1622, 45, 1623, 45, 1624, 49, 1625, 49, 1626, 53, 1627, 53,
+ 1628, 57, 1629, 57, 1630, 61, 1631, 61, 1632, 65, 1633, 65, 1634, 69,
+ 1635, 69, 1636, 73, 1637, 73, 1638, 77, 1639, 77, 1640, 81, 1641, 81,
+ 1642, 85, 1643, 85, 1644, 89, 1645, 89, 1664, 93, 1665, 93, 1666, 97,
+ 1667, 97, 1668, 101, 1669, 101, 1670, 105, 1671, 105, 1672, 109, 1673, 109,
+ 1674, 113, 1675, 113, 1676, 117, 1677, 117, 1678, 121, 1679, 121, 1680, 125,
+ 1681, 125, 1682, 129, 1683, 129, 1684, 133, 1685, 133, 1686, 137, 1687, 137,
+ 1688, 141, 1689, 141, 1690, 145, 1691, 145, 1826, 149, 1827, 149, 1828, 153,
+ 1829, 153, 1830, 157, 1831, 157, 1832, 161, 1833, 161, 1834, 165, 1835, 165,
+ 1836, 169, 1837, 169, 1838, 173, 1839, 173, 1842, 177, 1843, 177, 1844, 181,
+ 1845, 181, 1846, 185, 1847, 185, 1848, 189, 1849, 189, 1850, 193, 1851, 193,
+ 1852, 197, 1853, 197, 1854, 201, 1855, 201, 1856, 205, 1857, 205, 1858, 209,
+ 1859, 209, 1860, 213, 1861, 213, 1862, 217, 1863, 217, 1864, 221, 1865, 221,
+ 1866, 225, 1867, 225, 1868, 229, 1869, 229, 1870, 233, 1871, 233, 1872, 237,
+ 1873, 237, 1874, 241, 1875, 241, 1876, 245, 1877, 245, 1878, 249, 1879, 249,
+ 1880, 253, 1881, 253, 1882, 257, 1883, 257, 1884, 261, 1885, 261, 1886, 265,
+ 1887, 265, 1888, 269, 1889, 269, 1890, 273, 1891, 273, 1892, 277, 1893, 277,
+ 1894, 281, 1895, 281, 1896, 285, 1897, 285, 1898, 289, 1899, 289, 1900, 293,
+ 1901, 293, 1902, 297, 1903, 297, 1913, 301, 1914, 301, 1915, 305, 1916, 305,
+ 1917, 309, 1918, 313, 1919, 313, 1920, 317, 1921, 317, 1922, 321, 1923, 321,
+ 1924, 325, 1925, 325, 1926, 329, 1927, 329, 1931, 333, 1932, 333, 1933, 337,
+ 1936, 341, 1937, 341, 1938, 345, 1939, 345, 1942, 349, 1943, 349, 1944, 353,
+ 1945, 353, 1946, 357, 1947, 357, 1948, 361, 1949, 361, 1950, 365, 1951, 365,
+ 1952, 369, 1953, 369, 1954, 373, 1955, 373, 1956, 377, 1957, 377, 1958, 381,
+ 1959, 381, 1960, 385, 1961, 385, 1962, 389, 1963, 393, 1964, 397, 1965, 401,
+ 1968, 405, 1969, 409};
+static const uint16_t kEcma262UnCanonicalizeMultiStrings5Size = 104;
static const MultiCharacterSpecialCase<2>
- kEcma262UnCanonicalizeMultiStrings7[3] = { // NOLINT
- {{65313, 65345}},
- {{65338, 65370}},
- {{kSentinel}}}; // NOLINT
-static const uint16_t kEcma262UnCanonicalizeTable7Size = 4; // NOLINT
-static const int32_t kEcma262UnCanonicalizeTable7[8] = {
- 1073749793, 1, 7994, 5, 1073749825, 1, 8026, 5}; // NOLINT
-static const uint16_t kEcma262UnCanonicalizeMultiStrings7Size = 3; // NOLINT
+ kEcma262UnCanonicalizeMultiStrings7[3] = {
+ {{65313, 65345}}, {{65338, 65370}}, {{kSentinel}}};
+static const uint16_t kEcma262UnCanonicalizeTable7Size = 4;
+static const int32_t kEcma262UnCanonicalizeTable7[8] = {1073749793, 1, 7994, 5,
+ 1073749825, 1, 8026, 5};
+static const uint16_t kEcma262UnCanonicalizeMultiStrings7Size = 3;
int Ecma262UnCanonicalize::Convert(uchar c, uchar n, uchar* result,
bool* allow_caching_ptr) {
int chunk_index = c >> 13;
@@ -3057,46 +2485,43 @@ int Ecma262UnCanonicalize::Convert(uchar c, uchar n, uchar* result,
}
static const MultiCharacterSpecialCase<1>
- kCanonicalizationRangeMultiStrings0[1] = { // NOLINT
- {{kSentinel}}}; // NOLINT
-static const uint16_t kCanonicalizationRangeTable0Size = 70; // NOLINT
+ kCanonicalizationRangeMultiStrings0[1] = {{{kSentinel}}};
+static const uint16_t kCanonicalizationRangeTable0Size = 70;
static const int32_t kCanonicalizationRangeTable0[140] = {
1073741889, 100, 90, 0, 1073741921, 100, 122, 0,
- 1073742016, 88, 214, 0, 1073742040, 24, 222, 0, // NOLINT
+ 1073742016, 88, 214, 0, 1073742040, 24, 222, 0,
1073742048, 88, 246, 0, 1073742072, 24, 254, 0,
- 1073742715, 8, 893, 0, 1073742728, 8, 906, 0, // NOLINT
+ 1073742715, 8, 893, 0, 1073742728, 8, 906, 0,
1073742749, 8, 927, 0, 1073742759, 16, 939, 0,
- 1073742765, 8, 943, 0, 1073742781, 8, 959, 0, // NOLINT
+ 1073742765, 8, 943, 0, 1073742781, 8, 959, 0,
1073742791, 16, 971, 0, 1073742845, 8, 1023, 0,
- 1073742848, 60, 1039, 0, 1073742864, 124, 1071, 0, // NOLINT
+ 1073742848, 60, 1039, 0, 1073742864, 124, 1071, 0,
1073742896, 124, 1103, 0, 1073742928, 60, 1119, 0,
- 1073743153, 148, 1366, 0, 1073743201, 148, 1414, 0, // NOLINT
+ 1073743153, 148, 1366, 0, 1073743201, 148, 1414, 0,
1073746080, 148, 4293, 0, 1073749760, 28, 7943, 0,
- 1073749768, 28, 7951, 0, 1073749776, 20, 7957, 0, // NOLINT
+ 1073749768, 28, 7951, 0, 1073749776, 20, 7957, 0,
1073749784, 20, 7965, 0, 1073749792, 28, 7975, 0,
- 1073749800, 28, 7983, 0, 1073749808, 28, 7991, 0, // NOLINT
+ 1073749800, 28, 7983, 0, 1073749808, 28, 7991, 0,
1073749816, 28, 7999, 0, 1073749824, 20, 8005, 0,
- 1073749832, 20, 8013, 0, 1073749856, 28, 8039, 0, // NOLINT
+ 1073749832, 20, 8013, 0, 1073749856, 28, 8039, 0,
1073749864, 28, 8047, 0, 1073749874, 12, 8053, 0,
- 1073749960, 12, 8139, 0}; // NOLINT
-static const uint16_t kCanonicalizationRangeMultiStrings0Size = 1; // NOLINT
+ 1073749960, 12, 8139, 0};
+static const uint16_t kCanonicalizationRangeMultiStrings0Size = 1;
static const MultiCharacterSpecialCase<1>
- kCanonicalizationRangeMultiStrings1[1] = { // NOLINT
- {{kSentinel}}}; // NOLINT
-static const uint16_t kCanonicalizationRangeTable1Size = 14; // NOLINT
+ kCanonicalizationRangeMultiStrings1[1] = {{{kSentinel}}};
+static const uint16_t kCanonicalizationRangeTable1Size = 14;
static const int32_t kCanonicalizationRangeTable1[28] = {
1073742176, 60, 367, 0, 1073742192, 60, 383, 0,
- 1073743030, 100, 1231, 0, 1073743056, 100, 1257, 0, // NOLINT
+ 1073743030, 100, 1231, 0, 1073743056, 100, 1257, 0,
1073744896, 184, 3118, 0, 1073744944, 184, 3166, 0,
- 1073745152, 148, 3365, 0}; // NOLINT
-static const uint16_t kCanonicalizationRangeMultiStrings1Size = 1; // NOLINT
+ 1073745152, 148, 3365, 0};
+static const uint16_t kCanonicalizationRangeMultiStrings1Size = 1;
static const MultiCharacterSpecialCase<1>
- kCanonicalizationRangeMultiStrings7[1] = { // NOLINT
- {{kSentinel}}}; // NOLINT
-static const uint16_t kCanonicalizationRangeTable7Size = 4; // NOLINT
+ kCanonicalizationRangeMultiStrings7[1] = {{{kSentinel}}};
+static const uint16_t kCanonicalizationRangeTable7Size = 4;
static const int32_t kCanonicalizationRangeTable7[8] = {
- 1073749793, 100, 7994, 0, 1073749825, 100, 8026, 0}; // NOLINT
-static const uint16_t kCanonicalizationRangeMultiStrings7Size = 1; // NOLINT
+ 1073749793, 100, 7994, 0, 1073749825, 100, 8026, 0};
+static const uint16_t kCanonicalizationRangeMultiStrings7Size = 1;
int CanonicalizationRange::Convert(uchar c, uchar n, uchar* result,
bool* allow_caching_ptr) {
int chunk_index = c >> 13;
@@ -3121,71 +2546,63 @@ int CanonicalizationRange::Convert(uchar c, uchar n, uchar* result,
const uchar UnicodeData::kMaxCodePoint = 0xFFFD;
int UnicodeData::GetByteCount() {
- return kUppercaseTable0Size * sizeof(int32_t) // NOLINT
- + kUppercaseTable1Size * sizeof(int32_t) // NOLINT
- + kUppercaseTable5Size * sizeof(int32_t) // NOLINT
- + kUppercaseTable7Size * sizeof(int32_t) // NOLINT
- + kLetterTable0Size * sizeof(int32_t) // NOLINT
- + kLetterTable1Size * sizeof(int32_t) // NOLINT
- + kLetterTable2Size * sizeof(int32_t) // NOLINT
- + kLetterTable3Size * sizeof(int32_t) // NOLINT
- + kLetterTable4Size * sizeof(int32_t) // NOLINT
- + kLetterTable5Size * sizeof(int32_t) // NOLINT
- + kLetterTable6Size * sizeof(int32_t) // NOLINT
- + kLetterTable7Size * sizeof(int32_t) // NOLINT
- + kID_StartTable0Size * sizeof(int32_t) // NOLINT
- + kID_StartTable1Size * sizeof(int32_t) // NOLINT
- + kID_StartTable2Size * sizeof(int32_t) // NOLINT
- + kID_StartTable3Size * sizeof(int32_t) // NOLINT
- + kID_StartTable4Size * sizeof(int32_t) // NOLINT
- + kID_StartTable5Size * sizeof(int32_t) // NOLINT
- + kID_StartTable6Size * sizeof(int32_t) // NOLINT
- + kID_StartTable7Size * sizeof(int32_t) // NOLINT
- + kID_ContinueTable0Size * sizeof(int32_t) // NOLINT
- + kID_ContinueTable1Size * sizeof(int32_t) // NOLINT
- + kID_ContinueTable5Size * sizeof(int32_t) // NOLINT
- + kID_ContinueTable7Size * sizeof(int32_t) // NOLINT
- + kWhiteSpaceTable0Size * sizeof(int32_t) // NOLINT
- + kWhiteSpaceTable1Size * sizeof(int32_t) // NOLINT
- + kWhiteSpaceTable7Size * sizeof(int32_t) // NOLINT
- + kToLowercaseMultiStrings0Size *
- sizeof(MultiCharacterSpecialCase<2>) // NOLINT
- + kToLowercaseMultiStrings1Size *
- sizeof(MultiCharacterSpecialCase<1>) // NOLINT
- + kToLowercaseMultiStrings5Size *
- sizeof(MultiCharacterSpecialCase<1>) // NOLINT
- + kToLowercaseMultiStrings7Size *
- sizeof(MultiCharacterSpecialCase<1>) // NOLINT
- + kToUppercaseMultiStrings0Size *
- sizeof(MultiCharacterSpecialCase<3>) // NOLINT
- + kToUppercaseMultiStrings1Size *
- sizeof(MultiCharacterSpecialCase<1>) // NOLINT
- + kToUppercaseMultiStrings5Size *
- sizeof(MultiCharacterSpecialCase<1>) // NOLINT
- + kToUppercaseMultiStrings7Size *
- sizeof(MultiCharacterSpecialCase<3>) // NOLINT
- + kEcma262CanonicalizeMultiStrings0Size *
- sizeof(MultiCharacterSpecialCase<1>) // NOLINT
- + kEcma262CanonicalizeMultiStrings1Size *
- sizeof(MultiCharacterSpecialCase<1>) // NOLINT
- + kEcma262CanonicalizeMultiStrings5Size *
- sizeof(MultiCharacterSpecialCase<1>) // NOLINT
- + kEcma262CanonicalizeMultiStrings7Size *
- sizeof(MultiCharacterSpecialCase<1>) // NOLINT
- + kEcma262UnCanonicalizeMultiStrings0Size *
- sizeof(MultiCharacterSpecialCase<4>) // NOLINT
- + kEcma262UnCanonicalizeMultiStrings1Size *
- sizeof(MultiCharacterSpecialCase<2>) // NOLINT
- + kEcma262UnCanonicalizeMultiStrings5Size *
- sizeof(MultiCharacterSpecialCase<2>) // NOLINT
- + kEcma262UnCanonicalizeMultiStrings7Size *
- sizeof(MultiCharacterSpecialCase<2>) // NOLINT
- + kCanonicalizationRangeMultiStrings0Size *
- sizeof(MultiCharacterSpecialCase<1>) // NOLINT
- + kCanonicalizationRangeMultiStrings1Size *
- sizeof(MultiCharacterSpecialCase<1>) // NOLINT
- + kCanonicalizationRangeMultiStrings7Size *
- sizeof(MultiCharacterSpecialCase<1>); // NOLINT
+ return kUppercaseTable0Size * sizeof(int32_t) +
+ kUppercaseTable1Size * sizeof(int32_t) +
+ kUppercaseTable5Size * sizeof(int32_t) +
+ kUppercaseTable7Size * sizeof(int32_t) +
+ kLetterTable0Size * sizeof(int32_t) +
+ kLetterTable1Size * sizeof(int32_t) +
+ kLetterTable2Size * sizeof(int32_t) +
+ kLetterTable3Size * sizeof(int32_t) +
+ kLetterTable4Size * sizeof(int32_t) +
+ kLetterTable5Size * sizeof(int32_t) +
+ kLetterTable6Size * sizeof(int32_t) +
+ kLetterTable7Size * sizeof(int32_t) +
+ kID_StartTable0Size * sizeof(int32_t) +
+ kID_StartTable1Size * sizeof(int32_t) +
+ kID_StartTable2Size * sizeof(int32_t) +
+ kID_StartTable3Size * sizeof(int32_t) +
+ kID_StartTable4Size * sizeof(int32_t) +
+ kID_StartTable5Size * sizeof(int32_t) +
+ kID_StartTable6Size * sizeof(int32_t) +
+ kID_StartTable7Size * sizeof(int32_t) +
+ kID_ContinueTable0Size * sizeof(int32_t) +
+ kID_ContinueTable1Size * sizeof(int32_t) +
+ kID_ContinueTable5Size * sizeof(int32_t) +
+ kID_ContinueTable7Size * sizeof(int32_t) +
+ kWhiteSpaceTable0Size * sizeof(int32_t) +
+ kWhiteSpaceTable1Size * sizeof(int32_t) +
+ kWhiteSpaceTable7Size * sizeof(int32_t) +
+ kToLowercaseMultiStrings0Size * sizeof(MultiCharacterSpecialCase<2>) +
+ kToLowercaseMultiStrings1Size * sizeof(MultiCharacterSpecialCase<1>) +
+ kToLowercaseMultiStrings5Size * sizeof(MultiCharacterSpecialCase<1>) +
+ kToLowercaseMultiStrings7Size * sizeof(MultiCharacterSpecialCase<1>) +
+ kToUppercaseMultiStrings0Size * sizeof(MultiCharacterSpecialCase<3>) +
+ kToUppercaseMultiStrings1Size * sizeof(MultiCharacterSpecialCase<1>) +
+ kToUppercaseMultiStrings5Size * sizeof(MultiCharacterSpecialCase<1>) +
+ kToUppercaseMultiStrings7Size * sizeof(MultiCharacterSpecialCase<3>) +
+ kEcma262CanonicalizeMultiStrings0Size *
+ sizeof(MultiCharacterSpecialCase<1>) +
+ kEcma262CanonicalizeMultiStrings1Size *
+ sizeof(MultiCharacterSpecialCase<1>) +
+ kEcma262CanonicalizeMultiStrings5Size *
+ sizeof(MultiCharacterSpecialCase<1>) +
+ kEcma262CanonicalizeMultiStrings7Size *
+ sizeof(MultiCharacterSpecialCase<1>) +
+ kEcma262UnCanonicalizeMultiStrings0Size *
+ sizeof(MultiCharacterSpecialCase<4>) +
+ kEcma262UnCanonicalizeMultiStrings1Size *
+ sizeof(MultiCharacterSpecialCase<2>) +
+ kEcma262UnCanonicalizeMultiStrings5Size *
+ sizeof(MultiCharacterSpecialCase<2>) +
+ kEcma262UnCanonicalizeMultiStrings7Size *
+ sizeof(MultiCharacterSpecialCase<2>) +
+ kCanonicalizationRangeMultiStrings0Size *
+ sizeof(MultiCharacterSpecialCase<1>) +
+ kCanonicalizationRangeMultiStrings1Size *
+ sizeof(MultiCharacterSpecialCase<1>) +
+ kCanonicalizationRangeMultiStrings7Size *
+ sizeof(MultiCharacterSpecialCase<1>);
}
#endif // !V8_INTL_SUPPORT
diff --git a/chromium/v8/src/third_party/vtune/BUILD.gn b/chromium/v8/src/third_party/vtune/BUILD.gn
index f699ab147ef..e8582dbb793 100644
--- a/chromium/v8/src/third_party/vtune/BUILD.gn
+++ b/chromium/v8/src/third_party/vtune/BUILD.gn
@@ -22,9 +22,7 @@ static_library("v8_vtune") {
"vtune-jit.h",
]
configs += [ ":vtune_ittapi" ]
- deps = [
- "../../..:v8",
- ]
+ deps = [ "../../..:v8" ]
}
static_library("v8_vtune_trace_mark") {
@@ -33,16 +31,13 @@ static_library("v8_vtune_trace_mark") {
"vtuneapi.cc",
"vtuneapi.h",
]
- deps = [
- ":ittnotify",
- ]
+ deps = [ ":ittnotify" ]
}
static_library("ittnotify") {
sources = [
"//third_party/ittapi/include/ittnotify.h",
"//third_party/ittapi/include/legacy/ittnotify.h",
- "//third_party/ittapi/src/ittnotify/disable-warnings.h",
"//third_party/ittapi/src/ittnotify/ittnotify_config.h",
"//third_party/ittapi/src/ittnotify/ittnotify_static.c",
"//third_party/ittapi/src/ittnotify/ittnotify_static.h",
diff --git a/chromium/v8/src/torque/class-debug-reader-generator.cc b/chromium/v8/src/torque/class-debug-reader-generator.cc
index 19ac671bd79..5f45ea3adef 100644
--- a/chromium/v8/src/torque/class-debug-reader-generator.cc
+++ b/chromium/v8/src/torque/class-debug-reader-generator.cc
@@ -285,6 +285,11 @@ void GenerateFieldValueAccessor(const Field& field,
cc_contents << " d::MemoryAccessResult validity = accessor("
<< address_getter << "()" << index_offset
<< ", reinterpret_cast<uint8_t*>(&value), sizeof(value));\n";
+#ifdef V8_MAP_PACKING
+ if (field_getter == "GetMapValue") {
+ cc_contents << " value = i::MapWord::Unpack(value);\n";
+ }
+#endif
cc_contents << " return {validity, "
<< (debug_field_type.IsTagged()
? "EnsureDecompressed(value, address_)"
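A rough standalone sketch of what the extra step amounts to in the emitted accessor: the raw word read out of the object gets decoded before it is handed back to the debug reader. The XOR mask below is purely illustrative and only stands in for i::MapWord::Unpack(); GetMapValue mirrors the generator's field_getter string.

#include <cstdint>
#include <cstring>
#include <iostream>

// Illustrative stand-in for i::MapWord::Unpack(); the real packing scheme is
// not reproduced here, the XOR merely marks where decoding happens.
constexpr uint64_t kIllustrativeMask = 0xabcdef0123456789ull;
uint64_t Unpack(uint64_t packed) { return packed ^ kIllustrativeMask; }

// Shape of the generated accessor: read the raw word, then (only under
// V8_MAP_PACKING) decode it before returning it.
uint64_t GetMapValue(const uint8_t* object_base) {
  uint64_t value;
  std::memcpy(&value, object_base, sizeof(value));
#ifdef V8_MAP_PACKING
  value = Unpack(value);
#endif
  return value;
}

int main() {
  uint64_t packed_map_word = Unpack(0x1234);  // pack == unpack for a plain XOR
  std::cout << std::hex
            << GetMapValue(reinterpret_cast<const uint8_t*>(&packed_map_word))
            << "\n";
}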
diff --git a/chromium/v8/src/torque/constants.h b/chromium/v8/src/torque/constants.h
index 2a4e6fcb5b3..bb0ae2b69ef 100644
--- a/chromium/v8/src/torque/constants.h
+++ b/chromium/v8/src/torque/constants.h
@@ -107,15 +107,15 @@ static const char* const ANNOTATION_EXPORT = "@export";
static const char* const ANNOTATION_DO_NOT_GENERATE_CAST = "@doNotGenerateCast";
static const char* const ANNOTATION_USE_PARENT_TYPE_CHECKER =
"@useParentTypeChecker";
-// Generate C++ accessors with relaxed write semantics.
-// Weak<T> and MaybeObject fields always use relaxed write.
-static const char* const ANNOTATION_RELAXED_WRITE = "@relaxedWrite";
-// Generate C++ accessors with relaxed read semantics.
-static const char* const ANNOTATION_RELAXED_READ = "@relaxedRead";
-// Generate C++ accessors with release write semantics.
-static const char* const ANNOTATION_RELEASE_WRITE = "@releaseWrite";
-// Generate C++ accessors with acquire read semantics.
-static const char* const ANNOTATION_ACQUIRE_READ = "@acquireRead";
+// Generate C++ accessors with relaxed store semantics.
+// Weak<T> and MaybeObject fields always use relaxed store.
+static const char* const ANNOTATION_CPP_RELAXED_STORE = "@cppRelaxedStore";
+// Generate C++ accessors with relaxed load semantics.
+static const char* const ANNOTATION_CPP_RELAXED_LOAD = "@cppRelaxedLoad";
+// Generate C++ accessors with release store semantics.
+static const char* const ANNOTATION_CPP_RELEASE_STORE = "@cppReleaseStore";
+// Generate C++ accessors with acquire load semantics.
+static const char* const ANNOTATION_CPP_ACQUIRE_LOAD = "@cppAcquireLoad";
inline bool IsConstexprName(const std::string& name) {
return name.substr(0, std::strlen(CONSTEXPR_TYPE_PREFIX)) ==
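The renamed annotations only change which C++ accessor flavour Torque emits; the semantics are the usual relaxed/acquire/release atomics selected by tag dispatch. A minimal, self-contained sketch of that shape, where the class, field, and tag structs are stand-ins rather than the real V8 declarations:

#include <atomic>

// Stand-in tag types mirroring the dispatch style of the generated accessors.
struct RelaxedLoadTag {};
struct AcquireLoadTag {};
struct RelaxedStoreTag {};
struct ReleaseStoreTag {};

class ExampleObject {
 public:
  // @cppRelaxedLoad / @cppAcquireLoad pick the load flavour...
  int state(RelaxedLoadTag) const { return state_.load(std::memory_order_relaxed); }
  int state(AcquireLoadTag) const { return state_.load(std::memory_order_acquire); }
  // ...and @cppRelaxedStore / @cppReleaseStore pick the store flavour.
  void set_state(int v, RelaxedStoreTag) { state_.store(v, std::memory_order_relaxed); }
  void set_state(int v, ReleaseStoreTag) { state_.store(v, std::memory_order_release); }

 private:
  std::atomic<int> state_{0};
};

int main() {
  ExampleObject o;
  o.set_state(42, ReleaseStoreTag{});
  return o.state(AcquireLoadTag{}) == 42 ? 0 : 1;
}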
diff --git a/chromium/v8/src/torque/global-context.h b/chromium/v8/src/torque/global-context.h
index 403502b67bf..754d16e4a03 100644
--- a/chromium/v8/src/torque/global-context.h
+++ b/chromium/v8/src/torque/global-context.h
@@ -79,6 +79,8 @@ class GlobalContext : public ContextualClass<GlobalContext> {
std::stringstream class_definition_inline_headerfile;
std::stringstream class_definition_ccfile;
+
+ std::set<SourceId> required_builtin_includes;
};
static PerFileStreams& GeneratedPerFile(SourceId file) {
PerFileStreams& result = Get().generated_per_file_[file];
diff --git a/chromium/v8/src/torque/implementation-visitor.cc b/chromium/v8/src/torque/implementation-visitor.cc
index a2cf0fee866..b35a45d7b65 100644
--- a/chromium/v8/src/torque/implementation-visitor.cc
+++ b/chromium/v8/src/torque/implementation-visitor.cc
@@ -11,12 +11,14 @@
#include "src/base/optional.h"
#include "src/common/globals.h"
#include "src/torque/cc-generator.h"
+#include "src/torque/cfg.h"
#include "src/torque/constants.h"
#include "src/torque/csa-generator.h"
#include "src/torque/declaration-visitor.h"
#include "src/torque/global-context.h"
#include "src/torque/parameter-difference.h"
#include "src/torque/server-data.h"
+#include "src/torque/source-positions.h"
#include "src/torque/type-inference.h"
#include "src/torque/type-visitor.h"
#include "src/torque/types.h"
@@ -26,6 +28,10 @@ namespace v8 {
namespace internal {
namespace torque {
+namespace {
+const char* BuiltinIncludesMarker = "// __BUILTIN_INCLUDES_MARKER__\n";
+} // namespace
+
VisitResult ImplementationVisitor::Visit(Expression* expr) {
CurrentSourcePosition::Scope scope(expr->pos);
switch (expr->kind) {
@@ -76,11 +82,17 @@ void ImplementationVisitor::BeginGeneratedFiles() {
out << "#include " << StringLiteralQuote(include_path) << "\n";
}
- for (SourceId file : SourceFileMap::AllSources()) {
- out << "#include \"torque-generated/" +
- SourceFileMap::PathFromV8RootWithoutExtension(file) +
- "-tq-csa.h\"\n";
- }
+ out << "// Required Builtins:\n";
+ out << "#include \"torque-generated/" +
+ SourceFileMap::PathFromV8RootWithoutExtension(file) +
+ "-tq-csa.h\"\n";
+  // Because required include files are collected while generating the file,
+  // the full set is only known at the end. Insert a marker here that is
+  // replaced with the list of includes at the very end.
+  // TODO(nicohartmann@): This is not the most beautiful way to do this;
+  // replace it once the cpp file builder is available, where this can be
+  // handled easily.
+ out << BuiltinIncludesMarker;
out << "\n";
out << "namespace v8 {\n"
@@ -658,8 +670,8 @@ void ImplementationVisitor::Visit(Builtin* builtin) {
} else {
DCHECK(builtin->IsStub());
- bool has_context_parameter = signature.HasContextParameter();
for (size_t i = 0; i < signature.parameter_names.size(); ++i) {
+ const std::string& parameter_name = signature.parameter_names[i]->value;
const Type* type = signature.types()[i];
const bool mark_as_used = signature.implicit_count > i;
std::string var = AddParameter(i, builtin, &parameters, &parameter_types,
@@ -667,14 +679,8 @@ void ImplementationVisitor::Visit(Builtin* builtin) {
csa_ccfile() << " " << type->GetGeneratedTypeName() << " " << var
<< " = "
<< "UncheckedParameter<" << type->GetGeneratedTNodeTypeName()
- << ">(";
- if (i == 0 && has_context_parameter) {
- csa_ccfile() << "Descriptor::kContext";
- } else {
- csa_ccfile() << "Descriptor::ParameterIndex<"
- << (has_context_parameter ? i - 1 : i) << ">()";
- }
- csa_ccfile() << ");\n";
+ << ">(Descriptor::k" << CamelifyString(parameter_name)
+ << ");\n";
csa_ccfile() << " USE(" << var << ");\n";
}
}
@@ -1743,7 +1749,23 @@ void ImplementationVisitor::GenerateImplementation(const std::string& dir) {
GlobalContext::PerFileStreams& streams =
GlobalContext::GeneratedPerFile(file);
- WriteFile(base_filename + "-tq-csa.cc", streams.csa_ccfile.str());
+ std::string csa_cc = streams.csa_ccfile.str();
+ // Insert missing builtin includes where the marker is.
+ {
+ auto pos = csa_cc.find(BuiltinIncludesMarker);
+ CHECK_NE(pos, std::string::npos);
+ std::string includes;
+ for (const SourceId& include : streams.required_builtin_includes) {
+ std::string include_file =
+ SourceFileMap::PathFromV8RootWithoutExtension(include);
+ includes += "#include \"torque-generated/";
+ includes += include_file;
+ includes += "-tq-csa.h\"\n";
+ }
+ csa_cc.replace(pos, strlen(BuiltinIncludesMarker), std::move(includes));
+ }
+
+ WriteFile(base_filename + "-tq-csa.cc", std::move(csa_cc));
WriteFile(base_filename + "-tq-csa.h", streams.csa_headerfile.str());
WriteFile(base_filename + "-tq.inc",
streams.class_definition_headerfile.str());
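The hunks above form one mechanism: BeginGeneratedFiles streams a placeholder comment, the visitor records which -tq-csa.h headers each file actually needs as it generates the body, and GenerateImplementation splices the final include list in where the placeholder sits. A stripped-down sketch of that pattern, with made-up paths:

#include <iostream>
#include <set>
#include <sstream>
#include <string>

int main() {
  const std::string kMarker = "// __BUILTIN_INCLUDES_MARKER__\n";

  // Phase 1: emit the file with a placeholder, since the include set is not
  // known yet while the body is still being generated.
  std::ostringstream out;
  out << "// Required Builtins:\n" << kMarker << "\nnamespace v8 {}\n";

  // Phase 2: includes collected while generating the body (hypothetical paths).
  std::set<std::string> required = {"src/builtins/foo", "src/objects/bar"};
  std::string includes;
  for (const std::string& path : required)
    includes += "#include \"torque-generated/" + path + "-tq-csa.h\"\n";

  // Phase 3: splice the list in where the marker sits.
  std::string text = out.str();
  text.replace(text.find(kMarker), kMarker.size(), includes);
  std::cout << text;
}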
@@ -2368,6 +2390,10 @@ LocationReference ImplementationVisitor::GetLocationReference(
}
}
Value* value = Declarations::LookupValue(name);
+ CHECK(value->Position().source.IsValid());
+ if (auto stream = CurrentFileStreams::Get()) {
+ stream->required_builtin_includes.insert(value->Position().source);
+ }
if (GlobalContext::collect_language_server_data()) {
LanguageServerData::AddDefinition(expr->name->pos, value->name()->pos);
}
@@ -2623,6 +2649,11 @@ VisitResult ImplementationVisitor::GenerateCall(
Callable* callable, base::Optional<LocationReference> this_reference,
Arguments arguments, const TypeVector& specialization_types,
bool is_tailcall) {
+ CHECK(callable->Position().source.IsValid());
+ if (auto stream = CurrentFileStreams::Get()) {
+ stream->required_builtin_includes.insert(callable->Position().source);
+ }
+
const Type* return_type = callable->signature().return_type;
if (is_tailcall) {
@@ -3128,8 +3159,7 @@ VisitResult ImplementationVisitor::Visit(CallMethodExpression* expr) {
TypeVector argument_types = arguments.parameters.ComputeTypeVector();
DCHECK_EQ(expr->method->namespace_qualification.size(), 0);
QualifiedName qualified_name = QualifiedName(method_name);
- Callable* callable = nullptr;
- callable = LookupMethod(method_name, target_type, arguments, {});
+ Callable* callable = LookupMethod(method_name, target_type, arguments, {});
if (GlobalContext::collect_language_server_data()) {
LanguageServerData::AddDefinition(expr->method->name->pos,
callable->IdentifierPosition());
@@ -3437,40 +3467,40 @@ void ImplementationVisitor::GenerateBuiltinDefinitionsAndInterfaceDescriptors(
std::string descriptor_name = builtin->ExternalName() + "Descriptor";
bool has_context_parameter = builtin->signature().HasContextParameter();
size_t kFirstNonContextParameter = has_context_parameter ? 1 : 0;
- size_t parameter_count =
- builtin->parameter_names().size() - kFirstNonContextParameter;
TypeVector return_types = LowerType(builtin->signature().return_type);
- interface_descriptors
- << "class " << descriptor_name
- << " : public TorqueInterfaceDescriptor<" << return_types.size()
- << ", " << parameter_count << ", "
- << (has_context_parameter ? "true" : "false") << "> {\n";
- interface_descriptors << " DECLARE_DESCRIPTOR_WITH_BASE("
- << descriptor_name
- << ", TorqueInterfaceDescriptor)\n";
-
- interface_descriptors
- << " std::vector<MachineType> ReturnType() override {\n";
- interface_descriptors << " return {{";
+ interface_descriptors << "class " << descriptor_name
+ << " : public StaticCallInterfaceDescriptor<"
+ << descriptor_name << "> {\n";
+
+ interface_descriptors << " public:\n";
+
+ if (has_context_parameter) {
+ interface_descriptors << " DEFINE_RESULT_AND_PARAMETERS(";
+ } else {
+ interface_descriptors << " DEFINE_RESULT_AND_PARAMETERS_NO_CONTEXT(";
+ }
+ interface_descriptors << return_types.size();
+ for (size_t i = kFirstNonContextParameter;
+ i < builtin->parameter_names().size(); ++i) {
+ Identifier* parameter = builtin->parameter_names()[i];
+ interface_descriptors << ", k" << CamelifyString(parameter->value);
+ }
+ interface_descriptors << ")\n";
+
+ interface_descriptors << " DEFINE_RESULT_AND_PARAMETER_TYPES(";
PrintCommaSeparatedList(interface_descriptors, return_types,
MachineTypeString);
- interface_descriptors << "}};\n";
- interface_descriptors << " }\n";
-
- interface_descriptors << " std::array<MachineType, " << parameter_count
- << "> ParameterTypes() override {\n";
- interface_descriptors << " return {";
for (size_t i = kFirstNonContextParameter;
i < builtin->parameter_names().size(); ++i) {
- bool last = i + 1 == builtin->parameter_names().size();
const Type* type = builtin->signature().parameter_types.types[i];
- interface_descriptors << MachineTypeString(type)
- << (last ? "" : ", ");
+ interface_descriptors << ", " << MachineTypeString(type);
}
- interface_descriptors << "};\n";
+ interface_descriptors << ")\n";
+
+ interface_descriptors << " DECLARE_DEFAULT_DESCRIPTOR("
+ << descriptor_name << ")\n";
- interface_descriptors << " }\n";
interface_descriptors << "};\n\n";
} else {
builtin_definitions << "TFJ(" << builtin->ExternalName();
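With this change each Torque builtin gets a descriptor that names its parameters (k<ParameterName>) instead of a positional count fed to TorqueInterfaceDescriptor. A toy re-creation of the emission logic for a hypothetical builtin taking Receiver and Value, omitting the DEFINE_RESULT_AND_PARAMETER_TYPES line for brevity:

#include <iostream>
#include <string>
#include <vector>

int main() {
  const bool has_context_parameter = true;
  const std::vector<std::string> parameters = {"Receiver", "Value"};  // hypothetical

  std::cout << "class ExampleBuiltinDescriptor\n"
               "    : public StaticCallInterfaceDescriptor<ExampleBuiltinDescriptor> {\n"
               " public:\n  "
            << (has_context_parameter ? "DEFINE_RESULT_AND_PARAMETERS("
                                      : "DEFINE_RESULT_AND_PARAMETERS_NO_CONTEXT(")
            << 1;  // number of results
  for (const std::string& p : parameters) std::cout << ", k" << p;
  std::cout << ")\n  DECLARE_DEFAULT_DESCRIPTOR(ExampleBuiltinDescriptor)\n};\n";
}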
@@ -4221,15 +4251,47 @@ void CppClassGenerator::GenerateFieldAccessors(
}
hdr_ << " inline " << type_name << " " << name << "("
- << (indexed ? "int i" : "") << ") const;\n";
+ << (indexed ? "int i" : "");
+ switch (class_field.read_synchronization) {
+ case FieldSynchronization::kNone:
+ break;
+ case FieldSynchronization::kRelaxed:
+ hdr_ << (indexed ? ", RelaxedLoadTag" : "RelaxedLoadTag");
+ break;
+ case FieldSynchronization::kAcquireRelease:
+ hdr_ << (indexed ? ", AcquireLoadTag" : "AcquireLoadTag");
+ break;
+ }
+ hdr_ << ") const;\n";
if (can_contain_heap_objects) {
hdr_ << " inline " << type_name << " " << name
- << "(PtrComprCageBase cage_base" << (indexed ? ", int i" : "")
- << ") const;\n";
+ << "(PtrComprCageBase cage_base" << (indexed ? ", int i" : "");
+ switch (class_field.read_synchronization) {
+ case FieldSynchronization::kNone:
+ break;
+ case FieldSynchronization::kRelaxed:
+ hdr_ << ", RelaxedLoadTag";
+ break;
+ case FieldSynchronization::kAcquireRelease:
+ hdr_ << ", AcquireLoadTag";
+ break;
+ }
+
+ hdr_ << ") const;\n";
}
hdr_ << " inline void set_" << name << "(" << (indexed ? "int i, " : "")
- << type_name << " value"
- << (can_contain_heap_objects
+ << type_name << " value";
+ switch (class_field.write_synchronization) {
+ case FieldSynchronization::kNone:
+ break;
+ case FieldSynchronization::kRelaxed:
+ hdr_ << ", RelaxedStoreTag";
+ break;
+ case FieldSynchronization::kAcquireRelease:
+ hdr_ << ", ReleaseStoreTag";
+ break;
+ }
+ hdr_ << (can_contain_heap_objects
? ", WriteBarrierMode mode = UPDATE_WRITE_BARRIER"
: "")
<< ");\n\n";
@@ -4239,10 +4301,32 @@ void CppClassGenerator::GenerateFieldAccessors(
if (can_contain_heap_objects) {
inl_ << "template <class D, class P>\n";
inl_ << type_name << " " << gen_name_ << "<D, P>::" << name << "("
- << (indexed ? "int i" : "") << ") const {\n";
+ << (indexed ? "int i" : "");
+ switch (class_field.read_synchronization) {
+ case FieldSynchronization::kNone:
+ break;
+ case FieldSynchronization::kRelaxed:
+ inl_ << (indexed ? ", RelaxedLoadTag" : "RelaxedLoadTag");
+ break;
+ case FieldSynchronization::kAcquireRelease:
+ inl_ << (indexed ? ", AcquireLoadTag" : "AcquireLoadTag");
+ break;
+ }
+ inl_ << ") const {\n";
inl_ << " PtrComprCageBase cage_base = GetPtrComprCageBase(*this);\n";
inl_ << " return " << gen_name_ << "::" << name << "(cage_base"
- << (indexed ? ", i" : "") << ");\n";
+ << (indexed ? ", i" : "");
+ switch (class_field.read_synchronization) {
+ case FieldSynchronization::kNone:
+ break;
+ case FieldSynchronization::kRelaxed:
+ inl_ << ", kRelaxedLoad";
+ break;
+ case FieldSynchronization::kAcquireRelease:
+ inl_ << ", kAcquireLoad";
+ break;
+ }
+ inl_ << ");\n";
inl_ << "}\n";
}
@@ -4252,6 +4336,18 @@ void CppClassGenerator::GenerateFieldAccessors(
if (can_contain_heap_objects) inl_ << "PtrComprCageBase cage_base";
if (can_contain_heap_objects && indexed) inl_ << ", ";
if (indexed) inl_ << "int i";
+ switch (class_field.read_synchronization) {
+ case FieldSynchronization::kNone:
+ break;
+ case FieldSynchronization::kRelaxed:
+ inl_ << ((can_contain_heap_objects || indexed) ? ", RelaxedLoadTag"
+ : "RelaxedLoadTag");
+ break;
+ case FieldSynchronization::kAcquireRelease:
+ inl_ << ((can_contain_heap_objects || indexed) ? ", AcquireLoadTag"
+ : "AcquireLoadTag");
+ break;
+ }
inl_ << ") const {\n";
inl_ << " " << type_name << " value;\n";
@@ -4266,6 +4362,16 @@ void CppClassGenerator::GenerateFieldAccessors(
inl_ << "int i, ";
}
inl_ << type_name << " value";
+ switch (class_field.write_synchronization) {
+ case FieldSynchronization::kNone:
+ break;
+ case FieldSynchronization::kRelaxed:
+ inl_ << ", RelaxedStoreTag";
+ break;
+ case FieldSynchronization::kAcquireRelease:
+ inl_ << ", ReleaseStoreTag";
+ break;
+ }
if (can_contain_heap_objects) {
inl_ << ", WriteBarrierMode mode";
}
@@ -4339,10 +4445,10 @@ void CppClassGenerator::EmitLoadFieldStatement(
if (!field_type->IsSubtypeOf(TypeOracle::GetTaggedType())) {
if (class_field.read_synchronization ==
FieldSynchronization::kAcquireRelease) {
- ReportError("Torque doesn't support @acquireRead on untagged data");
+      ReportError("Torque doesn't support @cppAcquireLoad on untagged data");
} else if (class_field.read_synchronization ==
FieldSynchronization::kRelaxed) {
- ReportError("Torque doesn't support @relaxedRead on untagged data");
+      ReportError("Torque doesn't support @cppRelaxedLoad on untagged data");
}
inl_ << "this->template ReadField<" << type_name << ">(" << offset
<< ");\n";
@@ -4636,11 +4742,33 @@ void GeneratePrintDefinitionsForClass(std::ostream& impl, const ClassType* type,
// TODO(turbofan): Print struct fields too.
impl << "\" <struct field printing still unimplemented>\";\n";
} else {
- impl << "this->" << f.name_and_type.name << "();\n";
+ impl << "this->" << f.name_and_type.name;
+ switch (f.read_synchronization) {
+ case FieldSynchronization::kNone:
+ impl << "();\n";
+ break;
+ case FieldSynchronization::kRelaxed:
+ impl << "(kRelaxedLoad);\n";
+ break;
+ case FieldSynchronization::kAcquireRelease:
+ impl << "(kAcquireLoad);\n";
+ break;
+ }
}
} else {
impl << " os << \"\\n - " << f.name_and_type.name << ": \" << "
- << "Brief(this->" << f.name_and_type.name << "());\n";
+ << "Brief(this->" << f.name_and_type.name;
+ switch (f.read_synchronization) {
+ case FieldSynchronization::kNone:
+ impl << "());\n";
+ break;
+ case FieldSynchronization::kRelaxed:
+ impl << "(kRelaxedLoad));\n";
+ break;
+ case FieldSynchronization::kAcquireRelease:
+ impl << "(kAcquireLoad));\n";
+ break;
+ }
}
}
}
@@ -4864,7 +4992,7 @@ namespace {
void GenerateFieldValueVerifier(const std::string& class_name, bool indexed,
std::string offset, const Field& leaf_field,
std::string indexed_field_size,
- std::ostream& cc_contents) {
+ std::ostream& cc_contents, bool is_map) {
const Type* field_type = leaf_field.name_and_type.type;
bool maybe_object =
@@ -4879,8 +5007,12 @@ void GenerateFieldValueVerifier(const std::string& class_name, bool indexed,
const std::string value = leaf_field.name_and_type.name + "__value";
// Read the field.
- cc_contents << " " << object_type << " " << value << " = TaggedField<"
- << object_type << ">::load(o, " << offset << ");\n";
+ if (is_map) {
+ cc_contents << " " << object_type << " " << value << " = o.map();\n";
+ } else {
+ cc_contents << " " << object_type << " " << value << " = TaggedField<"
+ << object_type << ">::load(o, " << offset << ");\n";
+ }
// Call VerifyPointer or VerifyMaybeObjectPointer on it.
cc_contents << " " << object_type << "::" << verify_fn << "(isolate, "
@@ -4947,13 +5079,13 @@ void GenerateClassFieldVerifier(const std::string& class_name,
class_name, f.index.has_value(),
field_start_offset + " + " + std::to_string(*struct_field.offset),
struct_field, std::to_string((*struct_type)->PackedSize()),
- cc_contents);
+ cc_contents, f.name_and_type.name == "map");
}
}
} else {
GenerateFieldValueVerifier(class_name, f.index.has_value(),
field_start_offset, f, "kTaggedSize",
- cc_contents);
+ cc_contents, f.name_and_type.name == "map");
}
cc_contents << " }\n";
diff --git a/chromium/v8/src/torque/torque-parser.cc b/chromium/v8/src/torque/torque-parser.cc
index cab0182677a..c74ef024978 100644
--- a/chromium/v8/src/torque/torque-parser.cc
+++ b/chromium/v8/src/torque/torque-parser.cc
@@ -1954,22 +1954,23 @@ base::Optional<ParseResult> MakeAnnotation(ParseResultIterator* child_results) {
}
base::Optional<ParseResult> MakeClassField(ParseResultIterator* child_results) {
- AnnotationSet annotations(child_results,
- {ANNOTATION_NO_VERIFIER, ANNOTATION_RELAXED_WRITE,
- ANNOTATION_RELAXED_READ, ANNOTATION_RELEASE_WRITE,
- ANNOTATION_ACQUIRE_READ},
- {ANNOTATION_IF, ANNOTATION_IFNOT});
+ AnnotationSet annotations(
+ child_results,
+ {ANNOTATION_NO_VERIFIER, ANNOTATION_CPP_RELAXED_STORE,
+ ANNOTATION_CPP_RELAXED_LOAD, ANNOTATION_CPP_RELEASE_STORE,
+ ANNOTATION_CPP_ACQUIRE_LOAD},
+ {ANNOTATION_IF, ANNOTATION_IFNOT});
bool generate_verify = !annotations.Contains(ANNOTATION_NO_VERIFIER);
FieldSynchronization write_synchronization = FieldSynchronization::kNone;
- if (annotations.Contains(ANNOTATION_RELEASE_WRITE)) {
+ if (annotations.Contains(ANNOTATION_CPP_RELEASE_STORE)) {
write_synchronization = FieldSynchronization::kAcquireRelease;
- } else if (annotations.Contains(ANNOTATION_RELAXED_WRITE)) {
+ } else if (annotations.Contains(ANNOTATION_CPP_RELAXED_STORE)) {
write_synchronization = FieldSynchronization::kRelaxed;
}
FieldSynchronization read_synchronization = FieldSynchronization::kNone;
- if (annotations.Contains(ANNOTATION_ACQUIRE_READ)) {
+ if (annotations.Contains(ANNOTATION_CPP_ACQUIRE_LOAD)) {
read_synchronization = FieldSynchronization::kAcquireRelease;
- } else if (annotations.Contains(ANNOTATION_RELAXED_READ)) {
+ } else if (annotations.Contains(ANNOTATION_CPP_RELAXED_LOAD)) {
read_synchronization = FieldSynchronization::kRelaxed;
}
std::vector<ConditionalAnnotation> conditions;
diff --git a/chromium/v8/src/torque/torque.cc b/chromium/v8/src/torque/torque.cc
index 4e71c430140..ca16ce4ca6c 100644
--- a/chromium/v8/src/torque/torque.cc
+++ b/chromium/v8/src/torque/torque.cc
@@ -33,7 +33,12 @@ int WrappedMain(int argc, const char** argv) {
} else if (argument == "-v8-root") {
options.v8_root = std::string(argv[++i]);
} else if (argument == "-m32") {
+#ifdef V8_COMPRESS_POINTERS
+ std::cerr << "Pointer compression is incompatible with -m32.\n";
+ base::OS::Abort();
+#else
options.force_32bit_output = true;
+#endif
} else if (argument == "-annotate-ir") {
options.annotate_ir = true;
} else {
diff --git a/chromium/v8/src/tracing/trace-event.cc b/chromium/v8/src/tracing/trace-event.cc
index 8e69fe55205..7f1f96adc79 100644
--- a/chromium/v8/src/tracing/trace-event.cc
+++ b/chromium/v8/src/tracing/trace-event.cc
@@ -20,6 +20,8 @@ v8::TracingController* TraceEventHelper::GetTracingController() {
return v8::internal::V8::GetCurrentPlatform()->GetTracingController();
}
+#ifdef V8_RUNTIME_CALL_STATS
+
void CallStatsScopedTracer::AddEndTraceEvent() {
if (!has_parent_scope_ && p_data_->isolate) {
auto value = v8::tracing::TracedValue::Create();
@@ -52,6 +54,8 @@ void CallStatsScopedTracer::Initialize(v8::internal::Isolate* isolate,
v8::internal::tracing::kGlobalScope, v8::internal::tracing::kNoId,
TRACE_EVENT_FLAG_NONE, v8::internal::tracing::kNoId);
}
+
+#endif // defined(V8_RUNTIME_CALL_STATS)
#endif // !defined(V8_USE_PERFETTO)
} // namespace tracing
diff --git a/chromium/v8/src/tracing/trace-event.h b/chromium/v8/src/tracing/trace-event.h
index b5a16c85907..36c9cdcadf5 100644
--- a/chromium/v8/src/tracing/trace-event.h
+++ b/chromium/v8/src/tracing/trace-event.h
@@ -279,6 +279,7 @@ enum CategoryGroupEnabledFlags {
#define TRACE_EVENT_CALL_STATS_SCOPED(isolate, category_group, name) \
INTERNAL_TRACE_EVENT_CALL_STATS_SCOPED(isolate, category_group, name)
+#ifdef V8_RUNTIME_CALL_STATS
#define INTERNAL_TRACE_EVENT_CALL_STATS_SCOPED(isolate, category_group, name) \
INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
v8::internal::tracing::CallStatsScopedTracer INTERNAL_TRACE_EVENT_UID( \
@@ -288,6 +289,9 @@ enum CategoryGroupEnabledFlags {
.Initialize(isolate, INTERNAL_TRACE_EVENT_UID(category_group_enabled), \
name); \
}
+#else // V8_RUNTIME_CALL_STATS
+#define INTERNAL_TRACE_EVENT_CALL_STATS_SCOPED(isolate, category_group, name)
+#endif // V8_RUNTIME_CALL_STATS
namespace v8 {
namespace internal {
@@ -588,6 +592,7 @@ class ScopedTracer {
Data data_;
};
+#ifdef V8_RUNTIME_CALL_STATS
// Do not use directly.
class CallStatsScopedTracer {
public:
@@ -612,6 +617,7 @@ class CallStatsScopedTracer {
Data* p_data_;
Data data_;
};
+#endif // defined(V8_RUNTIME_CALL_STATS)
} // namespace tracing
} // namespace internal
@@ -619,6 +625,8 @@ class CallStatsScopedTracer {
#else // defined(V8_USE_PERFETTO)
+#ifdef V8_RUNTIME_CALL_STATS
+
#define TRACE_EVENT_CALL_STATS_SCOPED(isolate, category, name) \
struct PERFETTO_UID(ScopedEvent) { \
struct ScopedStats { \
@@ -650,6 +658,7 @@ class CallStatsScopedTracer {
{ isolate, 0 } \
}
+#endif // defined(V8_RUNTIME_CALL_STATS)
#endif // defined(V8_USE_PERFETTO)
#endif // V8_TRACING_TRACE_EVENT_H_
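The hunks above make TRACE_EVENT_CALL_STATS_SCOPED expand to nothing whenever V8_RUNTIME_CALL_STATS is not defined, so call sites need no conditional compilation of their own. A minimal standalone sketch of that pattern follows; the names are illustrative stand-ins, not the V8 macros.

#include <cstdio>

// Comment this out to compile the scoped tracer away entirely.
#define DEMO_RUNTIME_CALL_STATS

#ifdef DEMO_RUNTIME_CALL_STATS
// RAII helper that marks entry and exit of a traced scope.
class DemoCallStatsScope {
 public:
  explicit DemoCallStatsScope(const char* name) : name_(name) {
    std::printf("enter %s\n", name_);
  }
  ~DemoCallStatsScope() { std::printf("leave %s\n", name_); }

 private:
  const char* name_;
};
#define DEMO_CALL_STATS_SCOPED(name) DemoCallStatsScope demo_call_stats_scope(name)
#else
// Expands to nothing, so the call site compiles either way.
#define DEMO_CALL_STATS_SCOPED(name)
#endif

int main() {
  DEMO_CALL_STATS_SCOPED("V8.Execute");
  std::printf("work\n");
  return 0;
}

With the feature macro undefined, the call site still compiles but produces no code, which is the effect of the empty #else definition added to trace-event.h.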
diff --git a/chromium/v8/src/trap-handler/DEPS b/chromium/v8/src/trap-handler/DEPS
index 061634d51b8..dc6d27e1554 100644
--- a/chromium/v8/src/trap-handler/DEPS
+++ b/chromium/v8/src/trap-handler/DEPS
@@ -6,19 +6,8 @@ include_rules = [
"-src",
"-include",
"+src/trap-handler",
+ # Use the IMMEDIATE_CRASH() macro for crashing non-recoverably on check failure.
+ "+src/base/immediate-crash.h",
+ # Allow include/v8config.h for V8_OS_* macros.
+ "+include/v8config.h",
]
-
-specific_include_rules = {
- "trap-handler.h": [
- "+src/base/build_config.h",
- "+src/common/globals.h",
- "+src/flags/flags.h",
- ],
- "handler-inside-posix.h": [
- # To access V8_OS_LINUX. This file is already included in build_config.h.
- "+include/v8config.h",
- ],
- "handler-inside-win.h": [
- "+src/base/macros.h",
- ]
-}
diff --git a/chromium/v8/src/trap-handler/handler-inside-posix.h b/chromium/v8/src/trap-handler/handler-inside-posix.h
index 49fe23a7128..27e46773bbd 100644
--- a/chromium/v8/src/trap-handler/handler-inside-posix.h
+++ b/chromium/v8/src/trap-handler/handler-inside-posix.h
@@ -6,6 +6,7 @@
#define V8_TRAP_HANDLER_HANDLER_INSIDE_POSIX_H_
#include <signal.h>
+
#include "include/v8config.h"
namespace v8 {
diff --git a/chromium/v8/src/trap-handler/handler-inside-win.h b/chromium/v8/src/trap-handler/handler-inside-win.h
index 6db28149e7d..9b9b0605178 100644
--- a/chromium/v8/src/trap-handler/handler-inside-win.h
+++ b/chromium/v8/src/trap-handler/handler-inside-win.h
@@ -7,7 +7,7 @@
#include <windows.h>
-#include "src/base/macros.h"
+#include "src/trap-handler/trap-handler.h" // For TH_DISABLE_ASAN.
namespace v8 {
namespace internal {
@@ -18,7 +18,7 @@ LONG WINAPI HandleWasmTrap(EXCEPTION_POINTERS* exception);
// On Windows, asan installs its own exception handler which maps shadow
// memory. Since our exception handler may be executed before the asan exception
// handler, we have to make sure that asan shadow memory is not accessed here.
-DISABLE_ASAN bool TryHandleWasmTrap(EXCEPTION_POINTERS* exception);
+TH_DISABLE_ASAN bool TryHandleWasmTrap(EXCEPTION_POINTERS* exception);
} // namespace trap_handler
} // namespace internal
diff --git a/chromium/v8/src/trap-handler/handler-inside.cc b/chromium/v8/src/trap-handler/handler-inside.cc
index 81e37c205af..31d7f24491a 100644
--- a/chromium/v8/src/trap-handler/handler-inside.cc
+++ b/chromium/v8/src/trap-handler/handler-inside.cc
@@ -50,11 +50,14 @@ bool TryFindLandingPad(uintptr_t fault_addr, uintptr_t* landing_pad) {
if (data == nullptr) {
continue;
}
- const Address base = data->base;
+ const uintptr_t base = data->base;
if (fault_addr >= base && fault_addr < base + data->size) {
// Hurray, we found the code object. Check for protected addresses.
- const ptrdiff_t offset = fault_addr - base;
+ const uint32_t offset = static_cast<uint32_t>(fault_addr - base);
+ // The offset must fit in 32 bits; see the comment on
+ // ProtectedInstructionData::instr_offset.
+ TH_DCHECK(base + offset == fault_addr);
for (unsigned i = 0; i < data->num_protected_instructions; ++i) {
if (data->instructions[i].instr_offset == offset) {
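The narrowing to uint32_t above relies on wasm code objects staying well under 4 GiB; the added TH_DCHECK verifies that the truncated offset still round-trips back to the fault address. A standalone sketch of that round-trip check, with made-up addresses:

#include <cassert>
#include <cstdint>
#include <cstdio>

int main() {
  const uintptr_t base = 0x40000000u;        // start of a hypothetical code object
  const uintptr_t fault_addr = 0x40001234u;  // faulting instruction inside it
  // Narrow the pointer-sized difference to 32 bits, as the trap handler does.
  const uint32_t offset = static_cast<uint32_t>(fault_addr - base);
  // If the difference did not fit in 32 bits, adding the truncated offset back
  // to base would not reproduce fault_addr and this assertion would fire.
  assert(base + offset == fault_addr);
  std::printf("offset = 0x%x\n", offset);
  return 0;
}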
diff --git a/chromium/v8/src/trap-handler/handler-outside-posix.cc b/chromium/v8/src/trap-handler/handler-outside-posix.cc
index 55bcc0075b3..004783b64f3 100644
--- a/chromium/v8/src/trap-handler/handler-outside-posix.cc
+++ b/chromium/v8/src/trap-handler/handler-outside-posix.cc
@@ -21,6 +21,8 @@
#include <signal.h>
+#include <cstdio>
+
#include "src/trap-handler/handler-inside-posix.h"
#include "src/trap-handler/trap-handler-internal.h"
@@ -39,7 +41,7 @@ bool g_is_default_signal_handler_registered;
} // namespace
bool RegisterDefaultTrapHandler() {
- CHECK(!g_is_default_signal_handler_registered);
+ TH_CHECK(!g_is_default_signal_handler_registered);
struct sigaction action;
action.sa_sigaction = HandleSignal;
@@ -61,7 +63,7 @@ bool RegisterDefaultTrapHandler() {
defined(THREAD_SANITIZER) || defined(LEAK_SANITIZER) || \
defined(UNDEFINED_SANITIZER)
struct sigaction installed_handler;
- CHECK_EQ(sigaction(kOobSignal, NULL, &installed_handler), 0);
+ TH_CHECK(sigaction(kOobSignal, NULL, &installed_handler) == 0);
// If the installed handler does not point to HandleSignal, then
// allow_user_segv_handler is 0.
if (installed_handler.sa_sigaction != HandleSignal) {
diff --git a/chromium/v8/src/trap-handler/handler-outside-win.cc b/chromium/v8/src/trap-handler/handler-outside-win.cc
index 09673c8cccf..307f919d789 100644
--- a/chromium/v8/src/trap-handler/handler-outside-win.cc
+++ b/chromium/v8/src/trap-handler/handler-outside-win.cc
@@ -40,7 +40,7 @@ void* g_registered_handler = nullptr;
bool RegisterDefaultTrapHandler() {
constexpr ULONG first = TRUE;
- CHECK_NULL(g_registered_handler);
+ TH_CHECK(g_registered_handler == nullptr);
g_registered_handler = AddVectoredExceptionHandler(first, HandleWasmTrap);
return nullptr != g_registered_handler;
diff --git a/chromium/v8/src/trap-handler/handler-outside.cc b/chromium/v8/src/trap-handler/handler-outside.cc
index 62355a5b60f..2eabcca0f7e 100644
--- a/chromium/v8/src/trap-handler/handler-outside.cc
+++ b/chromium/v8/src/trap-handler/handler-outside.cc
@@ -66,7 +66,7 @@ bool IsDisjoint(const CodeProtectionInfo* a, const CodeProtectionInfo* b) {
// registered.
void VerifyCodeRangeIsDisjoint(const CodeProtectionInfo* code_info) {
for (size_t i = 0; i < gNumCodeObjects; ++i) {
- DCHECK(IsDisjoint(code_info, gCodeObjects[i].code_info));
+ TH_DCHECK(IsDisjoint(code_info, gCodeObjects[i].code_info));
}
}
@@ -79,11 +79,11 @@ void ValidateCodeObjects() {
// Do some sanity checks on the protected instruction data
for (unsigned i = 0; i < data->num_protected_instructions; ++i) {
- DCHECK_GE(data->instructions[i].instr_offset, 0);
- DCHECK_LT(data->instructions[i].instr_offset, data->size);
- DCHECK_GE(data->instructions[i].landing_offset, 0);
- DCHECK_LT(data->instructions[i].landing_offset, data->size);
- DCHECK_GT(data->instructions[i].landing_offset,
+ TH_DCHECK(data->instructions[i].instr_offset >= 0);
+ TH_DCHECK(data->instructions[i].instr_offset < data->size);
+ TH_DCHECK(data->instructions[i].landing_offset >= 0);
+ TH_DCHECK(data->instructions[i].landing_offset < data->size);
+ TH_DCHECK(data->instructions[i].landing_offset >
data->instructions[i].instr_offset);
}
}
@@ -92,10 +92,10 @@ void ValidateCodeObjects() {
size_t free_count = 0;
for (size_t i = gNextCodeObject; i != gNumCodeObjects;
i = gCodeObjects[i].next_free) {
- DCHECK_LT(i, gNumCodeObjects);
+ TH_DCHECK(i < gNumCodeObjects);
++free_count;
// This check will fail if we encounter a cycle.
- DCHECK_LE(free_count, gNumCodeObjects);
+ TH_DCHECK(free_count <= gNumCodeObjects);
}
// Check that all free entries are reachable via the free list.
@@ -105,12 +105,12 @@ void ValidateCodeObjects() {
++free_count2;
}
}
- DCHECK_EQ(free_count, free_count2);
+ TH_DCHECK(free_count == free_count2);
}
} // namespace
CodeProtectionInfo* CreateHandlerData(
- Address base, size_t size, size_t num_protected_instructions,
+ uintptr_t base, size_t size, size_t num_protected_instructions,
const ProtectedInstructionData* protected_instructions) {
const size_t alloc_size = HandlerDataSize(num_protected_instructions);
CodeProtectionInfo* data =
@@ -131,9 +131,8 @@ CodeProtectionInfo* CreateHandlerData(
}
int RegisterHandlerData(
- Address base, size_t size, size_t num_protected_instructions,
+ uintptr_t base, size_t size, size_t num_protected_instructions,
const ProtectedInstructionData* protected_instructions) {
-
CodeProtectionInfo* data = CreateHandlerData(
base, size, num_protected_instructions, protected_instructions);
@@ -188,7 +187,7 @@ int RegisterHandlerData(
gNumCodeObjects = new_size;
}
- DCHECK(gCodeObjects[i].code_info == nullptr);
+ TH_DCHECK(gCodeObjects[i].code_info == nullptr);
// Find out where the next entry should go.
gNextCodeObject = gCodeObjects[i].next_free;
@@ -211,7 +210,7 @@ void ReleaseHandlerData(int index) {
if (index == kInvalidIndex) {
return;
}
- DCHECK_GE(index, 0);
+ TH_DCHECK(index >= 0);
// Remove the data from the global list if it's there.
CodeProtectionInfo* data = nullptr;
@@ -230,7 +229,7 @@ void ReleaseHandlerData(int index) {
}
// TODO(eholk): on debug builds, ensure there are no more copies in
// the list.
- DCHECK_NOT_NULL(data); // make sure we're releasing legitimate handler data.
+ TH_DCHECK(data); // make sure we're releasing legitimate handler data.
free(data);
}
@@ -259,9 +258,9 @@ bool EnableTrapHandler(bool use_v8_handler) {
// trap handlers are disabled.
bool can_enable =
g_can_enable_trap_handler.exchange(false, std::memory_order_relaxed);
- if (!can_enable) {
- FATAL("EnableTrapHandler called twice, or after IsTrapHandlerEnabled");
- }
+ // EnableTrapHandler called twice, or after IsTrapHandlerEnabled.
+ TH_CHECK(can_enable);
+
if (!V8_TRAP_HANDLER_SUPPORTED) {
return false;
}
diff --git a/chromium/v8/src/trap-handler/handler-shared.cc b/chromium/v8/src/trap-handler/handler-shared.cc
index 0607d2ed547..977d28daee9 100644
--- a/chromium/v8/src/trap-handler/handler-shared.cc
+++ b/chromium/v8/src/trap-handler/handler-shared.cc
@@ -26,7 +26,7 @@ namespace trap_handler {
// We declare this as int rather than bool as a workaround for a glibc bug, in
// which the dynamic loader cannot handle executables whose TLS area is only
// 1 byte in size; see https://sourceware.org/bugzilla/show_bug.cgi?id=14898.
-THREAD_LOCAL int g_thread_in_wasm_code;
+thread_local int g_thread_in_wasm_code;
static_assert(sizeof(g_thread_in_wasm_code) > 1,
"sizeof(thread_local_var) must be > 1, see "
diff --git a/chromium/v8/src/trap-handler/trap-handler-internal.h b/chromium/v8/src/trap-handler/trap-handler-internal.h
index 843cd34b704..71588ab8957 100644
--- a/chromium/v8/src/trap-handler/trap-handler-internal.h
+++ b/chromium/v8/src/trap-handler/trap-handler-internal.h
@@ -22,7 +22,7 @@ namespace trap_handler {
// protected memory access instructions and an offset to a landing pad to handle
// faults on that instruction.
struct CodeProtectionInfo {
- Address base;
+ uintptr_t base;
size_t size;
size_t num_protected_instructions;
ProtectedInstructionData instructions[1];
diff --git a/chromium/v8/src/trap-handler/trap-handler.h b/chromium/v8/src/trap-handler/trap-handler.h
index fcdc256a38d..a27ea236e79 100644
--- a/chromium/v8/src/trap-handler/trap-handler.h
+++ b/chromium/v8/src/trap-handler/trap-handler.h
@@ -10,15 +10,13 @@
#include <atomic>
-#include "src/base/build_config.h"
-#include "src/common/globals.h"
-#include "src/flags/flags.h"
+#include "include/v8config.h"
+#include "src/base/immediate-crash.h"
namespace v8 {
namespace internal {
namespace trap_handler {
-// TODO(eholk): Support trap handlers on other platforms.
#if V8_TARGET_ARCH_X64 && V8_OS_LINUX && !V8_OS_ANDROID
#define V8_TRAP_HANDLER_SUPPORTED true
#elif V8_TARGET_ARCH_X64 && V8_OS_WIN
@@ -33,6 +31,35 @@ namespace trap_handler {
#define V8_TRAP_HANDLER_SUPPORTED false
#endif
+// Setup for shared library export.
+#if defined(BUILDING_V8_SHARED) && defined(V8_OS_WIN)
+#define TH_EXPORT_PRIVATE __declspec(dllexport)
+#elif defined(BUILDING_V8_SHARED)
+#define TH_EXPORT_PRIVATE __attribute__((visibility("default")))
+#elif defined(USING_V8_SHARED) && defined(V8_OS_WIN)
+#define TH_EXPORT_PRIVATE __declspec(dllimport)
+#else
+#define TH_EXPORT_PRIVATE
+#endif
+
+#define TH_CHECK(condition) \
+ if (!(condition)) IMMEDIATE_CRASH();
+#ifdef DEBUG
+#define TH_DCHECK(condition) TH_CHECK(condition)
+#else
+#define TH_DCHECK(condition) void(0)
+#endif
+
+#if defined(__has_feature)
+#if __has_feature(address_sanitizer)
+#define TH_DISABLE_ASAN __attribute__((no_sanitize_address))
+#else
+#define TH_DISABLE_ASAN
+#endif
+#else
+#define TH_DISABLE_ASAN
+#endif
+
struct ProtectedInstructionData {
// The offset of this instruction from the start of its code object.
// Wasm code never grows larger than 2GB, so uint32_t is sufficient.
@@ -50,23 +77,14 @@ const int kInvalidIndex = -1;
///
/// This returns a number that can be used to identify the handler data to
/// ReleaseHandlerData, or -1 on failure.
-int V8_EXPORT_PRIVATE RegisterHandlerData(
- Address base, size_t size, size_t num_protected_instructions,
+int TH_EXPORT_PRIVATE RegisterHandlerData(
+ uintptr_t base, size_t size, size_t num_protected_instructions,
const ProtectedInstructionData* protected_instructions);
/// Removes the data from the master list and frees any memory, if necessary.
/// TODO(mtrofin): We can switch to using size_t for index and not need
/// kInvalidIndex.
-void V8_EXPORT_PRIVATE ReleaseHandlerData(int index);
-
-#if V8_OS_WIN
-#define THREAD_LOCAL __declspec(thread)
-#elif V8_OS_ANDROID
-// TODO(eholk): fix this before enabling for trap handlers for Android.
-#define THREAD_LOCAL
-#else
-#define THREAD_LOCAL __thread
-#endif
+void TH_EXPORT_PRIVATE ReleaseHandlerData(int index);
// Initially false, set to true when trap handlers are enabled. Never goes
// back to false then.
@@ -83,10 +101,10 @@ extern std::atomic<bool> g_can_enable_trap_handler;
//
// use_v8_handler indicates that V8 should install its own handler
// rather than relying on the embedder to do it.
-V8_EXPORT_PRIVATE bool EnableTrapHandler(bool use_v8_handler);
+TH_EXPORT_PRIVATE bool EnableTrapHandler(bool use_v8_handler);
inline bool IsTrapHandlerEnabled() {
- DCHECK_IMPLIES(g_is_trap_handler_enabled, V8_TRAP_HANDLER_SUPPORTED);
+ TH_DCHECK(!g_is_trap_handler_enabled || V8_TRAP_HANDLER_SUPPORTED);
// Disallow enabling the trap handler after retrieving the current value.
// Re-enabling them late can produce issues because code or objects might have
// been generated under the assumption that trap handlers are disabled.
@@ -97,34 +115,40 @@ inline bool IsTrapHandlerEnabled() {
return g_is_trap_handler_enabled;
}
-extern THREAD_LOCAL int g_thread_in_wasm_code;
+#if defined(V8_OS_AIX)
+// `thread_local` does not link on AIX:
+// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=100641
+extern __thread int g_thread_in_wasm_code;
+#else
+extern thread_local int g_thread_in_wasm_code;
+#endif
// Return the address of the thread-local {g_thread_in_wasm_code} variable. This
// pointer can be accessed and modified as long as the thread calling this
// function exists. Only use it from the same thread to avoid race conditions.
-V8_NOINLINE V8_EXPORT_PRIVATE int* GetThreadInWasmThreadLocalAddress();
+V8_NOINLINE TH_EXPORT_PRIVATE int* GetThreadInWasmThreadLocalAddress();
// On Windows, asan installs its own exception handler which maps shadow
// memory. Since our exception handler may be executed before the asan exception
// handler, we have to make sure that asan shadow memory is not accessed here.
-DISABLE_ASAN inline bool IsThreadInWasm() { return g_thread_in_wasm_code; }
+TH_DISABLE_ASAN inline bool IsThreadInWasm() { return g_thread_in_wasm_code; }
inline void SetThreadInWasm() {
if (IsTrapHandlerEnabled()) {
- DCHECK(!IsThreadInWasm());
+ TH_DCHECK(!IsThreadInWasm());
g_thread_in_wasm_code = true;
}
}
inline void ClearThreadInWasm() {
if (IsTrapHandlerEnabled()) {
- DCHECK(IsThreadInWasm());
+ TH_DCHECK(IsThreadInWasm());
g_thread_in_wasm_code = false;
}
}
bool RegisterDefaultTrapHandler();
-V8_EXPORT_PRIVATE void RemoveTrapHandler();
+TH_EXPORT_PRIVATE void RemoveTrapHandler();
size_t GetRecoveredTrapCount();
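With the includes reduced to v8config.h and immediate-crash.h, the trap handler gets its own TH_CHECK/TH_DCHECK/TH_DISABLE_ASAN macros instead of pulling in the general V8 check machinery. A standalone sketch of the same pattern; the names and the crash primitive below are stand-ins (V8 uses IMMEDIATE_CRASH() from src/base/immediate-crash.h):

#include <cstdlib>

// Crash immediately and non-recoverably, with no dependency on logging.
#if defined(__GNUC__) || defined(__clang__)
#define DEMO_IMMEDIATE_CRASH() __builtin_trap()
#else
#define DEMO_IMMEDIATE_CRASH() std::abort()
#endif

// Always-on check; the do/while wrapper keeps this sketch safe in if/else chains.
#define DEMO_CHECK(condition)                 \
  do {                                        \
    if (!(condition)) DEMO_IMMEDIATE_CRASH(); \
  } while (false)

// Debug-only check, compiled out in release builds like TH_DCHECK.
#ifdef DEBUG
#define DEMO_DCHECK(condition) DEMO_CHECK(condition)
#else
#define DEMO_DCHECK(condition) ((void)0)
#endif

int main() {
  DEMO_CHECK(1 + 1 == 2);           // passes
  DEMO_DCHECK(sizeof(void*) >= 4);  // no code at all in release builds
  return 0;
}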
diff --git a/chromium/v8/src/utils/allocation.cc b/chromium/v8/src/utils/allocation.cc
index db4d3ac13b6..0c7c468d821 100644
--- a/chromium/v8/src/utils/allocation.cc
+++ b/chromium/v8/src/utils/allocation.cc
@@ -5,19 +5,21 @@
#include "src/utils/allocation.h"
#include <stdlib.h> // For free, malloc.
+
#include "src/base/bits.h"
+#include "src/base/bounded-page-allocator.h"
#include "src/base/lazy-instance.h"
#include "src/base/logging.h"
#include "src/base/page-allocator.h"
#include "src/base/platform/platform.h"
+#include "src/base/sanitizer/lsan-page-allocator.h"
#include "src/flags/flags.h"
#include "src/init/v8.h"
-#include "src/sanitizer/lsan-page-allocator.h"
#include "src/utils/memcopy.h"
#include "src/utils/vector.h"
#if V8_LIBC_BIONIC
-#include <malloc.h> // NOLINT
+#include <malloc.h>
#include "src/base/platform/wrappers.h"
#endif
@@ -293,5 +295,137 @@ void VirtualMemory::FreeReadOnly() {
RoundUp(region.size(), page_allocator->AllocatePageSize())));
}
+VirtualMemoryCage::VirtualMemoryCage() = default;
+
+VirtualMemoryCage::~VirtualMemoryCage() { Free(); }
+
+VirtualMemoryCage::VirtualMemoryCage(VirtualMemoryCage&& other) V8_NOEXCEPT {
+ *this = std::move(other);
+}
+
+VirtualMemoryCage& VirtualMemoryCage::operator=(VirtualMemoryCage&& other)
+ V8_NOEXCEPT {
+ page_allocator_ = std::move(other.page_allocator_);
+ reservation_ = std::move(other.reservation_);
+ return *this;
+}
+
+namespace {
+inline Address VirtualMemoryCageStart(
+ Address reservation_start,
+ const VirtualMemoryCage::ReservationParams& params) {
+ return RoundUp(reservation_start + params.base_bias_size,
+ params.base_alignment) -
+ params.base_bias_size;
+}
+} // namespace
+
+bool VirtualMemoryCage::InitReservation(const ReservationParams& params) {
+ DCHECK(!reservation_.IsReserved());
+
+ const size_t allocate_page_size = params.page_allocator->AllocatePageSize();
+ CHECK(IsAligned(params.reservation_size, allocate_page_size));
+ CHECK(params.base_alignment == ReservationParams::kAnyBaseAlignment ||
+ (IsAligned(params.base_alignment, allocate_page_size) &&
+ IsAligned(params.base_bias_size, allocate_page_size)));
+ CHECK_LE(params.base_bias_size, params.reservation_size);
+
+ Address hint = RoundDown(params.requested_start_hint,
+ RoundUp(params.base_alignment, allocate_page_size)) -
+ RoundUp(params.base_bias_size, allocate_page_size);
+
+ if (params.base_alignment == ReservationParams::kAnyBaseAlignment) {
+ // When the base doesn't need to be aligned, the virtual memory reservation
+ // fails only due to OOM.
+ VirtualMemory reservation(params.page_allocator, params.reservation_size,
+ reinterpret_cast<void*>(hint));
+ if (!reservation.IsReserved()) return false;
+
+ reservation_ = std::move(reservation);
+ base_ = reservation_.address() + params.base_bias_size;
+ CHECK_EQ(reservation_.size(), params.reservation_size);
+ } else {
+ // Otherwise, we need to try harder by first overreserving
+ // in hopes of finding a correctly aligned address within the larger
+ // reservation.
+ const int kMaxAttempts = 4;
+ for (int attempt = 0; attempt < kMaxAttempts; ++attempt) {
+ // Reserve a region of twice the size so that there is an aligned address
+ // within it that's usable as the cage base.
+ VirtualMemory padded_reservation(params.page_allocator,
+ params.reservation_size * 2,
+ reinterpret_cast<void*>(hint));
+ if (!padded_reservation.IsReserved()) return false;
+
+ // Find properly aligned sub-region inside the reservation.
+ Address address =
+ VirtualMemoryCageStart(padded_reservation.address(), params);
+ CHECK(padded_reservation.InVM(address, params.reservation_size));
+
+#if defined(V8_OS_FUCHSIA)
+ // Fuchsia does not respect given hints so as a workaround we will use
+ // overreserved address space region instead of trying to re-reserve
+ // a subregion.
+ bool overreserve = true;
+#else
+ // For the last attempt use the overreserved region to avoid an OOM crash.
+ // This case can happen if there are many isolates being created in
+ // parallel that race for reserving the regions.
+ bool overreserve = (attempt == kMaxAttempts - 1);
+#endif
+
+ if (overreserve) {
+ if (padded_reservation.InVM(address, params.reservation_size)) {
+ reservation_ = std::move(padded_reservation);
+ base_ = address + params.base_bias_size;
+ break;
+ }
+ } else {
+ // Now free the padded reservation and immediately try to reserve an
+ // exact region at aligned address. We have to do this dancing because
+ // the reservation address requirement is more complex than just a
+ // certain alignment and not all operating systems support freeing parts
+ // of reserved address space regions.
+ padded_reservation.Free();
+
+ VirtualMemory reservation(params.page_allocator,
+ params.reservation_size,
+ reinterpret_cast<void*>(address));
+ if (!reservation.IsReserved()) return false;
+
+ // The reservation could still be somewhere else but we can accept it
+ // if it has the required alignment.
+ Address address = VirtualMemoryCageStart(reservation.address(), params);
+ if (reservation.address() == address) {
+ reservation_ = std::move(reservation);
+ base_ = address + params.base_bias_size;
+ CHECK_EQ(reservation_.size(), params.reservation_size);
+ break;
+ }
+ }
+ }
+ }
+ CHECK_NE(base_, kNullAddress);
+ CHECK(IsAligned(base_, params.base_alignment));
+
+ const Address allocatable_base = RoundUp(base_, params.page_size);
+ const size_t allocatable_size =
+ RoundDown(params.reservation_size - (allocatable_base - base_) -
+ params.base_bias_size,
+ params.page_size);
+ page_allocator_ = std::make_unique<base::BoundedPageAllocator>(
+ params.page_allocator, allocatable_base, allocatable_size,
+ params.page_size);
+ return true;
+}
+
+void VirtualMemoryCage::Free() {
+ if (IsReserved()) {
+ base_ = kNullAddress;
+ page_allocator_.reset();
+ reservation_.Free();
+ }
+}
+
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/utils/allocation.h b/chromium/v8/src/utils/allocation.h
index 6adefccf8d9..1d161b7e246 100644
--- a/chromium/v8/src/utils/allocation.h
+++ b/chromium/v8/src/utils/allocation.h
@@ -13,6 +13,11 @@
#include "src/init/v8.h"
namespace v8 {
+
+namespace base {
+class BoundedPageAllocator;
+} // namespace base
+
namespace internal {
class Isolate;
@@ -213,7 +218,7 @@ class VirtualMemory final {
v8::PageAllocator* page_allocator() { return page_allocator_; }
- base::AddressRegion region() const { return region_; }
+ const base::AddressRegion& region() const { return region_; }
// Returns the start address of the reserved memory.
// If the memory was reserved with an alignment, this address is not
@@ -250,7 +255,7 @@ class VirtualMemory final {
// can be called on a VirtualMemory that is itself not writable.
V8_EXPORT_PRIVATE void FreeReadOnly();
- bool InVM(Address address, size_t size) {
+ bool InVM(Address address, size_t size) const {
return region_.contains(address, size);
}
@@ -260,6 +265,102 @@ class VirtualMemory final {
base::AddressRegion region_;
};
+// Represents a VirtualMemory reservation along with a BoundedPageAllocator that
+// can be used to allocate within the reservation.
+//
+// Virtual memory cages are used for both the pointer compression cage and code
+// ranges (on platforms that require code ranges) and are configurable via
+// ReservationParams.
+//
+// +------------+-----------+----------- ~~~ -+
+// | ... | ... | ... |
+// +------------+-----------+------------ ~~~ -+
+// ^ ^ ^
+// start cage base allocatable base
+//
+// <------------> <------------------->
+// base bias size allocatable size
+// <-------------------------------------------->
+// reservation size
+//
+// - The reservation is made using ReservationParams::page_allocator.
+// - start is the start of the virtual memory reservation.
+// - cage base is the base address of the cage.
+// - allocatable base is the cage base rounded up to the nearest
+// ReservationParams::page_size, and is the start of the allocatable area for
+// the BoundedPageAllocator.
+//
+// - The base bias is configured by ReservationParams::base_bias_size.
+// - The reservation size is configured by ReservationParams::reservation_size.
+//
+// Additionally,
+// - The alignment of the cage base is configured by
+// ReservationParams::base_alignment.
+// - The page size of the BoundedPageAllocator is configured by
+// ReservationParams::page_size.
+// - A hint for the value of start can be passed by
+// ReservationParams::requested_start_hint.
+//
+// The configuration is subject to the following alignment requirements.
+// Below, AllocatePageSize is short for
+// ReservationParams::page_allocator->AllocatePageSize().
+//
+// - The reservation size must be AllocatePageSize-aligned.
+// - If the base alignment is not kAnyBaseAlignment, both the base alignment
+// and the base bias size must be AllocatePageSize-aligned.
+// - The base alignment may be kAnyBaseAlignment to denote any alignment is
+// acceptable. In this case the base bias size does not need to be aligned.
+class VirtualMemoryCage {
+ public:
+ VirtualMemoryCage();
+ virtual ~VirtualMemoryCage();
+
+ VirtualMemoryCage(const VirtualMemoryCage&) = delete;
+ VirtualMemoryCage& operator=(VirtualMemoryCage&) = delete;
+
+ VirtualMemoryCage(VirtualMemoryCage&& other) V8_NOEXCEPT;
+ VirtualMemoryCage& operator=(VirtualMemoryCage&& other) V8_NOEXCEPT;
+
+ Address base() const { return base_; }
+
+ base::BoundedPageAllocator* page_allocator() const {
+ return page_allocator_.get();
+ }
+
+ VirtualMemory* reservation() { return &reservation_; }
+ const VirtualMemory* reservation() const { return &reservation_; }
+
+ bool IsReserved() const {
+ DCHECK_EQ(base_ != kNullAddress, reservation_.IsReserved());
+ return reservation_.IsReserved();
+ }
+
+ struct ReservationParams {
+ // The allocator to use to reserve the virtual memory.
+ v8::PageAllocator* page_allocator;
+ // See diagram above.
+ size_t reservation_size;
+ size_t base_alignment;
+ size_t base_bias_size;
+ size_t page_size;
+ Address requested_start_hint;
+
+ static constexpr size_t kAnyBaseAlignment = 1;
+ };
+
+ // Several attempts are made to reserve a region that satisfies the
+ // constraints in params, but this may fail. The resulting base address may
+ // differ from the one requested.
+ bool InitReservation(const ReservationParams& params);
+
+ void Free();
+
+ protected:
+ Address base_ = kNullAddress;
+ std::unique_ptr<base::BoundedPageAllocator> page_allocator_;
+ VirtualMemory reservation_;
+};
+
} // namespace internal
} // namespace v8
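The diagram in the comment above pins down how the cage base and the allocatable region are derived from the reservation. A standalone sketch of that arithmetic with made-up sizes; the real values come from ReservationParams and an actual PageAllocator reservation:

#include <cassert>
#include <cstdint>
#include <cstdio>

constexpr uint64_t RoundUp(uint64_t x, uint64_t align) {
  return (x + align - 1) / align * align;
}
constexpr uint64_t RoundDown(uint64_t x, uint64_t align) {
  return x / align * align;
}

int main() {
  // Made-up parameters in the spirit of ReservationParams.
  const uint64_t start = 0x10000123000ull;      // start of the reservation
  const uint64_t reservation_size = 0x4000000;  // 64 MiB
  const uint64_t base_bias_size = 0x1000;       // bytes between start and cage base
  const uint64_t base_alignment = 0x100000;     // required alignment of the cage base
  const uint64_t page_size = 0x10000;           // BoundedPageAllocator page size

  // Cage base: smallest base_alignment-aligned address >= start + bias.
  // cage_start is where the effective region begins inside the reservation.
  const uint64_t cage_base = RoundUp(start + base_bias_size, base_alignment);
  const uint64_t cage_start = cage_base - base_bias_size;
  assert(cage_base % base_alignment == 0);
  assert(cage_start >= start);

  // Allocatable base and size handed to the BoundedPageAllocator, following
  // the computation at the end of InitReservation.
  const uint64_t allocatable_base = RoundUp(cage_base, page_size);
  const uint64_t allocatable_size = RoundDown(
      reservation_size - (allocatable_base - cage_base) - base_bias_size,
      page_size);

  std::printf("cage base        0x%llx\n", (unsigned long long)cage_base);
  std::printf("allocatable base 0x%llx\n", (unsigned long long)allocatable_base);
  std::printf("allocatable size 0x%llx\n", (unsigned long long)allocatable_size);
  return 0;
}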
diff --git a/chromium/v8/src/utils/ostreams.cc b/chromium/v8/src/utils/ostreams.cc
index b58f51159b8..a2a6f8574f8 100644
--- a/chromium/v8/src/utils/ostreams.cc
+++ b/chromium/v8/src/utils/ostreams.cc
@@ -19,7 +19,7 @@
#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
#define LOG_TAG "v8"
-#include <android/log.h> // NOLINT
+#include <android/log.h>
#endif
namespace v8 {
diff --git a/chromium/v8/src/utils/ostreams.h b/chromium/v8/src/utils/ostreams.h
index 899c85fd94c..2dfedb5fd95 100644
--- a/chromium/v8/src/utils/ostreams.h
+++ b/chromium/v8/src/utils/ostreams.h
@@ -8,7 +8,7 @@
#include <cstddef>
#include <cstdio>
#include <cstring>
-#include <ostream> // NOLINT
+#include <ostream>
#include <streambuf>
#include "include/v8config.h"
@@ -158,10 +158,10 @@ struct PrintIteratorRange {
// Print any collection which can be iterated via std::begin and std::end.
// {Iterator} is the common type of {std::begin} and {std::end} called on a
// {const T&}. This function is only instantiable if that type exists.
-template <typename T, typename Iterator = typename std::common_type<
- decltype(std::begin(std::declval<const T&>())),
- decltype(std::end(std::declval<const T&>()))>::type>
-PrintIteratorRange<Iterator> PrintCollection(const T& collection) {
+template <typename T>
+auto PrintCollection(const T& collection) -> PrintIteratorRange<
+ typename std::common_type<decltype(std::begin(collection)),
+ decltype(std::end(collection))>::type> {
return {std::begin(collection), std::end(collection)};
}
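PrintCollection is rewritten above with a trailing return type, so the printable-range type is deduced directly from the collection argument. A simplified standalone re-creation of the idiom; PrintIteratorRange and the operator<< below are stand-ins, not the V8 declarations:

#include <iostream>
#include <iterator>
#include <type_traits>
#include <vector>

// Holds the begin/end iterators of whatever collection is being printed.
template <typename Iterator>
struct PrintIteratorRange {
  Iterator start;
  Iterator end;
};

template <typename Iterator>
std::ostream& operator<<(std::ostream& os, const PrintIteratorRange<Iterator>& range) {
  os << '[';
  const char* sep = "";
  for (Iterator it = range.start; it != range.end; ++it) {
    os << sep << *it;
    sep = ", ";
  }
  return os << ']';
}

// The return type is computed from the parameter itself, so this overload only
// exists for types on which std::begin and std::end are valid.
template <typename T>
auto PrintCollection(const T& collection) -> PrintIteratorRange<
    typename std::common_type<decltype(std::begin(collection)),
                              decltype(std::end(collection))>::type> {
  return {std::begin(collection), std::end(collection)};
}

int main() {
  std::vector<int> v{1, 2, 3};
  int raw[] = {4, 5, 6};
  std::cout << PrintCollection(v) << ' ' << PrintCollection(raw) << '\n';
  return 0;
}

As in the V8 header, both standard containers and raw arrays work, since std::begin and std::end apply to either.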
diff --git a/chromium/v8/src/utils/v8dll-main.cc b/chromium/v8/src/utils/v8dll-main.cc
index 255f0d8dbff..6d7f390c8f6 100644
--- a/chromium/v8/src/utils/v8dll-main.cc
+++ b/chromium/v8/src/utils/v8dll-main.cc
@@ -4,7 +4,7 @@
// The GYP based build ends up defining USING_V8_SHARED when compiling this
// file.
-#undef USING_V8_SHARED // NOLINT
+#undef USING_V8_SHARED
#include "include/v8.h"
#if V8_OS_WIN
diff --git a/chromium/v8/src/utils/vector.h b/chromium/v8/src/utils/vector.h
index dad7abf0273..d085f14ab0e 100644
--- a/chromium/v8/src/utils/vector.h
+++ b/chromium/v8/src/utils/vector.h
@@ -28,7 +28,7 @@ class Vector {
constexpr Vector() : start_(nullptr), length_(0) {}
constexpr Vector(T* data, size_t length) : start_(data), length_(length) {
- CONSTEXPR_DCHECK(length == 0 || data != nullptr);
+ DCHECK(length == 0 || data != nullptr);
}
static Vector<T> New(size_t length) {
diff --git a/chromium/v8/src/wasm/OWNERS b/chromium/v8/src/wasm/OWNERS
index d0de7de9357..516dd84d6ee 100644
--- a/chromium/v8/src/wasm/OWNERS
+++ b/chromium/v8/src/wasm/OWNERS
@@ -3,5 +3,6 @@ bbudge@chromium.org
clemensb@chromium.org
gdeepti@chromium.org
jkummerow@chromium.org
+manoskouk@chromium.org
thibaudm@chromium.org
zhin@chromium.org
diff --git a/chromium/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h b/chromium/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
index 62917ab0a34..acc7f08fa07 100644
--- a/chromium/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
+++ b/chromium/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
@@ -605,6 +605,8 @@ void LiftoffAssembler::SpillInstance(Register instance) {
str(instance, liftoff::GetInstanceOperand());
}
+void LiftoffAssembler::ResetOSRTarget() {}
+
void LiftoffAssembler::FillInstanceInto(Register dst) {
ldr(dst, liftoff::GetInstanceOperand());
}
@@ -743,7 +745,7 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
: MemOperand(dst_addr, actual_offset_reg);
str(src.gp(), dst_op);
- if (skip_write_barrier) return;
+ if (skip_write_barrier || FLAG_disable_write_barriers) return;
// The write barrier.
Label write_barrier;
@@ -758,7 +760,7 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
CallRecordWriteStub(dst_addr,
actual_offset_reg == no_reg ? Operand(offset_imm)
: Operand(actual_offset_reg),
- EMIT_REMEMBERED_SET, kSaveFPRegs,
+ RememberedSetAction::kEmit, SaveFPRegsMode::kSave,
wasm::WasmCode::kRecordWrite);
bind(&exit);
}
@@ -4227,6 +4229,8 @@ void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
add(sp, sp, Operand(size));
}
+void LiftoffAssembler::MaybeOSR() {}
+
void LiftoffStackSlots::Construct(int param_slots) {
DCHECK_LT(0, slots_.size());
SortInPushOrder();
diff --git a/chromium/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h b/chromium/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
index 39ef8528e52..63493edbd30 100644
--- a/chromium/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
+++ b/chromium/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
@@ -127,37 +127,32 @@ template <typename T>
inline MemOperand GetMemOp(LiftoffAssembler* assm,
UseScratchRegisterScope* temps, Register addr,
Register offset, T offset_imm) {
- if (offset.is_valid()) {
- if (offset_imm == 0) return MemOperand(addr.X(), offset.W(), UXTW);
- Register tmp = temps->AcquireX();
- DCHECK_GE(kMaxUInt32, offset_imm);
- assm->Add(tmp, offset.X(), offset_imm);
- return MemOperand(addr.X(), tmp);
- }
- return MemOperand(addr.X(), offset_imm);
-}
-
-// Certain load instructions do not support offset (register or immediate).
-// This creates a MemOperand that is suitable for such instructions by adding
-// |addr|, |offset| (if needed), and |offset_imm| into a temporary.
-inline MemOperand GetMemOpWithImmOffsetZero(LiftoffAssembler* assm,
- UseScratchRegisterScope* temps,
- Register addr, Register offset,
- uintptr_t offset_imm) {
+ if (!offset.is_valid()) return MemOperand(addr.X(), offset_imm);
+ Register effective_addr = addr.X();
+ if (offset_imm) {
+ effective_addr = temps->AcquireX();
+ assm->Add(effective_addr, addr.X(), offset_imm);
+ }
+ return MemOperand(effective_addr, offset.W(), UXTW);
+}
+
+// Compute the effective address (sum of |addr|, |offset| (if given) and
+// |offset_imm|) into a temporary register. This is needed for certain load
+// instructions that do not support an offset (register or immediate).
+// Returns |addr| if both |offset| and |offset_imm| are zero.
+inline Register GetEffectiveAddress(LiftoffAssembler* assm,
+ UseScratchRegisterScope* temps,
+ Register addr, Register offset,
+ uintptr_t offset_imm) {
+ if (!offset.is_valid() && offset_imm == 0) return addr;
Register tmp = temps->AcquireX();
if (offset.is_valid()) {
- // offset has passed BoundsCheckMem in liftoff-compiler, and been unsigned
- // extended, so it is fine to use the full width of the register.
- assm->Add(tmp, addr, offset);
- if (offset_imm != 0) {
- assm->Add(tmp, tmp, offset_imm);
- }
- } else {
- if (offset_imm != 0) {
- assm->Add(tmp, addr, offset_imm);
- }
+ // TODO(clemensb): This needs adaption for memory64.
+ assm->Add(tmp, addr, Operand(offset, UXTW));
+ addr = tmp;
}
- return MemOperand(tmp.X(), 0);
+ if (offset_imm != 0) assm->Add(tmp, addr, offset_imm);
+ return tmp;
}
enum class ShiftDirection : bool { kLeft, kRight };
@@ -430,6 +425,8 @@ void LiftoffAssembler::SpillInstance(Register instance) {
Str(instance, liftoff::GetInstanceOperand());
}
+void LiftoffAssembler::ResetOSRTarget() {}
+
void LiftoffAssembler::FillInstanceInto(Register dst) {
Ldr(dst, liftoff::GetInstanceOperand());
}
@@ -458,13 +455,20 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
LiftoffRegister src,
LiftoffRegList pinned,
SkipWriteBarrier skip_write_barrier) {
- // Store the value.
UseScratchRegisterScope temps(this);
- MemOperand dst_op =
- liftoff::GetMemOp(this, &temps, dst_addr, offset_reg, offset_imm);
- StoreTaggedField(src.gp(), dst_op);
+ Operand offset_op = offset_reg.is_valid() ? Operand(offset_reg.W(), UXTW)
+ : Operand(offset_imm);
+ // For the write barrier (below), we cannot have both an offset register and
+ // an immediate offset, so combine them into a single 32-bit offset, held in a
+ // 64-bit register because that is what the MemOperand below needs.
+ if (offset_reg.is_valid() && offset_imm) {
+ Register effective_offset = temps.AcquireX();
+ Add(effective_offset.W(), offset_reg.W(), offset_imm);
+ offset_op = effective_offset;
+ }
+ StoreTaggedField(src.gp(), MemOperand(dst_addr.X(), offset_op));
- if (skip_write_barrier) return;
+ if (skip_write_barrier || FLAG_disable_write_barriers) return;
// The write barrier.
Label write_barrier;
@@ -479,11 +483,8 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
}
CheckPageFlag(src.gp(), MemoryChunk::kPointersToHereAreInterestingMask, ne,
&exit);
- CallRecordWriteStub(
- dst_addr,
- dst_op.IsRegisterOffset() ? Operand(dst_op.regoffset().X())
- : Operand(dst_op.offset()),
- EMIT_REMEMBERED_SET, kSaveFPRegs, wasm::WasmCode::kRecordWrite);
+ CallRecordWriteStub(dst_addr, offset_op, RememberedSetAction::kEmit,
+ SaveFPRegsMode::kSave, wasm::WasmCode::kRecordWrite);
bind(&exit);
}
@@ -1333,7 +1334,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
LiftoffRegister src, Label* trap) {
switch (opcode) {
case kExprI32ConvertI64:
- if (src != dst) Mov(dst.gp().W(), src.gp().W());
+ Mov(dst.gp().W(), src.gp().W());
return true;
case kExprI32SConvertF32:
Fcvtzs(dst.gp().W(), src.fp().S()); // f32 -> i32 round to zero.
@@ -1491,11 +1492,11 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
}
void LiftoffAssembler::emit_i32_signextend_i8(Register dst, Register src) {
- sxtb(dst, src);
+ sxtb(dst.W(), src.W());
}
void LiftoffAssembler::emit_i32_signextend_i16(Register dst, Register src) {
- sxth(dst, src);
+ sxth(dst.W(), src.W());
}
void LiftoffAssembler::emit_i64_signextend_i8(LiftoffRegister dst,
@@ -1629,8 +1630,8 @@ void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
UseScratchRegisterScope temps(this);
MemOperand src_op =
transform == LoadTransformationKind::kSplat
- ? liftoff::GetMemOpWithImmOffsetZero(this, &temps, src_addr,
- offset_reg, offset_imm)
+ ? MemOperand{liftoff::GetEffectiveAddress(this, &temps, src_addr,
+ offset_reg, offset_imm)}
: liftoff::GetMemOp(this, &temps, src_addr, offset_reg, offset_imm);
*protected_load_pc = pc_offset();
MachineType memtype = type.mem_type();
@@ -1681,8 +1682,8 @@ void LiftoffAssembler::LoadLane(LiftoffRegister dst, LiftoffRegister src,
uintptr_t offset_imm, LoadType type,
uint8_t laneidx, uint32_t* protected_load_pc) {
UseScratchRegisterScope temps(this);
- MemOperand src_op = liftoff::GetMemOpWithImmOffsetZero(
- this, &temps, addr, offset_reg, offset_imm);
+ MemOperand src_op{
+ liftoff::GetEffectiveAddress(this, &temps, addr, offset_reg, offset_imm)};
*protected_load_pc = pc_offset();
MachineType mem_type = type.mem_type();
@@ -1708,8 +1709,8 @@ void LiftoffAssembler::StoreLane(Register dst, Register offset,
StoreType type, uint8_t lane,
uint32_t* protected_store_pc) {
UseScratchRegisterScope temps(this);
- MemOperand dst_op =
- liftoff::GetMemOpWithImmOffsetZero(this, &temps, dst, offset, offset_imm);
+ MemOperand dst_op{
+ liftoff::GetEffectiveAddress(this, &temps, dst, offset, offset_imm)};
if (protected_store_pc) *protected_store_pc = pc_offset();
MachineRepresentation rep = type.mem_rep();
@@ -3221,6 +3222,8 @@ void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
Drop(size, 1);
}
+void LiftoffAssembler::MaybeOSR() {}
+
void LiftoffStackSlots::Construct(int param_slots) {
DCHECK_LT(0, slots_.size());
// The stack pointer is required to be quadword aligned.
diff --git a/chromium/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h b/chromium/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
index 83b00d4a2ad..9f35b5efc3f 100644
--- a/chromium/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
+++ b/chromium/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
@@ -332,6 +332,8 @@ void LiftoffAssembler::SpillInstance(Register instance) {
mov(liftoff::GetInstanceOperand(), instance);
}
+void LiftoffAssembler::ResetOSRTarget() {}
+
void LiftoffAssembler::FillInstanceInto(Register dst) {
mov(dst, liftoff::GetInstanceOperand());
}
@@ -365,7 +367,7 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
: Operand(dst_addr, offset_reg, times_1, offset_imm);
mov(dst_op, src.gp());
- if (skip_write_barrier) return;
+ if (skip_write_barrier || FLAG_disable_write_barriers) return;
Register scratch = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
Label write_barrier;
@@ -380,8 +382,8 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
MemoryChunk::kPointersToHereAreInterestingMask, zero, &exit,
Label::kNear);
lea(scratch, dst_op);
- CallRecordWriteStub(dst_addr, scratch, EMIT_REMEMBERED_SET, kSaveFPRegs,
- wasm::WasmCode::kRecordWrite);
+ CallRecordWriteStub(dst_addr, scratch, RememberedSetAction::kEmit,
+ SaveFPRegsMode::kSave, wasm::WasmCode::kRecordWrite);
bind(&exit);
}
@@ -2932,15 +2934,7 @@ void LiftoffAssembler::emit_i64x2_splat(LiftoffRegister dst,
void LiftoffAssembler::emit_f32x4_splat(LiftoffRegister dst,
LiftoffRegister src) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vshufps(dst.fp(), src.fp(), src.fp(), 0);
- } else {
- if (dst.fp() != src.fp()) {
- movss(dst.fp(), src.fp());
- }
- shufps(dst.fp(), src.fp(), 0);
- }
+ F32x4Splat(dst.fp(), src.fp());
}
void LiftoffAssembler::emit_f64x2_splat(LiftoffRegister dst,
@@ -3262,13 +3256,7 @@ void LiftoffAssembler::emit_s128_const(LiftoffRegister dst,
}
void LiftoffAssembler::emit_s128_not(LiftoffRegister dst, LiftoffRegister src) {
- if (dst.fp() != src.fp()) {
- Pcmpeqd(dst.fp(), dst.fp());
- Pxor(dst.fp(), src.fp());
- } else {
- Pcmpeqd(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg);
- Pxor(dst.fp(), liftoff::kScratchDoubleReg);
- }
+ S128Not(dst.fp(), src.fp(), liftoff::kScratchDoubleReg);
}
void LiftoffAssembler::emit_s128_and(LiftoffRegister dst, LiftoffRegister lhs,
@@ -3849,16 +3837,7 @@ void LiftoffAssembler::emit_i32x4_extmul_high_i16x8_u(LiftoffRegister dst,
void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst,
LiftoffRegister src) {
- DoubleRegister reg =
- dst.fp() == src.fp() ? liftoff::kScratchDoubleReg : dst.fp();
- Pxor(reg, reg);
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpsubq(dst.fp(), reg, src.fp());
- } else {
- psubq(reg, src.fp());
- if (dst.fp() != reg) movaps(dst.fp(), reg);
- }
+ I64x2Neg(dst.fp(), src.fp(), liftoff::kScratchDoubleReg);
}
void LiftoffAssembler::emit_i64x2_alltrue(LiftoffRegister dst,
@@ -3892,7 +3871,7 @@ void LiftoffAssembler::emit_i64x2_shr_s(LiftoffRegister dst,
// Set up a mask [0x80000000,0,0x80000000,0].
Pcmpeqb(tmp, tmp);
- Psllq(tmp, tmp, 63);
+ Psllq(tmp, tmp, byte{63});
Psrlq(tmp, tmp, shift);
if (CpuFeatures::IsSupported(AVX)) {
@@ -3911,11 +3890,11 @@ void LiftoffAssembler::emit_i64x2_shr_s(LiftoffRegister dst,
void LiftoffAssembler::emit_i64x2_shri_s(LiftoffRegister dst,
LiftoffRegister lhs, int32_t rhs) {
XMMRegister tmp = liftoff::kScratchDoubleReg;
- int32_t shift = rhs & 63;
+ byte shift = rhs & 63;
// Set up a mask [0x80000000,0,0x80000000,0].
Pcmpeqb(tmp, tmp);
- Psllq(tmp, tmp, 63);
+ Psllq(tmp, tmp, byte{63});
Psrlq(tmp, tmp, shift);
liftoff::EmitSimdShiftOpImm<&Assembler::vpsrlq, &Assembler::psrlq, 6>(
@@ -3959,13 +3938,13 @@ void LiftoffAssembler::emit_i64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
Movaps(tmp1.fp(), lhs.fp());
Movaps(tmp2.fp(), rhs.fp());
// Multiply high dword of each qword of left with right.
- Psrlq(tmp1.fp(), 32);
+ Psrlq(tmp1.fp(), byte{32});
Pmuludq(tmp1.fp(), tmp1.fp(), rhs.fp());
// Multiply high dword of each qword of right with left.
- Psrlq(tmp2.fp(), 32);
+ Psrlq(tmp2.fp(), byte{32});
Pmuludq(tmp2.fp(), tmp2.fp(), lhs.fp());
Paddq(tmp2.fp(), tmp2.fp(), tmp1.fp());
- Psllq(tmp2.fp(), tmp2.fp(), 32);
+ Psllq(tmp2.fp(), tmp2.fp(), byte{32});
liftoff::EmitSimdCommutativeBinOp<&Assembler::vpmuludq, &Assembler::pmuludq>(
this, dst, lhs, rhs);
Paddq(dst.fp(), dst.fp(), tmp2.fp());
@@ -4028,11 +4007,11 @@ void LiftoffAssembler::emit_f32x4_abs(LiftoffRegister dst,
LiftoffRegister src) {
if (dst.fp() == src.fp()) {
Pcmpeqd(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg);
- Psrld(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg, 1);
+ Psrld(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg, byte{1});
Andps(dst.fp(), liftoff::kScratchDoubleReg);
} else {
Pcmpeqd(dst.fp(), dst.fp());
- Psrld(dst.fp(), dst.fp(), 1);
+ Psrld(dst.fp(), dst.fp(), byte{1});
Andps(dst.fp(), src.fp());
}
}
@@ -4041,11 +4020,11 @@ void LiftoffAssembler::emit_f32x4_neg(LiftoffRegister dst,
LiftoffRegister src) {
if (dst.fp() == src.fp()) {
Pcmpeqd(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg);
- Pslld(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg, 31);
+ Pslld(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg, byte{31});
Xorps(dst.fp(), liftoff::kScratchDoubleReg);
} else {
Pcmpeqd(dst.fp(), dst.fp());
- Pslld(dst.fp(), dst.fp(), 31);
+ Pslld(dst.fp(), dst.fp(), byte{31});
Xorps(dst.fp(), src.fp());
}
}
@@ -4184,11 +4163,11 @@ void LiftoffAssembler::emit_f64x2_abs(LiftoffRegister dst,
LiftoffRegister src) {
if (dst.fp() == src.fp()) {
Pcmpeqd(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg);
- Psrlq(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg, 1);
+ Psrlq(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg, byte{1});
Andpd(dst.fp(), liftoff::kScratchDoubleReg);
} else {
Pcmpeqd(dst.fp(), dst.fp());
- Psrlq(dst.fp(), dst.fp(), 1);
+ Psrlq(dst.fp(), dst.fp(), byte{1});
Andpd(dst.fp(), src.fp());
}
}
@@ -4197,11 +4176,11 @@ void LiftoffAssembler::emit_f64x2_neg(LiftoffRegister dst,
LiftoffRegister src) {
if (dst.fp() == src.fp()) {
Pcmpeqd(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg);
- Psllq(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg, 63);
+ Psllq(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg, byte{63});
Xorpd(dst.fp(), liftoff::kScratchDoubleReg);
} else {
Pcmpeqd(dst.fp(), dst.fp());
- Psllq(dst.fp(), dst.fp(), 63);
+ Psllq(dst.fp(), dst.fp(), byte{63});
Xorpd(dst.fp(), src.fp());
}
}
@@ -4265,61 +4244,12 @@ void LiftoffAssembler::emit_f64x2_div(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_f64x2_min(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- // The minpd instruction doesn't propagate NaNs and +0's in its first
- // operand. Perform minpd in both orders, merge the results, and adjust.
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vminpd(liftoff::kScratchDoubleReg, lhs.fp(), rhs.fp());
- vminpd(dst.fp(), rhs.fp(), lhs.fp());
- } else if (dst.fp() == lhs.fp() || dst.fp() == rhs.fp()) {
- XMMRegister src = dst.fp() == lhs.fp() ? rhs.fp() : lhs.fp();
- movaps(liftoff::kScratchDoubleReg, src);
- minpd(liftoff::kScratchDoubleReg, dst.fp());
- minpd(dst.fp(), src);
- } else {
- movaps(liftoff::kScratchDoubleReg, lhs.fp());
- minpd(liftoff::kScratchDoubleReg, rhs.fp());
- movaps(dst.fp(), rhs.fp());
- minpd(dst.fp(), lhs.fp());
- }
- // propagate -0's and NaNs, which may be non-canonical.
- Orpd(liftoff::kScratchDoubleReg, dst.fp());
- // Canonicalize NaNs by quieting and clearing the payload.
- Cmpunordpd(dst.fp(), dst.fp(), liftoff::kScratchDoubleReg);
- Orpd(liftoff::kScratchDoubleReg, dst.fp());
- Psrlq(dst.fp(), 13);
- Andnpd(dst.fp(), liftoff::kScratchDoubleReg);
+ F64x2Min(dst.fp(), lhs.fp(), rhs.fp(), liftoff::kScratchDoubleReg);
}
void LiftoffAssembler::emit_f64x2_max(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- // The maxpd instruction doesn't propagate NaNs and +0's in its first
- // operand. Perform maxpd in both orders, merge the results, and adjust.
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vmaxpd(liftoff::kScratchDoubleReg, lhs.fp(), rhs.fp());
- vmaxpd(dst.fp(), rhs.fp(), lhs.fp());
- } else if (dst.fp() == lhs.fp() || dst.fp() == rhs.fp()) {
- XMMRegister src = dst.fp() == lhs.fp() ? rhs.fp() : lhs.fp();
- movaps(liftoff::kScratchDoubleReg, src);
- maxpd(liftoff::kScratchDoubleReg, dst.fp());
- maxpd(dst.fp(), src);
- } else {
- movaps(liftoff::kScratchDoubleReg, lhs.fp());
- maxpd(liftoff::kScratchDoubleReg, rhs.fp());
- movaps(dst.fp(), rhs.fp());
- maxpd(dst.fp(), lhs.fp());
- }
- // Find discrepancies.
- Xorpd(dst.fp(), liftoff::kScratchDoubleReg);
- // Propagate NaNs, which may be non-canonical.
- Orpd(liftoff::kScratchDoubleReg, dst.fp());
- // Propagate sign discrepancy and (subtle) quiet NaNs.
- Subpd(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg, dst.fp());
- // Canonicalize NaNs by clearing the payload. Sign is non-deterministic.
- Cmpunordpd(dst.fp(), dst.fp(), liftoff::kScratchDoubleReg);
- Psrlq(dst.fp(), 13);
- Andnpd(dst.fp(), liftoff::kScratchDoubleReg);
+ F64x2Max(dst.fp(), lhs.fp(), rhs.fp(), liftoff::kScratchDoubleReg);
}
void LiftoffAssembler::emit_f64x2_pmin(LiftoffRegister dst, LiftoffRegister lhs,
@@ -4616,25 +4546,13 @@ void LiftoffAssembler::emit_i64x2_extract_lane(LiftoffRegister dst,
void LiftoffAssembler::emit_f32x4_extract_lane(LiftoffRegister dst,
LiftoffRegister lhs,
uint8_t imm_lane_idx) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vshufps(dst.fp(), lhs.fp(), lhs.fp(), imm_lane_idx);
- } else {
- if (dst.fp() != lhs.fp()) movaps(dst.fp(), lhs.fp());
- if (imm_lane_idx != 0) shufps(dst.fp(), dst.fp(), imm_lane_idx);
- }
+ F32x4ExtractLane(dst.fp(), lhs.fp(), imm_lane_idx);
}
void LiftoffAssembler::emit_f64x2_extract_lane(LiftoffRegister dst,
LiftoffRegister lhs,
uint8_t imm_lane_idx) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vshufpd(dst.fp(), lhs.fp(), lhs.fp(), imm_lane_idx);
- } else {
- if (dst.fp() != lhs.fp()) movaps(dst.fp(), lhs.fp());
- if (imm_lane_idx != 0) shufpd(dst.fp(), dst.fp(), imm_lane_idx);
- }
+ F64x2ExtractLane(dst.fp(), lhs.fp(), imm_lane_idx);
}
void LiftoffAssembler::emit_i8x16_replace_lane(LiftoffRegister dst,
@@ -4712,27 +4630,7 @@ void LiftoffAssembler::emit_f64x2_replace_lane(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2,
uint8_t imm_lane_idx) {
- // TODO(fanchenk): Use movlhps and blendpd
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- if (imm_lane_idx == 0) {
- vinsertps(dst.fp(), src1.fp(), src2.fp(), 0b00000000);
- vinsertps(dst.fp(), dst.fp(), src2.fp(), 0b01010000);
- } else {
- vinsertps(dst.fp(), src1.fp(), src2.fp(), 0b00100000);
- vinsertps(dst.fp(), dst.fp(), src2.fp(), 0b01110000);
- }
- } else {
- CpuFeatureScope scope(this, SSE4_1);
- if (dst.fp() != src1.fp()) movaps(dst.fp(), src1.fp());
- if (imm_lane_idx == 0) {
- insertps(dst.fp(), src2.fp(), 0b00000000);
- insertps(dst.fp(), src2.fp(), 0b01010000);
- } else {
- insertps(dst.fp(), src2.fp(), 0b00100000);
- insertps(dst.fp(), src2.fp(), 0b01110000);
- }
- }
+ F64x2ReplaceLane(dst.fp(), src1.fp(), src2.fp(), imm_lane_idx);
}
void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
@@ -4906,6 +4804,8 @@ void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
add(esp, Immediate(size));
}
+void LiftoffAssembler::MaybeOSR() {}
+
void LiftoffStackSlots::Construct(int param_slots) {
DCHECK_LT(0, slots_.size());
SortInPushOrder();
diff --git a/chromium/v8/src/wasm/baseline/liftoff-assembler.cc b/chromium/v8/src/wasm/baseline/liftoff-assembler.cc
index a544460ab98..f8b01ac960d 100644
--- a/chromium/v8/src/wasm/baseline/liftoff-assembler.cc
+++ b/chromium/v8/src/wasm/baseline/liftoff-assembler.cc
@@ -738,22 +738,36 @@ void LiftoffAssembler::MergeStackWith(CacheState& target, uint32_t arity,
cache_state_.stack_state[stack_base + i]);
}
+ // Check whether the cached instance needs to be moved to another register.
+ // Register moves are executed as part of the {StackTransferRecipe}. Remember
+ // whether the register content has to be reloaded after executing the stack
+ // transfers.
+ bool reload_instance = false;
+ // If the registers match, or the destination has no cache register, nothing
+ // needs to be done.
if (cache_state_.cached_instance != target.cached_instance &&
target.cached_instance != no_reg) {
+ // On forward jumps, just reset the cached register in the target state.
if (jump_direction == kForwardJump) {
- // On forward jumps, just reset the cached instance in the target state.
target.ClearCachedInstanceRegister();
+ } else if (cache_state_.cached_instance != no_reg) {
+ // If the source has the content but in the wrong register, execute a
+ // register move as part of the stack transfer.
+ transfers.MoveRegister(LiftoffRegister{target.cached_instance},
+ LiftoffRegister{cache_state_.cached_instance},
+ kPointerKind);
} else {
- // On backward jumps, we already generated code assuming that the instance
- // is available in that register. Thus move it there.
- if (cache_state_.cached_instance == no_reg) {
- LoadInstanceFromFrame(target.cached_instance);
- } else {
- Move(target.cached_instance, cache_state_.cached_instance,
- kPointerKind);
- }
+ // Otherwise (the source state has no cached content), we reload later.
+ reload_instance = true;
}
}
+
+ // Now execute stack transfers and register moves/loads.
+ transfers.Execute();
+
+ if (reload_instance) {
+ LoadInstanceFromFrame(target.cached_instance);
+ }
}
void LiftoffAssembler::Spill(VarState* slot) {
diff --git a/chromium/v8/src/wasm/baseline/liftoff-assembler.h b/chromium/v8/src/wasm/baseline/liftoff-assembler.h
index 3090bc81659..b0439dc4e10 100644
--- a/chromium/v8/src/wasm/baseline/liftoff-assembler.h
+++ b/chromium/v8/src/wasm/baseline/liftoff-assembler.h
@@ -341,6 +341,11 @@ class LiftoffAssembler : public TurboAssembler {
}
void clear_used(LiftoffRegister reg) {
+ if (reg.is_pair()) {
+ clear_used(reg.low());
+ clear_used(reg.high());
+ return;
+ }
register_use_count[reg.liftoff_code()] = 0;
used_registers.clear(reg);
}
@@ -633,6 +638,7 @@ class LiftoffAssembler : public TurboAssembler {
inline void LoadTaggedPointerFromInstance(Register dst, Register instance,
int offset);
inline void SpillInstance(Register instance);
+ inline void ResetOSRTarget();
inline void FillInstanceInto(Register dst);
inline void LoadTaggedPointer(Register dst, Register src_addr,
Register offset_reg, int32_t offset_imm,
@@ -1416,6 +1422,9 @@ class LiftoffAssembler : public TurboAssembler {
inline void AllocateStackSlot(Register addr, uint32_t size);
inline void DeallocateStackSlot(uint32_t size);
+ // Instrumentation for shadow-stack-compatible OSR on x64.
+ inline void MaybeOSR();
+
////////////////////////////////////
// End of platform-specific part. //
////////////////////////////////////
diff --git a/chromium/v8/src/wasm/baseline/liftoff-compiler.cc b/chromium/v8/src/wasm/baseline/liftoff-compiler.cc
index a26df172252..e6048374015 100644
--- a/chromium/v8/src/wasm/baseline/liftoff-compiler.cc
+++ b/chromium/v8/src/wasm/baseline/liftoff-compiler.cc
@@ -10,7 +10,7 @@
#include "src/codegen/assembler-inl.h"
// TODO(clemensb): Remove dependences on compiler stuff.
#include "src/codegen/external-reference.h"
-#include "src/codegen/interface-descriptors.h"
+#include "src/codegen/interface-descriptors-inl.h"
#include "src/codegen/machine-type.h"
#include "src/codegen/macro-assembler-inl.h"
#include "src/compiler/linkage.h"
@@ -75,7 +75,7 @@ struct assert_field_size {
__ LoadTaggedPointerFromInstance(dst, LoadInstanceIntoRegister(pinned, dst), \
WASM_INSTANCE_OBJECT_FIELD_OFFSET(name));
-#ifdef DEBUG
+#ifdef V8_CODE_COMMENTS
#define DEBUG_CODE_COMMENT(str) \
do { \
__ RecordComment(str); \
@@ -151,12 +151,7 @@ constexpr LiftoffCondition GetCompareCondition(WasmOpcode opcode) {
case kExprI32GeU:
return kUnsignedGreaterEqual;
default:
-#if V8_HAS_CXX14_CONSTEXPR
UNREACHABLE();
-#else
- // We need to return something for old compilers here.
- return kEqual;
-#endif
}
}
@@ -362,7 +357,6 @@ class LiftoffCompiler {
Label catch_label;
bool catch_reached = false;
bool in_handler = false;
- int32_t previous_catch = -1;
};
struct Control : public ControlBase<Value, validate> {
@@ -594,8 +588,7 @@ class LiftoffCompiler {
}
}
- // TODO(ahaas): Make this function constexpr once GCC allows it.
- LiftoffRegList RegsUnusedByParams() {
+ constexpr static LiftoffRegList RegsUnusedByParams() {
LiftoffRegList regs = kGpCacheRegList;
for (auto reg : kGpParamRegisters) {
regs.clear(reg);
@@ -620,8 +613,9 @@ class LiftoffCompiler {
// For reference type parameters we have to use registers that were not
// used for parameters because some reference type stack parameters may
// get processed before some value type register parameters.
+ static constexpr auto kRegsUnusedByParams = RegsUnusedByParams();
LiftoffRegister reg = is_reference(reg_kind)
- ? __ GetUnusedRegister(RegsUnusedByParams())
+ ? __ GetUnusedRegister(kRegsUnusedByParams)
: __ GetUnusedRegister(rc, pinned);
__ LoadCallerFrameSlot(reg, -location.AsCallerFrameSlot(), reg_kind);
return reg;
@@ -742,6 +736,7 @@ class LiftoffCompiler {
// Store the instance parameter to a special stack slot.
__ SpillInstance(kWasmInstanceRegister);
__ cache_state()->SetInstanceCacheRegister(kWasmInstanceRegister);
+ if (for_debugging_) __ ResetOSRTarget();
// Process parameters.
if (num_params) DEBUG_CODE_COMMENT("process parameters");
@@ -909,6 +904,9 @@ class LiftoffCompiler {
ool->debug_sidetable_entry_builder->set_pc_offset(__ pc_offset());
}
DCHECK_EQ(ool->continuation.get()->is_bound(), is_stack_check);
+ if (is_stack_check) {
+ MaybeOSR();
+ }
if (!ool->regs_to_save.is_empty()) __ PopRegisters(ool->regs_to_save);
if (is_stack_check) {
if (V8_UNLIKELY(ool->spilled_registers != nullptr)) {
@@ -1046,6 +1044,7 @@ class LiftoffCompiler {
DefineSafepointWithCalleeSavedRegisters();
RegisterDebugSideTableEntry(decoder,
DebugSideTableBuilder::kAllowRegisters);
+ MaybeOSR();
}
void PushControl(Control* block) {
@@ -1072,16 +1071,14 @@ class LiftoffCompiler {
// Save the current cache state for the merge when jumping to this loop.
loop->label_state.Split(*__ cache_state());
+ PushControl(loop);
+
// Execute a stack check in the loop header.
StackCheck(decoder, decoder->position());
-
- PushControl(loop);
}
void Try(FullDecoder* decoder, Control* block) {
block->try_info = std::make_unique<TryInfo>();
- block->try_info->previous_catch = current_catch_;
- current_catch_ = static_cast<int32_t>(decoder->control_depth() - 1);
PushControl(block);
}
@@ -1114,7 +1111,6 @@ class LiftoffCompiler {
const ExceptionIndexImmediate<validate>& imm,
Control* block, Vector<Value> values) {
DCHECK(block->is_try_catch());
- current_catch_ = block->try_info->previous_catch; // Pop try scope.
__ emit_jump(block->label.get());
// The catch block is unreachable if no possible throws in the try block
@@ -1181,6 +1177,7 @@ class LiftoffCompiler {
if (depth == decoder->control_depth() - 1) {
// Delegate to the caller, do not emit a landing pad.
Rethrow(decoder, __ cache_state()->stack_state.back());
+ MaybeOSR();
} else {
DCHECK(target->is_incomplete_try());
if (!target->try_info->catch_reached) {
@@ -1194,14 +1191,15 @@ class LiftoffCompiler {
__ emit_jump(&target->try_info->catch_label);
}
}
- current_catch_ = block->try_info->previous_catch;
}
void Rethrow(FullDecoder* decoder, Control* try_block) {
int index = try_block->try_info->catch_state.stack_height() - 1;
auto& exception = __ cache_state()->stack_state[index];
Rethrow(decoder, exception);
- EmitLandingPad(decoder);
+ int pc_offset = __ pc_offset();
+ MaybeOSR();
+ EmitLandingPad(decoder, pc_offset);
}
void CatchAll(FullDecoder* decoder, Control* block) {
@@ -1209,8 +1207,6 @@ class LiftoffCompiler {
block->is_try_unwind());
DCHECK_EQ(decoder->control_at(0), block);
- current_catch_ = block->try_info->previous_catch; // Pop try scope.
-
// The catch block is unreachable if no possible throws in the try block
// exist. We only build a landing pad if some node in the try block can
// (possibly) throw. Otherwise the catch environments remain empty.
@@ -1340,8 +1336,6 @@ class LiftoffCompiler {
if (!c->label.get()->is_bound()) __ bind(c->label.get());
}
- void EndControl(FullDecoder* decoder, Control* c) {}
-
void GenerateCCall(const LiftoffRegister* result_regs,
const ValueKindSig* sig, ValueKind out_argument_kind,
const LiftoffRegister* arg_regs,
@@ -3152,6 +3146,32 @@ class LiftoffCompiler {
__ PushRegister(kRef, ref);
}
+ void BrOnNonNull(FullDecoder* decoder, const Value& ref_object,
+ uint32_t depth) {
+ // Before branching, materialize all constants. This avoids repeatedly
+ // materializing them for each conditional branch.
+ if (depth != decoder->control_depth() - 1) {
+ __ MaterializeMergedConstants(
+ decoder->control_at(depth)->br_merge()->arity);
+ }
+
+ Label cont_false;
+ LiftoffRegList pinned;
+ LiftoffRegister ref = pinned.set(__ PopToRegister(pinned));
+ // Put the reference back onto the stack for the branch.
+ __ PushRegister(kRef, ref);
+
+ Register null = __ GetUnusedRegister(kGpReg, pinned).gp();
+ LoadNullValue(null, pinned);
+ __ emit_cond_jump(kEqual, &cont_false, ref_object.type.kind(), ref.gp(),
+ null);
+
+ BrOrRet(decoder, depth, 0);
+ // Drop the reference if we are not branching.
+ __ DropValues(1);
+ __ bind(&cont_false);
+ }
+
template <ValueKind src_kind, ValueKind result_kind, typename EmitFn>
void EmitTerOp(EmitFn fn) {
static constexpr RegClass src_rc = reg_class_for(src_kind);
@@ -4054,22 +4074,22 @@ class LiftoffCompiler {
DCHECK_EQ(index, WasmExceptionPackage::GetEncodedSize(exception));
}
- void EmitLandingPad(FullDecoder* decoder) {
- if (current_catch_ == -1) return;
+ void EmitLandingPad(FullDecoder* decoder, int handler_offset) {
+ if (decoder->current_catch() == -1) return;
MovableLabel handler;
- int handler_offset = __ pc_offset();
// If we return from the throwing code normally, just skip over the handler.
Label skip_handler;
__ emit_jump(&skip_handler);
// Handler: merge into the catch state, and jump to the catch body.
+ DEBUG_CODE_COMMENT("-- landing pad --");
__ bind(handler.get());
__ ExceptionHandler();
__ PushException();
handlers_.push_back({std::move(handler), handler_offset});
Control* current_try =
- decoder->control_at(decoder->control_depth() - 1 - current_catch_);
+ decoder->control_at(decoder->control_depth_of_current_catch());
DCHECK_NOT_NULL(current_try->try_info);
if (!current_try->try_info->catch_reached) {
current_try->try_info->catch_state.InitMerge(
@@ -4102,6 +4122,7 @@ class LiftoffCompiler {
{LiftoffAssembler::VarState{
kSmiKind, LiftoffRegister{encoded_size_reg}, 0}},
decoder->position());
+ MaybeOSR();
// The FixedArray for the exception values is now in the first gp return
// register.
@@ -4136,7 +4157,9 @@ class LiftoffCompiler {
LiftoffAssembler::VarState{kPointerKind, values_array, 0}},
decoder->position());
- EmitLandingPad(decoder);
+ int pc_offset = __ pc_offset();
+ MaybeOSR();
+ EmitLandingPad(decoder, pc_offset);
}
void AtomicStoreMem(FullDecoder* decoder, StoreType type,
@@ -4260,6 +4283,7 @@ class LiftoffCompiler {
__ DropValues(1);
LiftoffRegister result = expected;
+ if (__ cache_state()->is_used(result)) __ SpillRegister(result);
// We already added the index to addr, so we can just pass no_reg to the
// assembler now.
@@ -4296,7 +4320,6 @@ class LiftoffCompiler {
std::initializer_list<LiftoffAssembler::VarState> params,
int position) {
DEBUG_CODE_COMMENT(
- // NOLINTNEXTLINE(whitespace/braces)
(std::string{"call builtin: "} + GetRuntimeStubName(stub_id)).c_str());
auto interface_descriptor = Builtins::CallInterfaceDescriptorFor(
RuntimeStubIdToBuiltinName(stub_id));
@@ -4810,6 +4833,18 @@ class LiftoffCompiler {
StoreObjectField(obj.gp(), no_reg, offset, value, pinned, field_kind);
pinned.clear(value);
}
+ if (imm.struct_type->field_count() == 0) {
+ static_assert(Heap::kMinObjectSizeInTaggedWords == 2 &&
+ WasmStruct::kHeaderSize == kTaggedSize,
+ "empty structs need exactly one padding field");
+ ValueKind field_kind = ValueKind::kRef;
+ LiftoffRegister value = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+ LoadNullValue(value.gp(), pinned);
+ StoreObjectField(obj.gp(), no_reg,
+ wasm::ObjectAccess::ToTagged(WasmStruct::kHeaderSize),
+ value, pinned, field_kind);
+ pinned.clear(value);
+ }
__ PushRegister(kRef, obj);
}
@@ -5426,6 +5461,7 @@ class LiftoffCompiler {
source_position_table_builder_.AddPosition(
__ pc_offset(), SourcePosition(decoder->position()), true);
__ CallIndirect(sig, call_descriptor, target);
+ FinishCall(decoder, sig, call_descriptor);
}
} else {
// A direct call within this module just gets the current instance.
@@ -5443,15 +5479,9 @@ class LiftoffCompiler {
source_position_table_builder_.AddPosition(
__ pc_offset(), SourcePosition(decoder->position()), true);
__ CallNativeWasmCode(addr);
+ FinishCall(decoder, sig, call_descriptor);
}
}
-
- if (!tail_call) {
- DefineSafepoint();
- RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
- EmitLandingPad(decoder);
- __ FinishCall(sig, call_descriptor);
- }
}
void CallIndirect(FullDecoder* decoder, const Value& index_val,
@@ -5546,7 +5576,6 @@ class LiftoffCompiler {
__ Load(LiftoffRegister(scratch), table, index, 0, LoadType::kI32Load,
pinned);
- // TODO(9495): Do not always compare signatures, same as wasm-compiler.cc.
// Compare against expected signature.
__ LoadConstant(LiftoffRegister(tmp_const), WasmValue(canonical_sig_num));
@@ -5617,10 +5646,7 @@ class LiftoffCompiler {
__ pc_offset(), SourcePosition(decoder->position()), true);
__ CallIndirect(sig, call_descriptor, target);
- DefineSafepoint();
- RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
- EmitLandingPad(decoder);
- __ FinishCall(sig, call_descriptor);
+ FinishCall(decoder, sig, call_descriptor);
}
}
@@ -5635,9 +5661,9 @@ class LiftoffCompiler {
call_descriptor =
GetLoweredCallDescriptor(compilation_zone_, call_descriptor);
- // Since this is a call instruction, we'll have to spill everything later
- // anyway; do it right away so that the register state tracking doesn't
- // get confused by the conditional builtin call below.
+ // Executing a write barrier needs temp registers; doing this on a
+ // conditional branch confuses the LiftoffAssembler's register management.
+ // Spill everything up front to work around that.
__ SpillAllRegisters();
// We limit ourselves to four registers:
@@ -5652,6 +5678,7 @@ class LiftoffCompiler {
LiftoffRegister target = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
LiftoffRegister temp = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
+ // Load the WasmFunctionData.
LiftoffRegister func_data = func_ref;
__ LoadTaggedPointer(
func_data.gp(), func_ref.gp(), no_reg,
@@ -5662,144 +5689,65 @@ class LiftoffCompiler {
wasm::ObjectAccess::ToTagged(SharedFunctionInfo::kFunctionDataOffset),
pinned);
- LiftoffRegister data_type = instance;
- __ LoadMap(data_type.gp(), func_data.gp());
- __ Load(data_type, data_type.gp(), no_reg,
- wasm::ObjectAccess::ToTagged(Map::kInstanceTypeOffset),
- LoadType::kI32Load16U, pinned);
+ // Load "ref" (instance or <instance, callable> pair) and target.
+ __ LoadTaggedPointer(
+ instance.gp(), func_data.gp(), no_reg,
+ wasm::ObjectAccess::ToTagged(WasmFunctionData::kRefOffset), pinned);
- Label is_js_function, perform_call;
- __ emit_i32_cond_jumpi(kEqual, &is_js_function, data_type.gp(),
- WASM_JS_FUNCTION_DATA_TYPE);
- // End of {data_type}'s live range.
+ Label load_target, perform_call;
+ // Check if "ref" is a Tuple2.
{
- // Call to a WasmExportedFunction.
-
- LiftoffRegister callee_instance = instance;
- __ LoadTaggedPointer(callee_instance.gp(), func_data.gp(), no_reg,
- wasm::ObjectAccess::ToTagged(
- WasmExportedFunctionData::kInstanceOffset),
- pinned);
- LiftoffRegister func_index = target;
- __ LoadSmiAsInt32(func_index, func_data.gp(),
- wasm::ObjectAccess::ToTagged(
- WasmExportedFunctionData::kFunctionIndexOffset),
- pinned);
- LiftoffRegister imported_function_refs = temp;
- __ LoadTaggedPointer(imported_function_refs.gp(), callee_instance.gp(),
- no_reg,
- wasm::ObjectAccess::ToTagged(
- WasmInstanceObject::kImportedFunctionRefsOffset),
- pinned);
- // We overwrite {imported_function_refs} here, at the cost of having
- // to reload it later, because we don't have more registers on ia32.
- LiftoffRegister imported_functions_num = imported_function_refs;
- __ LoadFixedArrayLengthAsInt32(imported_functions_num,
- imported_function_refs.gp(), pinned);
-
- Label imported;
- __ emit_cond_jump(kSignedLessThan, &imported, kI32, func_index.gp(),
- imported_functions_num.gp());
-
- {
- // Function locally defined in module.
-
- // {func_index} is invalid from here on.
- LiftoffRegister jump_table_start = target;
- __ Load(jump_table_start, callee_instance.gp(), no_reg,
- wasm::ObjectAccess::ToTagged(
- WasmInstanceObject::kJumpTableStartOffset),
- kPointerLoadType, pinned);
- LiftoffRegister jump_table_offset = temp;
- __ LoadSmiAsInt32(jump_table_offset, func_data.gp(),
- wasm::ObjectAccess::ToTagged(
- WasmExportedFunctionData::kJumpTableOffsetOffset),
+ LiftoffRegister pair_map = temp;
+ LiftoffRegister ref_map = target;
+ __ LoadMap(ref_map.gp(), instance.gp());
+ LOAD_INSTANCE_FIELD(pair_map.gp(), IsolateRoot, kSystemPointerSize,
pinned);
- __ emit_ptrsize_add(target.gp(), jump_table_start.gp(),
- jump_table_offset.gp());
- __ emit_jump(&perform_call);
- }
-
- {
- // Function imported to module.
- __ bind(&imported);
-
- LiftoffRegister imported_function_targets = temp;
- __ Load(imported_function_targets, callee_instance.gp(), no_reg,
- wasm::ObjectAccess::ToTagged(
- WasmInstanceObject::kImportedFunctionTargetsOffset),
- kPointerLoadType, pinned);
- // {callee_instance} is invalid from here on.
- LiftoffRegister imported_instance = instance;
- // Scale {func_index} to kTaggedSize.
- __ emit_i32_shli(func_index.gp(), func_index.gp(), kTaggedSizeLog2);
- // {func_data} is invalid from here on.
- imported_function_refs = func_data;
- __ LoadTaggedPointer(
- imported_function_refs.gp(), callee_instance.gp(), no_reg,
- wasm::ObjectAccess::ToTagged(
- WasmInstanceObject::kImportedFunctionRefsOffset),
- pinned);
- __ LoadTaggedPointer(
- imported_instance.gp(), imported_function_refs.gp(),
- func_index.gp(),
- wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(0), pinned);
- // Scale {func_index} to kSystemPointerSize.
- if (kSystemPointerSize == kTaggedSize * 2) {
- __ emit_i32_add(func_index.gp(), func_index.gp(), func_index.gp());
- } else {
- DCHECK_EQ(kSystemPointerSize, kTaggedSize);
- }
- // This overwrites the contents of {func_index}, which we don't need
- // any more.
- __ Load(target, imported_function_targets.gp(), func_index.gp(), 0,
- kPointerLoadType, pinned);
- __ emit_jump(&perform_call);
- }
- }
-
- {
- // Call to a WasmJSFunction. The call target is
- // function_data->wasm_to_js_wrapper_code()->instruction_start().
- // The instance_node is the pair
- // (current WasmInstanceObject, function_data->callable()).
- __ bind(&is_js_function);
-
- LiftoffRegister callable = temp;
- __ LoadTaggedPointer(
- callable.gp(), func_data.gp(), no_reg,
- wasm::ObjectAccess::ToTagged(WasmJSFunctionData::kCallableOffset),
- pinned);
-
- // Preserve {func_data} across the call.
- LiftoffRegList saved_regs = LiftoffRegList::ForRegs(func_data);
- __ PushRegisters(saved_regs);
+ __ LoadTaggedPointer(pair_map.gp(), pair_map.gp(), no_reg,
+ IsolateData::root_slot_offset(RootIndex::kTuple2Map),
+ pinned);
+ __ emit_cond_jump(kUnequal, &load_target, kRef, ref_map.gp(),
+ pair_map.gp());
- LiftoffRegister current_instance = instance;
+ // Overwrite the tuple's "instance" entry with the current instance.
+ // TODO(jkummerow): Can we figure out a way to guarantee that the
+ // instance field is always precomputed?
+ LiftoffRegister current_instance = temp;
__ FillInstanceInto(current_instance.gp());
- LiftoffAssembler::VarState instance_var(kOptRef, current_instance, 0);
- LiftoffAssembler::VarState callable_var(kOptRef, callable, 0);
-
- CallRuntimeStub(WasmCode::kWasmAllocatePair,
- MakeSig::Returns(kOptRef).Params(kOptRef, kOptRef),
- {instance_var, callable_var}, decoder->position());
- if (instance.gp() != kReturnRegister0) {
- __ Move(instance.gp(), kReturnRegister0, kPointerKind);
- }
-
- // Restore {func_data}, which we saved across the call.
- __ PopRegisters(saved_regs);
+ __ StoreTaggedPointer(instance.gp(), no_reg,
+ wasm::ObjectAccess::ToTagged(Tuple2::kValue1Offset),
+ current_instance, pinned);
+ // Fall through to {load_target}.
+ }
+ // Load the call target.
+ __ bind(&load_target);
+
+#ifdef V8_HEAP_SANDBOX
+ LOAD_INSTANCE_FIELD(temp.gp(), IsolateRoot, kSystemPointerSize, pinned);
+ __ LoadExternalPointerField(
+ target.gp(),
+ FieldOperand(func_data.gp(), WasmFunctionData::kForeignAddressOffset),
+ kForeignForeignAddressTag, temp.gp(),
+ TurboAssembler::IsolateRootLocation::kInScratchRegister);
+#else
+ __ Load(
+ target, func_data.gp(), no_reg,
+ wasm::ObjectAccess::ToTagged(WasmFunctionData::kForeignAddressOffset),
+ kPointerLoadType, pinned);
+#endif
- LiftoffRegister wrapper_code = target;
- __ LoadTaggedPointer(wrapper_code.gp(), func_data.gp(), no_reg,
- wasm::ObjectAccess::ToTagged(
- WasmJSFunctionData::kWasmToJsWrapperCodeOffset),
- pinned);
- __ emit_ptrsize_addi(target.gp(), wrapper_code.gp(),
- wasm::ObjectAccess::ToTagged(Code::kHeaderSize));
- // Fall through to {perform_call}.
- }
+ LiftoffRegister null_address = temp;
+ __ LoadConstant(null_address, WasmValue::ForUintPtr(0));
+ __ emit_cond_jump(kUnequal, &perform_call, kRef, target.gp(),
+ null_address.gp());
+ // The cached target can only be null for WasmJSFunctions.
+ __ LoadTaggedPointer(target.gp(), func_data.gp(), no_reg,
+ wasm::ObjectAccess::ToTagged(
+ WasmJSFunctionData::kWasmToJsWrapperCodeOffset),
+ pinned);
+ __ emit_ptrsize_addi(target.gp(), target.gp(),
+ wasm::ObjectAccess::ToTagged(Code::kHeaderSize));
+ // Fall through to {perform_call}.
__ bind(&perform_call);
// Now the call target is in {target}, and the right instance object
@@ -5818,18 +5766,14 @@ class LiftoffCompiler {
__ pc_offset(), SourcePosition(decoder->position()), true);
__ CallIndirect(sig, call_descriptor, target_reg);
- DefineSafepoint();
- RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
- EmitLandingPad(decoder);
- __ FinishCall(sig, call_descriptor);
+ FinishCall(decoder, sig, call_descriptor);
}
}
void LoadNullValue(Register null, LiftoffRegList pinned) {
LOAD_INSTANCE_FIELD(null, IsolateRoot, kSystemPointerSize, pinned);
- __ LoadTaggedPointer(null, null, no_reg,
- IsolateData::root_slot_offset(RootIndex::kNullValue),
- pinned);
+ __ LoadFullPointer(null, null,
+ IsolateData::root_slot_offset(RootIndex::kNullValue));
}
void LoadExceptionSymbol(Register dst, LiftoffRegList pinned,
@@ -5946,6 +5890,22 @@ class LiftoffCompiler {
WASM_STRUCT_TYPE - WASM_ARRAY_TYPE);
}
+ void MaybeOSR() {
+ if (V8_UNLIKELY(for_debugging_)) {
+ __ MaybeOSR();
+ }
+ }
+
+ void FinishCall(FullDecoder* decoder, ValueKindSig* sig,
+ compiler::CallDescriptor* call_descriptor) {
+ DefineSafepoint();
+ RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
+ int pc_offset = __ pc_offset();
+ MaybeOSR();
+ EmitLandingPad(decoder, pc_offset);
+ __ FinishCall(sig, call_descriptor);
+ }
+
static constexpr WasmOpcode kNoOutstandingOp = kExprUnreachable;
static constexpr base::EnumSet<ValueKind> kUnconditionallySupported{
kI32, kI64, kF32, kF64};
@@ -5993,9 +5953,6 @@ class LiftoffCompiler {
// at the first breakable opcode in the function (if compiling for debugging).
bool did_function_entry_break_checks_ = false;
- // Depth of the current try block.
- int32_t current_catch_ = -1;
-
struct HandlerInfo {
MovableLabel handler;
int pc_offset;
diff --git a/chromium/v8/src/wasm/baseline/liftoff-register.h b/chromium/v8/src/wasm/baseline/liftoff-register.h
index bb27b99dc27..63ac2acf8bc 100644
--- a/chromium/v8/src/wasm/baseline/liftoff-register.h
+++ b/chromium/v8/src/wasm/baseline/liftoff-register.h
@@ -152,11 +152,12 @@ class LiftoffRegister {
"chosen type is small enough");
public:
- explicit LiftoffRegister(Register reg) : LiftoffRegister(reg.code()) {
+ constexpr explicit LiftoffRegister(Register reg)
+ : LiftoffRegister(reg.code()) {
DCHECK_NE(0, kLiftoffAssemblerGpCacheRegs & reg.bit());
DCHECK_EQ(reg, gp());
}
- explicit LiftoffRegister(DoubleRegister reg)
+ constexpr explicit LiftoffRegister(DoubleRegister reg)
: LiftoffRegister(kAfterMaxLiftoffGpRegCode + reg.code()) {
DCHECK_NE(0, kLiftoffAssemblerFpCacheRegs & reg.bit());
DCHECK_EQ(reg, fp());
@@ -275,22 +276,22 @@ class LiftoffRegister {
return DoubleRegister::from_code((code_ & kCodeMask) + 1);
}
- Register gp() const {
+ constexpr Register gp() const {
DCHECK(is_gp());
return Register::from_code(code_);
}
- DoubleRegister fp() const {
+ constexpr DoubleRegister fp() const {
DCHECK(is_fp());
return DoubleRegister::from_code(code_ - kAfterMaxLiftoffGpRegCode);
}
- int liftoff_code() const {
+ constexpr int liftoff_code() const {
STATIC_ASSERT(sizeof(int) >= sizeof(storage_t));
return static_cast<int>(code_);
}
- RegClass reg_class() const {
+ constexpr RegClass reg_class() const {
return is_fp_pair() ? kFpRegPair
: is_gp_pair() ? kGpRegPair : is_gp() ? kGpReg : kFpReg;
}
@@ -364,7 +365,7 @@ class LiftoffRegList {
return reg;
}
- LiftoffRegister clear(LiftoffRegister reg) {
+ constexpr LiftoffRegister clear(LiftoffRegister reg) {
if (reg.is_pair()) {
regs_ &= ~(storage_t{1} << reg.low().liftoff_code());
regs_ &= ~(storage_t{1} << reg.high().liftoff_code());
@@ -373,8 +374,10 @@ class LiftoffRegList {
}
return reg;
}
- Register clear(Register reg) { return clear(LiftoffRegister{reg}).gp(); }
- DoubleRegister clear(DoubleRegister reg) {
+ constexpr Register clear(Register reg) {
+ return clear(LiftoffRegister{reg}).gp();
+ }
+ constexpr DoubleRegister clear(DoubleRegister reg) {
return clear(LiftoffRegister{reg}).fp();
}
diff --git a/chromium/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h b/chromium/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
index ca715a8a328..58d2d8545cc 100644
--- a/chromium/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
+++ b/chromium/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
@@ -433,6 +433,8 @@ void LiftoffAssembler::SpillInstance(Register instance) {
sw(instance, liftoff::GetInstanceOperand());
}
+void LiftoffAssembler::ResetOSRTarget() {}
+
void LiftoffAssembler::FillInstanceInto(Register dst) {
lw(dst, liftoff::GetInstanceOperand());
}
@@ -468,7 +470,7 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
: MemOperand(dst_addr, offset_imm);
Sw(src.gp(), dst_op);
- if (skip_write_barrier) return;
+ if (skip_write_barrier || FLAG_disable_write_barriers) return;
// The write barrier.
Label write_barrier;
@@ -483,8 +485,8 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
CheckPageFlag(src.gp(), scratch,
MemoryChunk::kPointersToHereAreInterestingMask, eq, &exit);
Addu(scratch, dst_op.rm(), dst_op.offset());
- CallRecordWriteStub(dst_addr, scratch, EMIT_REMEMBERED_SET, kSaveFPRegs,
- wasm::WasmCode::kRecordWrite);
+ CallRecordWriteStub(dst_addr, scratch, RememberedSetAction::kEmit,
+ SaveFPRegsMode::kSave, wasm::WasmCode::kRecordWrite);
bind(&exit);
}
@@ -2835,7 +2837,7 @@ void LiftoffAssembler::CallTrapCallbackForTesting() {
}
void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
- if (emit_debug_code()) Abort(reason);
+ if (FLAG_debug_code) Abort(reason);
}
void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
@@ -2998,6 +3000,8 @@ void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
addiu(sp, sp, size);
}
+void LiftoffAssembler::MaybeOSR() {}
+
void LiftoffStackSlots::Construct(int param_slots) {
DCHECK_LT(0, slots_.size());
SortInPushOrder();
diff --git a/chromium/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h b/chromium/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
index a5a9f8ce231..15b3b4f7c4d 100644
--- a/chromium/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
+++ b/chromium/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
@@ -418,6 +418,8 @@ void LiftoffAssembler::SpillInstance(Register instance) {
Sd(instance, liftoff::GetInstanceOperand());
}
+void LiftoffAssembler::ResetOSRTarget() {}
+
void LiftoffAssembler::FillInstanceInto(Register dst) {
Ld(dst, liftoff::GetInstanceOperand());
}
@@ -448,7 +450,7 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
MemOperand dst_op = liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm);
Sd(src.gp(), dst_op);
- if (skip_write_barrier) return;
+ if (skip_write_barrier || FLAG_disable_write_barriers) return;
Label write_barrier;
Label exit;
@@ -462,8 +464,8 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
MemoryChunk::kPointersToHereAreInterestingMask, eq,
&exit);
Daddu(scratch, dst_op.rm(), dst_op.offset());
- CallRecordWriteStub(dst_addr, scratch, EMIT_REMEMBERED_SET, kSaveFPRegs,
- wasm::WasmCode::kRecordWrite);
+ CallRecordWriteStub(dst_addr, scratch, RememberedSetAction::kEmit,
+ SaveFPRegsMode::kSave, wasm::WasmCode::kRecordWrite);
bind(&exit);
}
@@ -2994,7 +2996,7 @@ void LiftoffAssembler::CallTrapCallbackForTesting() {
}
void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
- if (emit_debug_code()) Abort(reason);
+ if (FLAG_debug_code) Abort(reason);
}
void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
@@ -3166,6 +3168,8 @@ void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
Daddu(sp, sp, size);
}
+void LiftoffAssembler::MaybeOSR() {}
+
void LiftoffStackSlots::Construct(int param_slots) {
DCHECK_LT(0, slots_.size());
SortInPushOrder();
diff --git a/chromium/v8/src/wasm/baseline/ppc/OWNERS b/chromium/v8/src/wasm/baseline/ppc/OWNERS
deleted file mode 100644
index 02c2cd757c9..00000000000
--- a/chromium/v8/src/wasm/baseline/ppc/OWNERS
+++ /dev/null
@@ -1,5 +0,0 @@
-junyan@redhat.com
-joransiu@ca.ibm.com
-midawson@redhat.com
-mfarazma@redhat.com
-vasili.skurydzin@ibm.com
diff --git a/chromium/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h b/chromium/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
index 4e99821a27d..10d574301e6 100644
--- a/chromium/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
+++ b/chromium/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
@@ -45,6 +45,47 @@ inline MemOperand GetHalfStackSlot(int offset, RegPairHalf half) {
return MemOperand(fp, -kInstanceOffset - offset + half_offset);
}
+inline constexpr Condition ToCondition(LiftoffCondition liftoff_cond) {
+ switch (liftoff_cond) {
+ case kEqual:
+ return eq;
+ case kUnequal:
+ return ne;
+ case kSignedLessThan:
+ case kUnsignedLessThan:
+ return lt;
+ case kSignedLessEqual:
+ case kUnsignedLessEqual:
+ return le;
+ case kSignedGreaterEqual:
+ case kUnsignedGreaterEqual:
+ return ge;
+ case kSignedGreaterThan:
+ case kUnsignedGreaterThan:
+ return gt;
+ }
+}
+
+inline constexpr bool UseSignedOp(LiftoffCondition liftoff_cond) {
+ switch (liftoff_cond) {
+ case kEqual:
+ case kUnequal:
+ case kSignedLessThan:
+ case kSignedLessEqual:
+ case kSignedGreaterThan:
+ case kSignedGreaterEqual:
+ return true;
+ case kUnsignedLessThan:
+ case kUnsignedLessEqual:
+ case kUnsignedGreaterThan:
+ case kUnsignedGreaterEqual:
+ return false;
+ default:
+ UNREACHABLE();
+ }
+ return false;
+}
+
} // namespace liftoff
int LiftoffAssembler::PrepareStackFrame() {
@@ -87,7 +128,30 @@ bool LiftoffAssembler::NeedsAlignment(ValueKind kind) {
void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
RelocInfo::Mode rmode) {
- bailout(kUnsupportedArchitecture, "LoadConstant");
+ switch (value.type().kind()) {
+ case kI32:
+ mov(reg.gp(), Operand(value.to_i32(), rmode));
+ break;
+ case kI64:
+ mov(reg.gp(), Operand(value.to_i64(), rmode));
+ break;
+ case kF32: {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ mov(scratch, Operand(value.to_f32_boxed().get_bits()));
+ MovIntToFloat(reg.fp(), scratch);
+ break;
+ }
+ case kF64: {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ mov(scratch, Operand(value.to_f64_boxed().get_bits()));
+ MovInt64ToDouble(reg.fp(), scratch);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
}
void LiftoffAssembler::LoadInstanceFromFrame(Register dst) {
@@ -109,6 +173,8 @@ void LiftoffAssembler::SpillInstance(Register instance) {
bailout(kUnsupportedArchitecture, "SpillInstance");
}
+void LiftoffAssembler::ResetOSRTarget() {}
+
void LiftoffAssembler::FillInstanceInto(Register dst) {
bailout(kUnsupportedArchitecture, "FillInstanceInto");
}
@@ -524,56 +590,123 @@ void LiftoffAssembler::emit_i64_signextend_i32(LiftoffRegister dst,
bailout(kUnsupportedArchitecture, "emit_i64_signextend_i32");
}
-void LiftoffAssembler::emit_jump(Label* label) {
- bailout(kUnsupportedArchitecture, "emit_jump");
-}
+void LiftoffAssembler::emit_jump(Label* label) { b(al, label); }
-void LiftoffAssembler::emit_jump(Register target) {
- bailout(kUnsupportedArchitecture, "emit_jump");
-}
+void LiftoffAssembler::emit_jump(Register target) { Jump(target); }
void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
Label* label, ValueKind kind,
Register lhs, Register rhs) {
- bailout(kUnsupportedArchitecture, "emit_cond_jump");
+ Condition cond = liftoff::ToCondition(liftoff_cond);
+ bool use_signed = liftoff::UseSignedOp(liftoff_cond);
+
+ if (rhs != no_reg) {
+ switch (kind) {
+ case kI32:
+ if (use_signed) {
+ cmpw(lhs, rhs);
+ } else {
+ cmplw(lhs, rhs);
+ }
+ break;
+ case kRef:
+ case kOptRef:
+ case kRtt:
+ case kRttWithDepth:
+ DCHECK(liftoff_cond == kEqual || liftoff_cond == kUnequal);
+ V8_FALLTHROUGH;
+ case kI64:
+ if (use_signed) {
+ cmp(lhs, rhs);
+ } else {
+ cmpl(lhs, rhs);
+ }
+ break;
+ default:
+ UNREACHABLE();
+ }
+ } else {
+ DCHECK_EQ(kind, kI32);
+ CHECK(use_signed);
+ cmpwi(lhs, Operand::Zero());
+ }
+
+ b(cond, label);
}
void LiftoffAssembler::emit_i32_cond_jumpi(LiftoffCondition liftoff_cond,
Label* label, Register lhs,
int32_t imm) {
- bailout(kUnsupportedArchitecture, "emit_i32_cond_jumpi");
+ Condition cond = liftoff::ToCondition(liftoff_cond);
+ Cmpwi(lhs, Operand(imm), r0);
+ b(cond, label);
}
void LiftoffAssembler::emit_i32_eqz(Register dst, Register src) {
- bailout(kUnsupportedArchitecture, "emit_i32_eqz");
+ Label done;
+ cmpwi(src, Operand(0));
+ mov(dst, Operand(1));
+ beq(&done);
+ mov(dst, Operand::Zero());
+ bind(&done);
}
void LiftoffAssembler::emit_i32_set_cond(LiftoffCondition liftoff_cond,
Register dst, Register lhs,
Register rhs) {
- bailout(kUnsupportedArchitecture, "emit_i32_set_cond");
+ bool use_signed = liftoff::UseSignedOp(liftoff_cond);
+ if (use_signed) {
+ cmpw(lhs, rhs);
+ } else {
+ cmplw(lhs, rhs);
+ }
+ Label done;
+ mov(dst, Operand(1));
+ b(liftoff::ToCondition(liftoff_cond), &done);
+ mov(dst, Operand::Zero());
+ bind(&done);
}
void LiftoffAssembler::emit_i64_eqz(Register dst, LiftoffRegister src) {
- bailout(kUnsupportedArchitecture, "emit_i64_eqz");
+ Label done;
+ cmpi(src.gp(), Operand(0));
+ mov(dst, Operand(1));
+ beq(&done);
+ mov(dst, Operand::Zero());
+ bind(&done);
}
void LiftoffAssembler::emit_i64_set_cond(LiftoffCondition liftoff_cond,
Register dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_i64_set_cond");
+ bool use_signed = liftoff::UseSignedOp(liftoff_cond);
+ if (use_signed) {
+ cmp(lhs.gp(), rhs.gp());
+ } else {
+ cmpl(lhs.gp(), rhs.gp());
+ }
+ Label done;
+ mov(dst, Operand(1));
+ b(liftoff::ToCondition(liftoff_cond), &done);
+ mov(dst, Operand::Zero());
+ bind(&done);
}
void LiftoffAssembler::emit_f32_set_cond(LiftoffCondition liftoff_cond,
Register dst, DoubleRegister lhs,
DoubleRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_f32_set_cond");
+ fcmpu(lhs, rhs);
+ Label done;
+ mov(dst, Operand(1));
+ b(liftoff::ToCondition(liftoff_cond), &done);
+ mov(dst, Operand::Zero());
+ bind(&done);
}
void LiftoffAssembler::emit_f64_set_cond(LiftoffCondition liftoff_cond,
Register dst, DoubleRegister lhs,
DoubleRegister rhs) {
- bailout(kUnsupportedArchitecture, "emit_f64_set_cond");
+ emit_f32_set_cond(liftoff_cond, dst, lhs, rhs);
}
bool LiftoffAssembler::emit_select(LiftoffRegister dst, Register condition,
@@ -1801,6 +1934,8 @@ void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
bailout(kUnsupportedArchitecture, "DeallocateStackSlot");
}
+void LiftoffAssembler::MaybeOSR() {}
+
void LiftoffStackSlots::Construct(int param_slots) {
asm_->bailout(kUnsupportedArchitecture, "LiftoffStackSlots::Construct");
}
diff --git a/chromium/v8/src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h b/chromium/v8/src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h
index 47f8ce2125d..3f549a3df63 100644
--- a/chromium/v8/src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h
+++ b/chromium/v8/src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h
@@ -325,9 +325,9 @@ void LiftoffAssembler::PatchPrepareStackFrame(int offset) {
patching_assembler.Add64(sp, sp, Operand(-frame_size));
}
-void LiftoffAssembler::FinishCode() {}
+void LiftoffAssembler::FinishCode() { ForceConstantPoolEmissionWithoutJump(); }
-void LiftoffAssembler::AbortCompilation() {}
+void LiftoffAssembler::AbortCompilation() { AbortedCodeGeneration(); }
// static
constexpr int LiftoffAssembler::StaticStackFrameSize() {
@@ -382,12 +382,19 @@ void LiftoffAssembler::LoadInstanceFromFrame(Register dst) {
void LiftoffAssembler::LoadFromInstance(Register dst, Register instance,
int offset, int size) {
DCHECK_LE(0, offset);
- DCHECK(size == 4 || size == 8);
MemOperand src{instance, offset};
- if (size == 4) {
- Lw(dst, src);
- } else {
- Ld(dst, src);
+ switch (size) {
+ case 1:
+ Lb(dst, MemOperand(src));
+ break;
+ case 4:
+ Lw(dst, MemOperand(src));
+ break;
+ case 8:
+ Ld(dst, MemOperand(src));
+ break;
+ default:
+ UNIMPLEMENTED();
}
}
@@ -401,6 +408,8 @@ void LiftoffAssembler::SpillInstance(Register instance) {
Sd(instance, liftoff::GetInstanceOperand());
}
+void LiftoffAssembler::ResetOSRTarget() {}
+
void LiftoffAssembler::FillInstanceInto(Register dst) {
Ld(dst, liftoff::GetInstanceOperand());
}
@@ -414,6 +423,12 @@ void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
Ld(dst, src_op);
}
+void LiftoffAssembler::LoadFullPointer(Register dst, Register src_addr,
+ int32_t offset_imm) {
+ MemOperand src_op = liftoff::GetMemOp(this, src_addr, no_reg, offset_imm);
+ Ld(dst, src_op);
+}
+
void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
Register offset_reg,
int32_t offset_imm,
@@ -425,7 +440,7 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
MemOperand dst_op = liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm);
Sd(src.gp(), dst_op);
- if (skip_write_barrier) return;
+ if (skip_write_barrier || FLAG_disable_write_barriers) return;
Label write_barrier;
Label exit;
@@ -437,9 +452,9 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
JumpIfSmi(src.gp(), &exit);
CheckPageFlag(src.gp(), scratch,
MemoryChunk::kPointersToHereAreInterestingMask, eq, &exit);
- Add64(scratch, dst_addr, offset_imm);
- CallRecordWriteStub(dst_addr, scratch, EMIT_REMEMBERED_SET, kSaveFPRegs,
- wasm::WasmCode::kRecordWrite);
+ Add64(scratch, dst_op.rm(), dst_op.offset());
+ CallRecordWriteStub(dst_addr, scratch, RememberedSetAction::kEmit,
+ SaveFPRegsMode::kSave, wasm::WasmCode::kRecordWrite);
bind(&exit);
}
@@ -543,60 +558,297 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
}
}
+namespace liftoff {
+#define __ lasm->
+
+inline Register CalculateActualAddress(LiftoffAssembler* lasm,
+ Register addr_reg, Register offset_reg,
+ uintptr_t offset_imm,
+ Register result_reg) {
+ DCHECK_NE(offset_reg, no_reg);
+ DCHECK_NE(addr_reg, no_reg);
+ __ Add64(result_reg, addr_reg, Operand(offset_reg));
+ if (offset_imm != 0) {
+ __ Add64(result_reg, result_reg, Operand(offset_imm));
+ }
+ return result_reg;
+}
+
+enum class Binop { kAdd, kSub, kAnd, kOr, kXor, kExchange };
+
+inline void AtomicBinop(LiftoffAssembler* lasm, Register dst_addr,
+ Register offset_reg, uintptr_t offset_imm,
+ LiftoffRegister value, LiftoffRegister result,
+ StoreType type, Binop op) {
+ LiftoffRegList pinned =
+ LiftoffRegList::ForRegs(dst_addr, offset_reg, value, result);
+ Register store_result = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
+
+ // Make sure that {result} is unique.
+ Register result_reg = result.gp();
+ if (result_reg == value.gp() || result_reg == dst_addr ||
+ result_reg == offset_reg) {
+ result_reg = __ GetUnusedRegister(kGpReg, pinned).gp();
+ }
+
+ UseScratchRegisterScope temps(lasm);
+ Register actual_addr = liftoff::CalculateActualAddress(
+ lasm, dst_addr, offset_reg, offset_imm, temps.Acquire());
+
+ // Allocate an additional {temp} register to hold the result that should be
+ // stored to memory. Note that {temp} and {store_result} are not allowed to be
+ // the same register.
+ Register temp = temps.Acquire();
+
+ Label retry;
+ __ bind(&retry);
+ switch (type.value()) {
+ case StoreType::kI64Store8:
+ case StoreType::kI32Store8:
+ __ lbu(result_reg, actual_addr, 0);
+ __ sync();
+ break;
+ case StoreType::kI64Store16:
+ case StoreType::kI32Store16:
+ __ lhu(result_reg, actual_addr, 0);
+ __ sync();
+ break;
+ case StoreType::kI64Store32:
+ case StoreType::kI32Store:
+ __ lr_w(true, false, result_reg, actual_addr);
+ break;
+ case StoreType::kI64Store:
+ __ lr_d(true, false, result_reg, actual_addr);
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ switch (op) {
+ case Binop::kAdd:
+ __ add(temp, result_reg, value.gp());
+ break;
+ case Binop::kSub:
+ __ sub(temp, result_reg, value.gp());
+ break;
+ case Binop::kAnd:
+ __ and_(temp, result_reg, value.gp());
+ break;
+ case Binop::kOr:
+ __ or_(temp, result_reg, value.gp());
+ break;
+ case Binop::kXor:
+ __ xor_(temp, result_reg, value.gp());
+ break;
+ case Binop::kExchange:
+ __ mv(temp, value.gp());
+ break;
+ }
+ switch (type.value()) {
+ case StoreType::kI64Store8:
+ case StoreType::kI32Store8:
+ __ sync();
+ __ sb(temp, actual_addr, 0);
+ __ sync();
+ __ mv(store_result, zero_reg);
+ break;
+ case StoreType::kI64Store16:
+ case StoreType::kI32Store16:
+ __ sync();
+ __ sh(temp, actual_addr, 0);
+ __ sync();
+ __ mv(store_result, zero_reg);
+ break;
+ case StoreType::kI64Store32:
+ case StoreType::kI32Store:
+ __ sc_w(false, true, store_result, actual_addr, temp);
+ break;
+ case StoreType::kI64Store:
+ __ sc_d(false, true, store_result, actual_addr, temp);
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ __ bnez(store_result, &retry);
+ if (result_reg != result.gp()) {
+ __ mv(result.gp(), result_reg);
+ }
+}
+
+#undef __
+} // namespace liftoff
+
void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr,
Register offset_reg, uintptr_t offset_imm,
LoadType type, LiftoffRegList pinned) {
- bailout(kAtomics, "AtomicLoad");
+ UseScratchRegisterScope temps(this);
+ Register src_reg = liftoff::CalculateActualAddress(
+ this, src_addr, offset_reg, offset_imm, temps.Acquire());
+ switch (type.value()) {
+ case LoadType::kI32Load8U:
+ case LoadType::kI64Load8U:
+ lbu(dst.gp(), src_reg, 0);
+ sync();
+ return;
+ case LoadType::kI32Load16U:
+ case LoadType::kI64Load16U:
+ lhu(dst.gp(), src_reg, 0);
+ sync();
+ return;
+ case LoadType::kI32Load:
+ lr_w(true, true, dst.gp(), src_reg);
+ return;
+ case LoadType::kI64Load32U:
+ lr_w(true, true, dst.gp(), src_reg);
+ slli(dst.gp(), dst.gp(), 32);
+ srli(dst.gp(), dst.gp(), 32);
+ return;
+ case LoadType::kI64Load:
+ lr_d(true, true, dst.gp(), src_reg);
+ return;
+ default:
+ UNREACHABLE();
+ }
}
void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister src,
StoreType type, LiftoffRegList pinned) {
- bailout(kAtomics, "AtomicStore");
+ UseScratchRegisterScope temps(this);
+ Register dst_reg = liftoff::CalculateActualAddress(
+ this, dst_addr, offset_reg, offset_imm, temps.Acquire());
+ switch (type.value()) {
+ case StoreType::kI64Store8:
+ case StoreType::kI32Store8:
+ sync();
+ sb(src.gp(), dst_reg, 0);
+ sync();
+ return;
+ case StoreType::kI64Store16:
+ case StoreType::kI32Store16:
+ sync();
+ sh(src.gp(), dst_reg, 0);
+ sync();
+ return;
+ case StoreType::kI64Store32:
+ case StoreType::kI32Store:
+ sc_w(true, true, zero_reg, dst_reg, src.gp());
+ return;
+ case StoreType::kI64Store:
+ sc_d(true, true, zero_reg, dst_reg, src.gp());
+ return;
+ default:
+ UNREACHABLE();
+ }
}
void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
- bailout(kAtomics, "AtomicAdd");
+ liftoff::AtomicBinop(this, dst_addr, offset_reg, offset_imm, value, result,
+ type, liftoff::Binop::kAdd);
}
void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
- bailout(kAtomics, "AtomicSub");
+ liftoff::AtomicBinop(this, dst_addr, offset_reg, offset_imm, value, result,
+ type, liftoff::Binop::kSub);
}
void LiftoffAssembler::AtomicAnd(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
- bailout(kAtomics, "AtomicAnd");
+ liftoff::AtomicBinop(this, dst_addr, offset_reg, offset_imm, value, result,
+ type, liftoff::Binop::kAnd);
}
void LiftoffAssembler::AtomicOr(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
- bailout(kAtomics, "AtomicOr");
+ liftoff::AtomicBinop(this, dst_addr, offset_reg, offset_imm, value, result,
+ type, liftoff::Binop::kOr);
}
void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
- bailout(kAtomics, "AtomicXor");
+ liftoff::AtomicBinop(this, dst_addr, offset_reg, offset_imm, value, result,
+ type, liftoff::Binop::kXor);
}
void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
uintptr_t offset_imm,
LiftoffRegister value,
LiftoffRegister result, StoreType type) {
- bailout(kAtomics, "AtomicExchange");
+ liftoff::AtomicBinop(this, dst_addr, offset_reg, offset_imm, value, result,
+ type, liftoff::Binop::kExchange);
}
void LiftoffAssembler::AtomicCompareExchange(
Register dst_addr, Register offset_reg, uintptr_t offset_imm,
LiftoffRegister expected, LiftoffRegister new_value, LiftoffRegister result,
StoreType type) {
- bailout(kAtomics, "AtomicCompareExchange");
+ LiftoffRegList pinned =
+ LiftoffRegList::ForRegs(dst_addr, offset_reg, expected, new_value);
+
+ Register result_reg = result.gp();
+ if (pinned.has(result)) {
+ result_reg = GetUnusedRegister(kGpReg, pinned).gp();
+ }
+
+ UseScratchRegisterScope temps(this);
+
+ Register actual_addr = liftoff::CalculateActualAddress(
+ this, dst_addr, offset_reg, offset_imm, temps.Acquire());
+
+ Register store_result = temps.Acquire();
+
+ Label retry;
+ Label done;
+ bind(&retry);
+ switch (type.value()) {
+ case StoreType::kI64Store8:
+ case StoreType::kI32Store8:
+ lbu(result_reg, actual_addr, 0);
+ sync();
+ Branch(&done, ne, result.gp(), Operand(expected.gp()));
+ sync();
+ sb(new_value.gp(), actual_addr, 0);
+ sync();
+ mv(store_result, zero_reg);
+ break;
+ case StoreType::kI64Store16:
+ case StoreType::kI32Store16:
+ lhu(result_reg, actual_addr, 0);
+ sync();
+ Branch(&done, ne, result.gp(), Operand(expected.gp()));
+ sync();
+ sh(new_value.gp(), actual_addr, 0);
+ sync();
+ mv(store_result, zero_reg);
+ break;
+ case StoreType::kI64Store32:
+ case StoreType::kI32Store:
+ lr_w(true, true, result_reg, actual_addr);
+ Branch(&done, ne, result.gp(), Operand(expected.gp()));
+ sc_w(true, true, store_result, new_value.gp(), actual_addr);
+ break;
+ case StoreType::kI64Store:
+ lr_d(true, true, result_reg, actual_addr);
+ Branch(&done, ne, result.gp(), Operand(expected.gp()));
+ sc_d(true, true, store_result, new_value.gp(), actual_addr);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ bnez(store_result, &retry);
+ bind(&done);
+
+ if (result_reg != result.gp()) {
+ mv(result.gp(), result_reg);
+ }
}
void LiftoffAssembler::AtomicFence() { sync(); }
@@ -2412,7 +2664,7 @@ void LiftoffAssembler::CallTrapCallbackForTesting() {
}
void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
- if (emit_debug_code()) Abort(reason);
+ if (FLAG_debug_code) Abort(reason);
}
void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
@@ -2542,8 +2794,8 @@ void LiftoffAssembler::CallIndirect(const ValueKindSig* sig,
compiler::CallDescriptor* call_descriptor,
Register target) {
if (target == no_reg) {
- pop(kScratchReg);
- Call(kScratchReg);
+ pop(t6);
+ Call(t6);
} else {
Call(target);
}
@@ -2551,8 +2803,8 @@ void LiftoffAssembler::CallIndirect(const ValueKindSig* sig,
void LiftoffAssembler::TailCallIndirect(Register target) {
if (target == no_reg) {
- Pop(kScratchReg);
- Jump(kScratchReg);
+ Pop(t6);
+ Jump(t6);
} else {
Jump(target);
}
@@ -2573,6 +2825,7 @@ void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
Add64(sp, sp, Operand(size));
}
+void LiftoffAssembler::MaybeOSR() {}
void LiftoffStackSlots::Construct(int param_slots) {
DCHECK_LT(0, slots_.size());
diff --git a/chromium/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h b/chromium/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
index 8560c91553f..4c230ed305d 100644
--- a/chromium/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
+++ b/chromium/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
@@ -224,6 +224,8 @@ void LiftoffAssembler::SpillInstance(Register instance) {
StoreU64(instance, liftoff::GetInstanceOperand());
}
+void LiftoffAssembler::ResetOSRTarget() {}
+
void LiftoffAssembler::FillInstanceInto(Register dst) {
LoadU64(dst, liftoff::GetInstanceOperand());
}
@@ -254,7 +256,7 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
MemOperand(dst_addr, offset_reg == no_reg ? r0 : offset_reg, offset_imm);
StoreTaggedField(src.gp(), dst_op);
- if (skip_write_barrier) return;
+ if (skip_write_barrier || FLAG_disable_write_barriers) return;
Label write_barrier;
Label exit;
@@ -269,8 +271,8 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
CheckPageFlag(src.gp(), r1, MemoryChunk::kPointersToHereAreInterestingMask,
eq, &exit);
lay(r1, dst_op);
- CallRecordWriteStub(dst_addr, r1, EMIT_REMEMBERED_SET, kSaveFPRegs,
- wasm::WasmCode::kRecordWrite);
+ CallRecordWriteStub(dst_addr, r1, RememberedSetAction::kEmit,
+ SaveFPRegsMode::kSave, wasm::WasmCode::kRecordWrite);
bind(&exit);
}
@@ -665,19 +667,253 @@ void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
void LiftoffAssembler::AtomicAnd(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
- bailout(kAtomics, "AtomicAnd");
+ Register tmp1 =
+ GetUnusedRegister(
+ kGpReg, LiftoffRegList::ForRegs(dst_addr, offset_reg, value, result))
+ .gp();
+ Register tmp2 =
+ GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(dst_addr, offset_reg,
+ value, result, tmp1))
+ .gp();
+
+ lay(ip,
+ MemOperand(dst_addr, offset_reg == no_reg ? r0 : offset_reg, offset_imm));
+
+ switch (type.value()) {
+ case StoreType::kI32Store8:
+ case StoreType::kI64Store8: {
+ Label do_again;
+ bind(&do_again);
+ LoadU8(tmp1, MemOperand(ip));
+ AndP(tmp2, tmp1, value.gp());
+ AtomicCmpExchangeU8(ip, result.gp(), tmp1, tmp2, r0, r1);
+ b(Condition(4), &do_again);
+ LoadU8(result.gp(), result.gp());
+ break;
+ }
+ case StoreType::kI32Store16:
+ case StoreType::kI64Store16: {
+ Label do_again;
+ bind(&do_again);
+ LoadU16(tmp1, MemOperand(ip));
+#ifdef V8_TARGET_BIG_ENDIAN
+ lrvr(tmp2, tmp1);
+ ShiftRightU32(tmp2, tmp2, Operand(16));
+ AndP(tmp2, tmp2, value.gp());
+ lrvr(tmp2, tmp2);
+ ShiftRightU32(tmp2, tmp2, Operand(16));
+#else
+ AndP(tmp2, tmp1, value.gp());
+#endif
+ AtomicCmpExchangeU16(ip, result.gp(), tmp1, tmp2, r0, r1);
+ b(Condition(4), &do_again);
+ LoadU16(result.gp(), result.gp());
+ break;
+ }
+ case StoreType::kI32Store:
+ case StoreType::kI64Store32: {
+ Label do_again;
+ bind(&do_again);
+ LoadU32(tmp1, MemOperand(ip));
+#ifdef V8_TARGET_BIG_ENDIAN
+ lrvr(tmp2, tmp1);
+ AndP(tmp2, tmp2, value.gp());
+ lrvr(tmp2, tmp2);
+#else
+ AndP(tmp2, tmp1, value.gp());
+#endif
+ CmpAndSwap(tmp1, tmp2, MemOperand(ip));
+ b(Condition(4), &do_again);
+ LoadU32(result.gp(), tmp1);
+ break;
+ }
+ case StoreType::kI64Store: {
+ Label do_again;
+ bind(&do_again);
+ LoadU64(tmp1, MemOperand(ip));
+#ifdef V8_TARGET_BIG_ENDIAN
+ lrvgr(tmp2, tmp1);
+ AndP(tmp2, tmp2, value.gp());
+ lrvgr(tmp2, tmp2);
+#else
+ AndP(tmp2, tmp1, value.gp());
+#endif
+ CmpAndSwap64(tmp1, tmp2, MemOperand(ip));
+ b(Condition(4), &do_again);
+ mov(result.gp(), tmp1);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
}
void LiftoffAssembler::AtomicOr(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
- bailout(kAtomics, "AtomicOr");
+ Register tmp1 =
+ GetUnusedRegister(
+ kGpReg, LiftoffRegList::ForRegs(dst_addr, offset_reg, value, result))
+ .gp();
+ Register tmp2 =
+ GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(dst_addr, offset_reg,
+ value, result, tmp1))
+ .gp();
+
+ lay(ip,
+ MemOperand(dst_addr, offset_reg == no_reg ? r0 : offset_reg, offset_imm));
+
+ switch (type.value()) {
+ case StoreType::kI32Store8:
+ case StoreType::kI64Store8: {
+ Label do_again;
+ bind(&do_again);
+ LoadU8(tmp1, MemOperand(ip));
+ OrP(tmp2, tmp1, value.gp());
+ AtomicCmpExchangeU8(ip, result.gp(), tmp1, tmp2, r0, r1);
+ b(Condition(4), &do_again);
+ LoadU8(result.gp(), result.gp());
+ break;
+ }
+ case StoreType::kI32Store16:
+ case StoreType::kI64Store16: {
+ Label do_again;
+ bind(&do_again);
+ LoadU16(tmp1, MemOperand(ip));
+#ifdef V8_TARGET_BIG_ENDIAN
+ lrvr(tmp2, tmp1);
+ ShiftRightU32(tmp2, tmp2, Operand(16));
+ OrP(tmp2, tmp2, value.gp());
+ lrvr(tmp2, tmp2);
+ ShiftRightU32(tmp2, tmp2, Operand(16));
+#else
+ OrP(tmp2, tmp1, value.gp());
+#endif
+ AtomicCmpExchangeU16(ip, result.gp(), tmp1, tmp2, r0, r1);
+ b(Condition(4), &do_again);
+ LoadU16(result.gp(), result.gp());
+ break;
+ }
+ case StoreType::kI32Store:
+ case StoreType::kI64Store32: {
+ Label do_again;
+ bind(&do_again);
+ LoadU32(tmp1, MemOperand(ip));
+#ifdef V8_TARGET_BIG_ENDIAN
+ lrvr(tmp2, tmp1);
+ OrP(tmp2, tmp2, value.gp());
+ lrvr(tmp2, tmp2);
+#else
+ OrP(tmp2, tmp1, value.gp());
+#endif
+ CmpAndSwap(tmp1, tmp2, MemOperand(ip));
+ b(Condition(4), &do_again);
+ LoadU32(result.gp(), tmp1);
+ break;
+ }
+ case StoreType::kI64Store: {
+ Label do_again;
+ bind(&do_again);
+ LoadU64(tmp1, MemOperand(ip));
+#ifdef V8_TARGET_BIG_ENDIAN
+ lrvgr(tmp2, tmp1);
+ OrP(tmp2, tmp2, value.gp());
+ lrvgr(tmp2, tmp2);
+#else
+ OrP(tmp2, tmp1, value.gp());
+#endif
+ CmpAndSwap64(tmp1, tmp2, MemOperand(ip));
+ b(Condition(4), &do_again);
+ mov(result.gp(), tmp1);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
}
void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
uintptr_t offset_imm, LiftoffRegister value,
LiftoffRegister result, StoreType type) {
- bailout(kAtomics, "AtomicXor");
+ Register tmp1 =
+ GetUnusedRegister(
+ kGpReg, LiftoffRegList::ForRegs(dst_addr, offset_reg, value, result))
+ .gp();
+ Register tmp2 =
+ GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(dst_addr, offset_reg,
+ value, result, tmp1))
+ .gp();
+
+ lay(ip,
+ MemOperand(dst_addr, offset_reg == no_reg ? r0 : offset_reg, offset_imm));
+
+ switch (type.value()) {
+ case StoreType::kI32Store8:
+ case StoreType::kI64Store8: {
+ Label do_again;
+ bind(&do_again);
+ LoadU8(tmp1, MemOperand(ip));
+ XorP(tmp2, tmp1, value.gp());
+ AtomicCmpExchangeU8(ip, result.gp(), tmp1, tmp2, r0, r1);
+ b(Condition(4), &do_again);
+ LoadU8(result.gp(), result.gp());
+ break;
+ }
+ case StoreType::kI32Store16:
+ case StoreType::kI64Store16: {
+ Label do_again;
+ bind(&do_again);
+ LoadU16(tmp1, MemOperand(ip));
+#ifdef V8_TARGET_BIG_ENDIAN
+ lrvr(tmp2, tmp1);
+ ShiftRightU32(tmp2, tmp2, Operand(16));
+ XorP(tmp2, tmp2, value.gp());
+ lrvr(tmp2, tmp2);
+ ShiftRightU32(tmp2, tmp2, Operand(16));
+#else
+ XorP(tmp2, tmp1, value.gp());
+#endif
+ AtomicCmpExchangeU16(ip, result.gp(), tmp1, tmp2, r0, r1);
+ b(Condition(4), &do_again);
+ LoadU16(result.gp(), result.gp());
+ break;
+ }
+ case StoreType::kI32Store:
+ case StoreType::kI64Store32: {
+ Label do_again;
+ bind(&do_again);
+ LoadU32(tmp1, MemOperand(ip));
+#ifdef V8_TARGET_BIG_ENDIAN
+ lrvr(tmp2, tmp1);
+ XorP(tmp2, tmp2, value.gp());
+ lrvr(tmp2, tmp2);
+#else
+ XorP(tmp2, tmp1, value.gp());
+#endif
+ CmpAndSwap(tmp1, tmp2, MemOperand(ip));
+ b(Condition(4), &do_again);
+ LoadU32(result.gp(), tmp1);
+ break;
+ }
+ case StoreType::kI64Store: {
+ Label do_again;
+ bind(&do_again);
+ LoadU64(tmp1, MemOperand(ip));
+#ifdef V8_TARGET_BIG_ENDIAN
+ lrvgr(tmp2, tmp1);
+ XorP(tmp2, tmp2, value.gp());
+ lrvgr(tmp2, tmp2);
+#else
+ XorP(tmp2, tmp1, value.gp());
+#endif
+ CmpAndSwap64(tmp1, tmp2, MemOperand(ip));
+ b(Condition(4), &do_again);
+ mov(result.gp(), tmp1);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
}
void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
@@ -1162,9 +1398,19 @@ void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
LFR_TO_REG, LFR_TO_REG, USE, , void) \
V(i64_ctz, CountTrailingZerosU64, LiftoffRegister, LiftoffRegister, \
LFR_TO_REG, LFR_TO_REG, USE, , void) \
+ V(f32_ceil, CeilF32, DoubleRegister, DoubleRegister, , , USE, true, bool) \
+ V(f32_floor, FloorF32, DoubleRegister, DoubleRegister, , , USE, true, bool) \
+ V(f32_trunc, TruncF32, DoubleRegister, DoubleRegister, , , USE, true, bool) \
+ V(f32_nearest_int, NearestIntF32, DoubleRegister, DoubleRegister, , , USE, \
+ true, bool) \
V(f32_abs, lpebr, DoubleRegister, DoubleRegister, , , USE, , void) \
V(f32_neg, lcebr, DoubleRegister, DoubleRegister, , , USE, , void) \
V(f32_sqrt, sqebr, DoubleRegister, DoubleRegister, , , USE, , void) \
+ V(f64_ceil, CeilF64, DoubleRegister, DoubleRegister, , , USE, true, bool) \
+ V(f64_floor, FloorF64, DoubleRegister, DoubleRegister, , , USE, true, bool) \
+ V(f64_trunc, TruncF64, DoubleRegister, DoubleRegister, , , USE, true, bool) \
+ V(f64_nearest_int, NearestIntF64, DoubleRegister, DoubleRegister, , , USE, \
+ true, bool) \
V(f64_abs, lpdbr, DoubleRegister, DoubleRegister, , , USE, , void) \
V(f64_neg, lcdbr, DoubleRegister, DoubleRegister, , , USE, , void) \
V(f64_sqrt, sqdbr, DoubleRegister, DoubleRegister, , , USE, , void)
@@ -1185,6 +1431,14 @@ UNOP_LIST(EMIT_UNOP_FUNCTION)
// V(name, instr, dtype, stype1, stype2, dcast, scast1, scast2, rcast,
// return_val, return_type)
#define BINOP_LIST(V) \
+ V(f32_min, FloatMin, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
+ USE, , void) \
+ V(f32_max, FloatMax, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
+ USE, , void) \
+ V(f64_min, DoubleMin, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
+ USE, , void) \
+ V(f64_max, DoubleMax, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
+ USE, , void) \
V(f64_add, AddF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
USE, , void) \
V(f64_sub, SubF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
@@ -1279,84 +1533,6 @@ BINOP_LIST(EMIT_BINOP_FUNCTION)
#undef REGISTER_AND_WITH_1F
#undef LFR_TO_REG
-bool LiftoffAssembler::emit_f32_ceil(DoubleRegister dst, DoubleRegister src) {
- fiebra(ROUND_TOWARD_POS_INF, dst, src);
- return true;
-}
-
-bool LiftoffAssembler::emit_f32_floor(DoubleRegister dst, DoubleRegister src) {
- fiebra(ROUND_TOWARD_NEG_INF, dst, src);
- return true;
-}
-
-bool LiftoffAssembler::emit_f32_trunc(DoubleRegister dst, DoubleRegister src) {
- fiebra(ROUND_TOWARD_0, dst, src);
- return true;
-}
-
-bool LiftoffAssembler::emit_f32_nearest_int(DoubleRegister dst,
- DoubleRegister src) {
- fiebra(ROUND_TO_NEAREST_TO_EVEN, dst, src);
- return true;
-}
-
-void LiftoffAssembler::emit_f64_min(DoubleRegister dst, DoubleRegister lhs,
- DoubleRegister rhs) {
- if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_1)) {
- vfmin(dst, lhs, rhs, Condition(1), Condition(8), Condition(3));
- return;
- }
- DoubleMin(dst, lhs, rhs);
-}
-
-void LiftoffAssembler::emit_f32_min(DoubleRegister dst, DoubleRegister lhs,
- DoubleRegister rhs) {
- if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_1)) {
- vfmin(dst, lhs, rhs, Condition(1), Condition(8), Condition(2));
- return;
- }
- FloatMin(dst, lhs, rhs);
-}
-
-bool LiftoffAssembler::emit_f64_ceil(DoubleRegister dst, DoubleRegister src) {
- fidbra(ROUND_TOWARD_POS_INF, dst, src);
- return true;
-}
-
-bool LiftoffAssembler::emit_f64_floor(DoubleRegister dst, DoubleRegister src) {
- fidbra(ROUND_TOWARD_NEG_INF, dst, src);
- return true;
-}
-
-bool LiftoffAssembler::emit_f64_trunc(DoubleRegister dst, DoubleRegister src) {
- fidbra(ROUND_TOWARD_0, dst, src);
- return true;
-}
-
-bool LiftoffAssembler::emit_f64_nearest_int(DoubleRegister dst,
- DoubleRegister src) {
- fidbra(ROUND_TO_NEAREST_TO_EVEN, dst, src);
- return true;
-}
-
-void LiftoffAssembler::emit_f64_max(DoubleRegister dst, DoubleRegister lhs,
- DoubleRegister rhs) {
- if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_1)) {
- vfmax(dst, lhs, rhs, Condition(1), Condition(8), Condition(3));
- return;
- }
- DoubleMax(dst, lhs, rhs);
-}
-
-void LiftoffAssembler::emit_f32_max(DoubleRegister dst, DoubleRegister lhs,
- DoubleRegister rhs) {
- if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_1)) {
- vfmax(dst, lhs, rhs, Condition(1), Condition(8), Condition(2));
- return;
- }
- FloatMax(dst, lhs, rhs);
-}
-
void LiftoffAssembler::emit_i32_divs(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero,
Label* trap_div_unrepresentable) {
@@ -1890,7 +2066,9 @@ bool LiftoffAssembler::emit_select(LiftoffRegister dst, Register condition,
void LiftoffAssembler::emit_smi_check(Register obj, Label* target,
SmiCheckMode mode) {
- bailout(kUnsupportedArchitecture, "emit_smi_check");
+ TestIfSmi(obj);
+ Condition condition = mode == kJumpOnSmi ? eq : ne;
+ b(condition, target); // branch on Smi or non-Smi, depending on {mode}
}
void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
@@ -3207,6 +3385,8 @@ void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
lay(sp, MemOperand(sp, size));
}
+void LiftoffAssembler::MaybeOSR() {}
+
void LiftoffStackSlots::Construct(int param_slots) {
DCHECK_LT(0, slots_.size());
SortInPushOrder();
diff --git a/chromium/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h b/chromium/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
index e8a57bafca1..3da9656b42c 100644
--- a/chromium/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
+++ b/chromium/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
@@ -70,6 +70,8 @@ inline Operand GetStackSlot(int offset) { return Operand(rbp, -offset); }
// TODO(clemensb): Make this a constexpr variable once Operand is constexpr.
inline Operand GetInstanceOperand() { return GetStackSlot(kInstanceOffset); }
+inline Operand GetOSRTargetSlot() { return GetStackSlot(kOSRTargetOffset); }
+
inline Operand GetMemOp(LiftoffAssembler* assm, Register addr, Register offset,
uintptr_t offset_imm) {
if (is_uint31(offset_imm)) {
@@ -79,7 +81,7 @@ inline Operand GetMemOp(LiftoffAssembler* assm, Register addr, Register offset,
}
// Offset immediate does not fit in 31 bits.
Register scratch = kScratchRegister;
- assm->Set(scratch, offset_imm);
+ assm->TurboAssembler::Move(scratch, offset_imm);
if (offset != no_reg) assm->addq(scratch, offset);
return Operand(addr, scratch, times_1, 0);
}
@@ -249,7 +251,7 @@ void LiftoffAssembler::AbortCompilation() {}
// static
constexpr int LiftoffAssembler::StaticStackFrameSize() {
- return liftoff::kInstanceOffset;
+ return kOSRTargetOffset;
}
int LiftoffAssembler::SlotSizeForType(ValueKind kind) {
@@ -272,7 +274,7 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
break;
case kI64:
if (RelocInfo::IsNone(rmode)) {
- TurboAssembler::Set(reg.gp(), value.to_i64());
+ TurboAssembler::Move(reg.gp(), value.to_i64());
} else {
movq(reg.gp(), Immediate64(value.to_i64(), rmode));
}
@@ -322,6 +324,10 @@ void LiftoffAssembler::SpillInstance(Register instance) {
movq(liftoff::GetInstanceOperand(), instance);
}
+void LiftoffAssembler::ResetOSRTarget() {
+ movq(liftoff::GetOSRTargetSlot(), Immediate(0));
+}
+
void LiftoffAssembler::FillInstanceInto(Register dst) {
movq(dst, liftoff::GetInstanceOperand());
}
@@ -331,7 +337,7 @@ void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
int32_t offset_imm,
LiftoffRegList pinned) {
DCHECK_GE(offset_imm, 0);
- if (emit_debug_code() && offset_reg != no_reg) {
+ if (FLAG_debug_code && offset_reg != no_reg) {
AssertZeroExtended(offset_reg);
}
Operand src_op = liftoff::GetMemOp(this, src_addr, offset_reg,
@@ -357,7 +363,7 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
static_cast<uint32_t>(offset_imm));
StoreTaggedField(dst_op, src.gp());
- if (skip_write_barrier) return;
+ if (skip_write_barrier || FLAG_disable_write_barriers) return;
Register scratch = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
Label write_barrier;
@@ -375,8 +381,8 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
MemoryChunk::kPointersToHereAreInterestingMask, zero, &exit,
Label::kNear);
leaq(scratch, dst_op);
- CallRecordWriteStub(dst_addr, scratch, EMIT_REMEMBERED_SET, kSaveFPRegs,
- wasm::WasmCode::kRecordWrite);
+ CallRecordWriteStub(dst_addr, scratch, RememberedSetAction::kEmit,
+ SaveFPRegsMode::kSave, wasm::WasmCode::kRecordWrite);
bind(&exit);
}
@@ -1253,7 +1259,7 @@ void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_i64_addi(LiftoffRegister dst, LiftoffRegister lhs,
int64_t imm) {
if (!is_int32(imm)) {
- TurboAssembler::Set(kScratchRegister, imm);
+ TurboAssembler::Move(kScratchRegister, imm);
if (lhs.gp() == dst.gp()) {
addq(dst.gp(), kScratchRegister);
} else {
@@ -2534,7 +2540,7 @@ void LiftoffAssembler::emit_i64x2_splat(LiftoffRegister dst,
void LiftoffAssembler::emit_f32x4_splat(LiftoffRegister dst,
LiftoffRegister src) {
- Shufps(dst.fp(), src.fp(), src.fp(), 0);
+ F32x4Splat(dst.fp(), src.fp());
}
void LiftoffAssembler::emit_f64x2_splat(LiftoffRegister dst,
@@ -2846,13 +2852,7 @@ void LiftoffAssembler::emit_s128_const(LiftoffRegister dst,
}
void LiftoffAssembler::emit_s128_not(LiftoffRegister dst, LiftoffRegister src) {
- if (dst.fp() != src.fp()) {
- Pcmpeqd(dst.fp(), dst.fp());
- Pxor(dst.fp(), src.fp());
- } else {
- Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- Pxor(dst.fp(), kScratchDoubleReg);
- }
+ S128Not(dst.fp(), src.fp(), kScratchDoubleReg);
}
void LiftoffAssembler::emit_s128_and(LiftoffRegister dst, LiftoffRegister lhs,
@@ -3429,15 +3429,7 @@ void LiftoffAssembler::emit_i32x4_extmul_high_i16x8_u(LiftoffRegister dst,
void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst,
LiftoffRegister src) {
- DoubleRegister reg = dst.fp() == src.fp() ? kScratchDoubleReg : dst.fp();
- Pxor(reg, reg);
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpsubq(dst.fp(), reg, src.fp());
- } else {
- psubq(reg, src.fp());
- if (dst.fp() != reg) movaps(dst.fp(), reg);
- }
+ I64x2Neg(dst.fp(), src.fp(), kScratchDoubleReg);
}
void LiftoffAssembler::emit_i64x2_alltrue(LiftoffRegister dst,
@@ -3504,13 +3496,13 @@ void LiftoffAssembler::emit_i64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
Movaps(tmp1.fp(), lhs.fp());
Movaps(tmp2.fp(), rhs.fp());
// Multiply high dword of each qword of left with right.
- Psrlq(tmp1.fp(), 32);
+ Psrlq(tmp1.fp(), byte{32});
Pmuludq(tmp1.fp(), rhs.fp());
// Multiply high dword of each qword of right with left.
- Psrlq(tmp2.fp(), 32);
+ Psrlq(tmp2.fp(), byte{32});
Pmuludq(tmp2.fp(), lhs.fp());
Paddq(tmp2.fp(), tmp1.fp());
- Psllq(tmp2.fp(), 32);
+ Psllq(tmp2.fp(), byte{32});
liftoff::EmitSimdCommutativeBinOp<&Assembler::vpmuludq, &Assembler::pmuludq>(
this, dst, lhs, rhs);
Paddq(dst.fp(), tmp2.fp());
@@ -3586,11 +3578,11 @@ void LiftoffAssembler::emit_f32x4_neg(LiftoffRegister dst,
LiftoffRegister src) {
if (dst.fp() == src.fp()) {
Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- Pslld(kScratchDoubleReg, static_cast<byte>(31));
+ Pslld(kScratchDoubleReg, byte{31});
Xorps(dst.fp(), kScratchDoubleReg);
} else {
Pcmpeqd(dst.fp(), dst.fp());
- Pslld(dst.fp(), static_cast<byte>(31));
+ Pslld(dst.fp(), byte{31});
Xorps(dst.fp(), src.fp());
}
}
@@ -3674,7 +3666,7 @@ void LiftoffAssembler::emit_f32x4_min(LiftoffRegister dst, LiftoffRegister lhs,
// propagate -0's and NaNs, which may be non-canonical.
Orps(kScratchDoubleReg, dst.fp());
// Canonicalize NaNs by quieting and clearing the payload.
- Cmpps(dst.fp(), kScratchDoubleReg, int8_t{3});
+ Cmpunordps(dst.fp(), kScratchDoubleReg);
Orps(kScratchDoubleReg, dst.fp());
Psrld(dst.fp(), byte{10});
Andnps(dst.fp(), kScratchDoubleReg);
@@ -3706,7 +3698,7 @@ void LiftoffAssembler::emit_f32x4_max(LiftoffRegister dst, LiftoffRegister lhs,
// Propagate sign discrepancy and (subtle) quiet NaNs.
Subps(kScratchDoubleReg, dst.fp());
// Canonicalize NaNs by clearing the payload. Sign is non-deterministic.
- Cmpps(dst.fp(), kScratchDoubleReg, int8_t{3});
+ Cmpunordps(dst.fp(), kScratchDoubleReg);
Psrld(dst.fp(), byte{10});
Andnps(dst.fp(), kScratchDoubleReg);
}
@@ -3729,11 +3721,11 @@ void LiftoffAssembler::emit_f64x2_abs(LiftoffRegister dst,
LiftoffRegister src) {
if (dst.fp() == src.fp()) {
Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- Psrlq(kScratchDoubleReg, static_cast<byte>(1));
+ Psrlq(kScratchDoubleReg, byte{1});
Andpd(dst.fp(), kScratchDoubleReg);
} else {
Pcmpeqd(dst.fp(), dst.fp());
- Psrlq(dst.fp(), static_cast<byte>(1));
+ Psrlq(dst.fp(), byte{1});
Andpd(dst.fp(), src.fp());
}
}
@@ -3810,61 +3802,12 @@ void LiftoffAssembler::emit_f64x2_div(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_f64x2_min(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- // The minpd instruction doesn't propagate NaNs and +0's in its first
- // operand. Perform minpd in both orders, merge the results, and adjust.
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vminpd(kScratchDoubleReg, lhs.fp(), rhs.fp());
- vminpd(dst.fp(), rhs.fp(), lhs.fp());
- } else if (dst.fp() == lhs.fp() || dst.fp() == rhs.fp()) {
- XMMRegister src = dst.fp() == lhs.fp() ? rhs.fp() : lhs.fp();
- movaps(kScratchDoubleReg, src);
- minpd(kScratchDoubleReg, dst.fp());
- minpd(dst.fp(), src);
- } else {
- movaps(kScratchDoubleReg, lhs.fp());
- minpd(kScratchDoubleReg, rhs.fp());
- movaps(dst.fp(), rhs.fp());
- minpd(dst.fp(), lhs.fp());
- }
- // propagate -0's and NaNs, which may be non-canonical.
- Orpd(kScratchDoubleReg, dst.fp());
- // Canonicalize NaNs by quieting and clearing the payload.
- Cmppd(dst.fp(), kScratchDoubleReg, int8_t{3});
- Orpd(kScratchDoubleReg, dst.fp());
- Psrlq(dst.fp(), 13);
- Andnpd(dst.fp(), kScratchDoubleReg);
+ F64x2Min(dst.fp(), lhs.fp(), rhs.fp(), kScratchDoubleReg);
}
void LiftoffAssembler::emit_f64x2_max(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- // The maxpd instruction doesn't propagate NaNs and +0's in its first
- // operand. Perform maxpd in both orders, merge the results, and adjust.
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vmaxpd(kScratchDoubleReg, lhs.fp(), rhs.fp());
- vmaxpd(dst.fp(), rhs.fp(), lhs.fp());
- } else if (dst.fp() == lhs.fp() || dst.fp() == rhs.fp()) {
- XMMRegister src = dst.fp() == lhs.fp() ? rhs.fp() : lhs.fp();
- movaps(kScratchDoubleReg, src);
- maxpd(kScratchDoubleReg, dst.fp());
- maxpd(dst.fp(), src);
- } else {
- movaps(kScratchDoubleReg, lhs.fp());
- maxpd(kScratchDoubleReg, rhs.fp());
- movaps(dst.fp(), rhs.fp());
- maxpd(dst.fp(), lhs.fp());
- }
- // Find discrepancies.
- Xorpd(dst.fp(), kScratchDoubleReg);
- // Propagate NaNs, which may be non-canonical.
- Orpd(kScratchDoubleReg, dst.fp());
- // Propagate sign discrepancy and (subtle) quiet NaNs.
- Subpd(kScratchDoubleReg, dst.fp());
- // Canonicalize NaNs by clearing the payload. Sign is non-deterministic.
- Cmppd(dst.fp(), kScratchDoubleReg, int8_t{3});
- Psrlq(dst.fp(), 13);
- Andnpd(dst.fp(), kScratchDoubleReg);
+ F64x2Max(dst.fp(), lhs.fp(), rhs.fp(), kScratchDoubleReg);
}
void LiftoffAssembler::emit_f64x2_pmin(LiftoffRegister dst, LiftoffRegister lhs,
@@ -4148,20 +4091,13 @@ void LiftoffAssembler::emit_i64x2_extract_lane(LiftoffRegister dst,
void LiftoffAssembler::emit_f32x4_extract_lane(LiftoffRegister dst,
LiftoffRegister lhs,
uint8_t imm_lane_idx) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vshufps(dst.fp(), lhs.fp(), lhs.fp(), imm_lane_idx);
- } else {
- if (dst.fp() != lhs.fp()) movaps(dst.fp(), lhs.fp());
- if (imm_lane_idx != 0) shufps(dst.fp(), dst.fp(), imm_lane_idx);
- }
+ F32x4ExtractLane(dst.fp(), lhs.fp(), imm_lane_idx);
}
void LiftoffAssembler::emit_f64x2_extract_lane(LiftoffRegister dst,
LiftoffRegister lhs,
uint8_t imm_lane_idx) {
- Pextrq(kScratchRegister, lhs.fp(), static_cast<int8_t>(imm_lane_idx));
- Movq(dst.fp(), kScratchRegister);
+ F64x2ExtractLane(dst.fp(), lhs.fp(), imm_lane_idx);
}
void LiftoffAssembler::emit_i8x16_replace_lane(LiftoffRegister dst,
@@ -4237,22 +4173,7 @@ void LiftoffAssembler::emit_f64x2_replace_lane(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2,
uint8_t imm_lane_idx) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- if (imm_lane_idx == 0) {
- vpblendw(dst.fp(), src1.fp(), src2.fp(), 0b00001111);
- } else {
- vmovlhps(dst.fp(), src1.fp(), src2.fp());
- }
- } else {
- CpuFeatureScope scope(this, SSE4_1);
- if (dst.fp() != src1.fp()) movaps(dst.fp(), src1.fp());
- if (imm_lane_idx == 0) {
- pblendw(dst.fp(), src2.fp(), 0b00001111);
- } else {
- movlhps(dst.fp(), src2.fp());
- }
- }
+ F64x2ReplaceLane(dst.fp(), src1.fp(), src2.fp(), imm_lane_idx);
}
void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
@@ -4424,6 +4345,12 @@ void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
addq(rsp, Immediate(size));
}
+void LiftoffAssembler::MaybeOSR() {
+ cmpq(liftoff::GetOSRTargetSlot(), Immediate(0));
+ j(not_equal, static_cast<Address>(WasmCode::kWasmOnStackReplace),
+ RelocInfo::WASM_STUB_CALL);
+}
+
void LiftoffStackSlots::Construct(int param_slots) {
DCHECK_LT(0, slots_.size());
SortInPushOrder();
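The x64 hunks above thread an on-stack-replacement slot through the Liftoff frame: kOSRTargetOffset is now part of StaticStackFrameSize, ResetOSRTarget clears the slot, and MaybeOSR compares it against zero and jumps to the kWasmOnStackReplace stub when a target has been installed. A minimal sketch of that polling pattern, with a plain struct standing in for the frame slot (FrameSketch and the helper names are illustrative, not V8 API):

#include <cstdint>

struct FrameSketch {
  // Stands in for the slot at kOSRTargetOffset; 0 means "no OSR requested".
  uintptr_t osr_target = 0;
};

inline void ResetOsrTarget(FrameSketch& frame) { frame.osr_target = 0; }

// Mirrors MaybeOSR: cmpq(slot, Immediate(0)) followed by j(not_equal, stub).
inline bool OsrPending(const FrameSketch& frame) {
  return frame.osr_target != 0;
}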
diff --git a/chromium/v8/src/wasm/branch-hint-map.h b/chromium/v8/src/wasm/branch-hint-map.h
new file mode 100644
index 00000000000..242bbecbce4
--- /dev/null
+++ b/chromium/v8/src/wasm/branch-hint-map.h
@@ -0,0 +1,46 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_BRANCH_HINT_MAP_H_
+#define V8_WASM_BRANCH_HINT_MAP_H_
+
+#include <unordered_map>
+
+#include "src/base/macros.h"
+
+namespace v8 {
+namespace internal {
+
+namespace wasm {
+
+enum class WasmBranchHint : uint8_t {
+ kNoHint = 0,
+ kUnlikely = 1,
+ kLikely = 2,
+};
+
+class V8_EXPORT_PRIVATE BranchHintMap {
+ public:
+ void insert(uint32_t offset, WasmBranchHint hint) {
+ map_.emplace(offset, hint);
+ }
+ WasmBranchHint GetHintFor(uint32_t offset) const {
+ auto it = map_.find(offset);
+ if (it == map_.end()) {
+ return WasmBranchHint::kNoHint;
+ }
+ return it->second;
+ }
+
+ private:
+ std::unordered_map<uint32_t, WasmBranchHint> map_;
+};
+
+using BranchHintInfo = std::unordered_map<uint32_t, BranchHintMap>;
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_BRANCH_HINT_MAP_H_
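The new branch-hint-map.h above is self-contained: BranchHintInfo maps a function index to a BranchHintMap, which in turn maps a byte offset inside that function to a WasmBranchHint. A minimal usage sketch (the function index and offsets below are made up for illustration):

#include "src/wasm/branch-hint-map.h"

void RecordAndQueryHints() {
  using v8::internal::wasm::BranchHintInfo;
  using v8::internal::wasm::WasmBranchHint;

  BranchHintInfo info;
  // Mark the branch at byte offset 17 of function 3 as unlikely.
  info[3].insert(17, WasmBranchHint::kUnlikely);
  // Offsets without an annotation fall back to kNoHint.
  WasmBranchHint hint = info[3].GetHintFor(42);  // == WasmBranchHint::kNoHint
  (void)hint;
}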
diff --git a/chromium/v8/src/wasm/c-api.cc b/chromium/v8/src/wasm/c-api.cc
index 3af5afaee37..72d2e073055 100644
--- a/chromium/v8/src/wasm/c-api.cc
+++ b/chromium/v8/src/wasm/c-api.cc
@@ -257,7 +257,6 @@ void Engine::operator delete(void* p) { ::operator delete(p); }
auto Engine::make(own<Config>&& config) -> own<Engine> {
i::FLAG_expose_gc = true;
i::FLAG_experimental_wasm_reftypes = true;
- i::FLAG_experimental_wasm_mv = true;
auto engine = new (std::nothrow) EngineImpl;
if (!engine) return own<Engine>();
engine->platform = v8::platform::NewDefaultPlatform();
@@ -456,8 +455,7 @@ struct FuncTypeImpl : ExternTypeImpl {
ownvec<ValType> params;
ownvec<ValType> results;
- FuncTypeImpl(ownvec<ValType>& params, // NOLINT(runtime/references)
- ownvec<ValType>& results) // NOLINT(runtime/references)
+ FuncTypeImpl(ownvec<ValType>& params, ownvec<ValType>& results)
: ExternTypeImpl(EXTERN_FUNC),
params(std::move(params)),
results(std::move(results)) {}
@@ -508,8 +506,7 @@ struct GlobalTypeImpl : ExternTypeImpl {
own<ValType> content;
Mutability mutability;
- GlobalTypeImpl(own<ValType>& content, // NOLINT(runtime/references)
- Mutability mutability)
+ GlobalTypeImpl(own<ValType>& content, Mutability mutability)
: ExternTypeImpl(EXTERN_GLOBAL),
content(std::move(content)),
mutability(mutability) {}
@@ -561,8 +558,7 @@ struct TableTypeImpl : ExternTypeImpl {
own<ValType> element;
Limits limits;
- TableTypeImpl(own<ValType>& element, // NOLINT(runtime/references)
- Limits limits)
+ TableTypeImpl(own<ValType>& element, Limits limits)
: ExternTypeImpl(EXTERN_TABLE),
element(std::move(element)),
limits(limits) {}
@@ -653,9 +649,7 @@ struct ImportTypeImpl {
Name name;
own<ExternType> type;
- ImportTypeImpl(Name& module, // NOLINT(runtime/references)
- Name& name, // NOLINT(runtime/references)
- own<ExternType>& type) // NOLINT(runtime/references)
+ ImportTypeImpl(Name& module, Name& name, own<ExternType>& type)
: module(std::move(module)),
name(std::move(name)),
type(std::move(type)) {}
@@ -696,8 +690,7 @@ struct ExportTypeImpl {
Name name;
own<ExternType> type;
- ExportTypeImpl(Name& name, // NOLINT(runtime/references)
- own<ExternType>& type) // NOLINT(runtime/references)
+ ExportTypeImpl(Name& name, own<ExternType>& type)
: name(std::move(name)), type(std::move(type)) {}
};
@@ -1357,26 +1350,6 @@ i::Handle<i::Object> WasmRefToV8(i::Isolate* isolate, const Ref* ref) {
return impl(ref)->v8_object();
}
-i::Handle<i::Object> CallTargetForCaching(i::Isolate* isolate,
- i::Address real_call_target) {
- if (i::kTaggedSize == i::kInt32Size) {
- return isolate->factory()->NewForeign(real_call_target);
- } else {
- // 64-bit uncompressed platform.
- return i::handle(i::Smi((real_call_target << i::kSmiTagSize) | i::kSmiTag),
- isolate);
- }
-}
-
-i::Address CallTargetFromCache(i::Object cached_call_target) {
- if (i::kTaggedSize == i::kInt32Size) {
- return i::Foreign::cast(cached_call_target).foreign_address();
- } else {
- // 64-bit uncompressed platform.
- return cached_call_target.ptr() >> i::kSmiTagSize;
- }
-}
-
void PrepareFunctionData(i::Isolate* isolate,
i::Handle<i::WasmExportedFunctionData> function_data,
const i::wasm::FunctionSig* sig,
@@ -1390,12 +1363,6 @@ void PrepareFunctionData(i::Isolate* isolate,
// Compute packed args size.
function_data->set_packed_args_size(
i::wasm::CWasmArgumentsPacker::TotalSize(sig));
- // Get call target (function table offset), and wrap it as a cacheable object
- // (pseudo-Smi or Foreign, depending on platform).
- i::Handle<i::Object> call_target = CallTargetForCaching(
- isolate,
- function_data->instance().GetCallTarget(function_data->function_index()));
- function_data->set_wasm_call_target(*call_target);
}
void PushArgs(const i::wasm::FunctionSig* sig, const Val args[],
@@ -1532,8 +1499,7 @@ auto Func::call(const Val args[], Val results[]) const -> own<Trap> {
PrepareFunctionData(isolate, function_data, sig, instance->module());
i::Handle<i::Code> wrapper_code = i::Handle<i::Code>(
i::Code::cast(function_data->c_wrapper_code()), isolate);
- i::Address call_target =
- CallTargetFromCache(function_data->wasm_call_target());
+ i::Address call_target = function_data->foreign_address();
i::wasm::CWasmArgumentsPacker packer(function_data->packed_args_size());
PushArgs(sig, args, &packer, store);
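The c-api.cc hunks above drop the CallTargetForCaching/CallTargetFromCache pair: Func::call now reads the call target straight from function_data->foreign_address() instead of caching it as a Foreign (when pointers do not fit in a Smi) or as a pseudo-Smi (64-bit uncompressed builds). For reference, the deleted pseudo-Smi trick simply shifted the address so the Smi tag bit stays clear; a standalone sketch of that encoding (helper names are made up, constants follow the usual V8 values):

#include <cstdint>

constexpr int kSmiTagSize = 1;
constexpr uintptr_t kSmiTag = 0;

// Encode: shift the call target up by one bit so the low (Smi) tag bit is 0.
inline uintptr_t EncodeAsPseudoSmi(uintptr_t call_target) {
  return (call_target << kSmiTagSize) | kSmiTag;
}

// Decode: undo the shift to recover the raw call target.
inline uintptr_t DecodePseudoSmi(uintptr_t encoded) {
  return encoded >> kSmiTagSize;
}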
diff --git a/chromium/v8/src/wasm/compilation-environment.h b/chromium/v8/src/wasm/compilation-environment.h
index 987180c83f6..a10190f70b0 100644
--- a/chromium/v8/src/wasm/compilation-environment.h
+++ b/chromium/v8/src/wasm/compilation-environment.h
@@ -38,8 +38,6 @@ enum RuntimeExceptionSupport : bool {
enum UseTrapHandler : bool { kUseTrapHandler = true, kNoTrapHandler = false };
-enum LowerSimd : bool { kLowerSimd = true, kNoLowerSimd = false };
-
// The {CompilationEnv} encapsulates the module data that is used during
// compilation. CompilationEnvs are shareable across multiple compilations.
struct CompilationEnv {
@@ -66,8 +64,6 @@ struct CompilationEnv {
// Features enabled for this compilation.
const WasmFeatures enabled_features;
- const LowerSimd lower_simd;
-
// We assume that memories of size >= half of the virtual address space
// cannot be allocated (see https://crbug.com/1201340).
static constexpr uint32_t kMaxMemoryPagesAtRuntime = std::min(
@@ -77,8 +73,7 @@ struct CompilationEnv {
constexpr CompilationEnv(const WasmModule* module,
UseTrapHandler use_trap_handler,
RuntimeExceptionSupport runtime_exception_support,
- const WasmFeatures& enabled_features,
- LowerSimd lower_simd = kNoLowerSimd)
+ const WasmFeatures& enabled_features)
: module(module),
use_trap_handler(use_trap_handler),
runtime_exception_support(runtime_exception_support),
@@ -92,8 +87,7 @@ struct CompilationEnv {
module && module->has_maximum_pages ? module->maximum_pages
: max_mem_pages()) *
uint64_t{kWasmPageSize})),
- enabled_features(enabled_features),
- lower_simd(lower_simd) {}
+ enabled_features(enabled_features) {}
};
// The wire bytes are either owned by the StreamingDecoder, or (after streaming)
@@ -127,6 +121,8 @@ class V8_EXPORT_PRIVATE CompilationState {
void CancelCompilation();
+ void CancelInitialCompilation();
+
void SetError();
void SetWireBytesStorage(std::shared_ptr<WireBytesStorage>);
diff --git a/chromium/v8/src/wasm/function-body-decoder-impl.h b/chromium/v8/src/wasm/function-body-decoder-impl.h
index d37f7186818..94c34c402d5 100644
--- a/chromium/v8/src/wasm/function-body-decoder-impl.h
+++ b/chromium/v8/src/wasm/function-body-decoder-impl.h
@@ -539,13 +539,6 @@ struct BlockTypeImmediate {
type = value_type_reader::read_value_type<validate>(decoder, pc, &length,
module, enabled);
} else {
- if (!VALIDATE(enabled.has_mv())) {
- DecodeError<validate>(decoder, pc,
- "invalid block type %" PRId64
- ", enable with --experimental-wasm-mv",
- block_type);
- return;
- }
type = kWasmBottom;
sig_index = static_cast<uint32_t>(block_type);
}
@@ -582,18 +575,6 @@ struct BranchDepthImmediate {
};
template <Decoder::ValidateFlag validate>
-struct BranchOnExceptionImmediate {
- BranchDepthImmediate<validate> depth;
- ExceptionIndexImmediate<validate> index;
- uint32_t length = 0;
- inline BranchOnExceptionImmediate(Decoder* decoder, const byte* pc)
- : depth(BranchDepthImmediate<validate>(decoder, pc)),
- index(ExceptionIndexImmediate<validate>(decoder, pc + depth.length)) {
- length = depth.length + index.length;
- }
-};
-
-template <Decoder::ValidateFlag validate>
struct FunctionIndexImmediate {
uint32_t index = 0;
uint32_t length = 1;
@@ -965,8 +946,10 @@ enum Reachability : uint8_t {
template <typename Value, Decoder::ValidateFlag validate>
struct ControlBase : public PcForErrors<validate> {
ControlKind kind = kControlBlock;
- uint32_t locals_count = 0;
- uint32_t stack_depth = 0; // stack height at the beginning of the construct.
+ uint32_t locals_count = 0; // Additional locals introduced in this 'let'.
+ uint32_t stack_depth = 0; // Stack height at the beginning of the construct.
+ int32_t previous_catch = -1; // Depth of the innermost catch containing this
+ // 'try'.
Reachability reachability = kReachable;
// Values merged into the start or end of this control construct.
@@ -1037,7 +1020,6 @@ struct ControlBase : public PcForErrors<validate> {
F(If, const Value& cond, Control* if_block) \
F(FallThruTo, Control* c) \
F(PopControl, Control* block) \
- F(EndControl, Control* block) \
/* Instructions: */ \
F(UnOp, WasmOpcode opcode, const Value& value, Value* result) \
F(BinOp, WasmOpcode opcode, const Value& lhs, const Value& rhs, \
@@ -1099,6 +1081,7 @@ struct ControlBase : public PcForErrors<validate> {
F(ReturnCallIndirect, const Value& index, \
const CallIndirectImmediate<validate>& imm, const Value args[]) \
F(BrOnNull, const Value& ref_object, uint32_t depth) \
+ F(BrOnNonNull, const Value& ref_object, uint32_t depth) \
F(SimdOp, WasmOpcode opcode, Vector<Value> args, Value* result) \
F(SimdLaneOp, WasmOpcode opcode, const SimdLaneImmediate<validate>& imm, \
const Vector<Value> inputs, Value* result) \
@@ -1184,9 +1167,7 @@ class WasmDecoder : public Decoder {
module_(module),
enabled_(enabled),
detected_(detected),
- sig_(sig) {
- if (sig_ && sig_->return_count() > 1) detected_->Add(kFeature_mv);
- }
+ sig_(sig) {}
Zone* zone() const { return local_types_.get_allocator().zone(); }
@@ -1433,9 +1414,6 @@ class WasmDecoder : public Decoder {
inline bool Complete(CallFunctionImmediate<validate>& imm) {
if (!VALIDATE(imm.index < module_->functions.size())) return false;
imm.sig = module_->functions[imm.index].sig;
- if (imm.sig->return_count() > 1) {
- this->detected_->Add(kFeature_mv);
- }
return true;
}
@@ -1450,13 +1428,11 @@ class WasmDecoder : public Decoder {
inline bool Complete(CallIndirectImmediate<validate>& imm) {
if (!VALIDATE(module_->has_signature(imm.sig_index))) return false;
imm.sig = module_->signature(imm.sig_index);
- if (imm.sig->return_count() > 1) {
- this->detected_->Add(kFeature_mv);
- }
return true;
}
inline bool Validate(const byte* pc, CallIndirectImmediate<validate>& imm) {
+ // Validate immediate table index.
if (!VALIDATE(imm.table_index < module_->tables.size())) {
DecodeError(pc, "call_indirect: table index immediate out of bounds");
return false;
@@ -1468,10 +1444,13 @@ class WasmDecoder : public Decoder {
imm.table_index);
return false;
}
+
+ // Validate immediate signature index.
if (!Complete(imm)) {
DecodeError(pc, "invalid signature index: #%u", imm.sig_index);
return false;
}
+
// Check that the dynamic signature for this call is a subtype of the static
// type of the table the function is defined in.
ValueType immediate_type = ValueType::Ref(imm.sig_index, kNonNullable);
@@ -1480,6 +1459,7 @@ class WasmDecoder : public Decoder {
"call_indirect: Immediate signature #%u is not a subtype of "
"immediate table #%u",
imm.sig_index, imm.table_index);
+ return false;
}
return true;
}
@@ -1503,13 +1483,6 @@ class WasmDecoder : public Decoder {
return checkAvailable(imm.table_count);
}
- inline bool Validate(const byte* pc,
- BranchOnExceptionImmediate<validate>& imm,
- size_t control_size) {
- return Validate(pc, imm.depth, control_size) &&
- Validate(pc + imm.depth.length, imm.index);
- }
-
inline bool Validate(const byte* pc, WasmOpcode opcode,
SimdLaneImmediate<validate>& imm) {
uint8_t num_lanes = 0;
@@ -1573,9 +1546,6 @@ class WasmDecoder : public Decoder {
if (imm.type != kWasmBottom) return true;
if (!VALIDATE(module_->has_signature(imm.sig_index))) return false;
imm.sig = module_->signature(imm.sig_index);
- if (imm.sig->return_count() > 1) {
- this->detected_->Add(kFeature_mv);
- }
return true;
}
@@ -1709,6 +1679,7 @@ class WasmDecoder : public Decoder {
case kExprBr:
case kExprBrIf:
case kExprBrOnNull:
+ case kExprBrOnNonNull:
case kExprDelegate: {
BranchDepthImmediate<validate> imm(decoder, pc + 1);
return 1 + imm.length;
@@ -2043,6 +2014,7 @@ class WasmDecoder : public Decoder {
case kExprBrIf:
case kExprBrTable:
case kExprIf:
+ case kExprBrOnNonNull:
return {1, 0};
case kExprLocalGet:
case kExprGlobalGet:
@@ -2183,12 +2155,22 @@ MemoryAccessImmediate<validate>::MemoryAccessImmediate(
: MemoryAccessImmediate(decoder, pc, max_alignment,
decoder->module_->is_memory64) {}
+// Only call this in contexts where {current_code_reachable_and_ok_} is known to
+// hold.
+#define CALL_INTERFACE(name, ...) \
+ do { \
+ DCHECK(!control_.empty()); \
+ DCHECK(current_code_reachable_and_ok_); \
+ DCHECK_EQ(current_code_reachable_and_ok_, \
+ this->ok() && control_.back().reachable()); \
+ interface_.name(this, ##__VA_ARGS__); \
+ } while (false)
#define CALL_INTERFACE_IF_OK_AND_REACHABLE(name, ...) \
do { \
DCHECK(!control_.empty()); \
DCHECK_EQ(current_code_reachable_and_ok_, \
this->ok() && control_.back().reachable()); \
- if (current_code_reachable_and_ok_) { \
+ if (V8_LIKELY(current_code_reachable_and_ok_)) { \
interface_.name(this, ##__VA_ARGS__); \
} \
} while (false)
@@ -2289,33 +2271,39 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return WasmOpcodes::OpcodeName(opcode);
}
- inline WasmCodePosition position() {
+ WasmCodePosition position() const {
int offset = static_cast<int>(this->pc_ - this->start_);
DCHECK_EQ(this->pc_ - this->start_, offset); // overflows cannot happen
return offset;
}
- inline uint32_t control_depth() const {
+ uint32_t control_depth() const {
return static_cast<uint32_t>(control_.size());
}
- inline Control* control_at(uint32_t depth) {
+ Control* control_at(uint32_t depth) {
DCHECK_GT(control_.size(), depth);
return &control_.back() - depth;
}
- inline uint32_t stack_size() const {
+ uint32_t stack_size() const {
DCHECK_GE(stack_end_, stack_);
DCHECK_GE(kMaxUInt32, stack_end_ - stack_);
return static_cast<uint32_t>(stack_end_ - stack_);
}
- inline Value* stack_value(uint32_t depth) {
+ Value* stack_value(uint32_t depth) const {
DCHECK_LT(0, depth);
DCHECK_GE(stack_size(), depth);
return stack_end_ - depth;
}
+ int32_t current_catch() const { return current_catch_; }
+
+ uint32_t control_depth_of_current_catch() const {
+ return control_depth() - 1 - current_catch();
+ }
+
void SetSucceedingCodeDynamicallyUnreachable() {
Control* current = &control_.back();
if (current->reachable()) {
@@ -2324,7 +2312,12 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
}
+ inline uint32_t pc_relative_offset() const {
+ return this->pc_offset() - first_instruction_offset;
+ }
+
private:
+ uint32_t first_instruction_offset = 0;
Interface interface_;
// The value stack, stored as individual pointers for maximum performance.
@@ -2340,6 +2333,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
// a cache for {ok() && control_.back().reachable()}).
bool current_code_reachable_and_ok_ = true;
+ // Depth of the current try block.
+ int32_t current_catch_ = -1;
+
static Value UnreachableValue(const uint8_t* pc) {
return Value{pc, kWasmBottom};
}
@@ -2519,6 +2515,8 @@ class WasmFullDecoder : public WasmDecoder<validate> {
ArgVector args = PeekArgs(imm.sig);
Control* try_block = PushControl(kControlTry, 0, args.length());
SetBlockType(try_block, imm, args.begin());
+ try_block->previous_catch = current_catch_;
+ current_catch_ = static_cast<int>(control_depth() - 1);
CALL_INTERFACE_IF_OK_AND_REACHABLE(Try, try_block);
DropArgs(imm.sig);
PushMergeValues(try_block, &try_block->start_merge);
@@ -2543,7 +2541,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
this->DecodeError("catch after unwind for try");
return 0;
}
- FallThruTo(c);
+ FallThrough();
c->kind = kControlTryCatch;
// TODO(jkummerow): Consider moving the stack manipulation after the
// INTERFACE call for consistency.
@@ -2556,6 +2554,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
Push(CreateValue(sig->GetParam(i)));
}
Vector<Value> values(stack_ + c->stack_depth, sig->parameter_count());
+ current_catch_ = c->previous_catch; // Pop try scope.
CALL_INTERFACE_IF_OK_AND_PARENT_REACHABLE(CatchException, imm, c, values);
current_code_reachable_and_ok_ = this->ok() && c->reachable();
return 1 + imm.length;
@@ -2584,11 +2583,11 @@ class WasmFullDecoder : public WasmDecoder<validate> {
"cannot delegate inside the catch handler of the target");
return 0;
}
- FallThruTo(c);
+ FallThrough();
CALL_INTERFACE_IF_OK_AND_PARENT_REACHABLE(Delegate, imm.depth + 1, c);
- current_code_reachable_and_ok_ = this->ok() && control_.back().reachable();
+ current_catch_ = c->previous_catch;
EndControl();
- PopControl(c);
+ PopControl();
return 1 + imm.length;
}
@@ -2608,9 +2607,10 @@ class WasmFullDecoder : public WasmDecoder<validate> {
this->error("cannot have catch-all after unwind");
return 0;
}
- FallThruTo(c);
+ FallThrough();
c->kind = kControlTryCatchAll;
c->reachability = control_at(1)->innerReachability();
+ current_catch_ = c->previous_catch; // Pop try scope.
CALL_INTERFACE_IF_OK_AND_PARENT_REACHABLE(CatchAll, c);
stack_end_ = stack_ + c->stack_depth;
current_code_reachable_and_ok_ = this->ok() && c->reachable();
@@ -2630,9 +2630,10 @@ class WasmFullDecoder : public WasmDecoder<validate> {
this->error("catch, catch-all or unwind already present for try");
return 0;
}
- FallThruTo(c);
+ FallThrough();
c->kind = kControlTryUnwind;
c->reachability = control_at(1)->innerReachability();
+ current_catch_ = c->previous_catch; // Pop try scope.
CALL_INTERFACE_IF_OK_AND_PARENT_REACHABLE(CatchAll, c);
stack_end_ = stack_ + c->stack_depth;
current_code_reachable_and_ok_ = this->ok() && c->reachable();
@@ -2645,33 +2646,69 @@ class WasmFullDecoder : public WasmDecoder<validate> {
if (!this->Validate(this->pc_ + 1, imm, control_.size())) return 0;
Value ref_object = Peek(0, 0);
Control* c = control_at(imm.depth);
- TypeCheckBranchResult check_result = TypeCheckBranch(c, true, 1);
+ if (!VALIDATE(TypeCheckBranch<true>(c, 1))) return 0;
switch (ref_object.type.kind()) {
case kBottom:
// We are in a polymorphic stack. Leave the stack as it is.
- DCHECK(check_result != kReachableBranch);
+ DCHECK(!current_code_reachable_and_ok_);
break;
case kRef:
// For a non-nullable value, we won't take the branch, and can leave
// the stack as it is.
break;
case kOptRef: {
- if (V8_LIKELY(check_result == kReachableBranch)) {
- CALL_INTERFACE_IF_OK_AND_REACHABLE(BrOnNull, ref_object, imm.depth);
Value result = CreateValue(
ValueType::Ref(ref_object.type.heap_type(), kNonNullable));
// The result of br_on_null has the same value as the argument (but a
// non-nullable type).
- CALL_INTERFACE_IF_OK_AND_REACHABLE(Forward, ref_object, &result);
- c->br_merge()->reached = true;
+ if (V8_LIKELY(current_code_reachable_and_ok_)) {
+ CALL_INTERFACE(BrOnNull, ref_object, imm.depth);
+ CALL_INTERFACE(Forward, ref_object, &result);
+ c->br_merge()->reached = true;
+ }
+ // In unreachable code, we still have to push a value of the correct
+ // type onto the stack.
Drop(ref_object);
Push(result);
- } else {
- // Even in non-reachable code, we need to push a value of the correct
- // type to the stack.
- Drop(ref_object);
- Push(CreateValue(
- ValueType::Ref(ref_object.type.heap_type(), kNonNullable)));
+ break;
+ }
+ default:
+ PopTypeError(0, ref_object, "object reference");
+ return 0;
+ }
+ return 1 + imm.length;
+ }
+
+ DECODE(BrOnNonNull) {
+ CHECK_PROTOTYPE_OPCODE(gc);
+ BranchDepthImmediate<validate> imm(this, this->pc_ + 1);
+ if (!this->Validate(this->pc_ + 1, imm, control_.size())) return 0;
+ Value ref_object = Peek(0, 0, kWasmAnyRef);
+ Drop(ref_object);
+ // Typechecking the branch and creating the branch merges requires the
+ // non-null value on the stack, so we push it temporarily.
+ Value result = CreateValue(ref_object.type.AsNonNull());
+ Push(result);
+ Control* c = control_at(imm.depth);
+ if (!VALIDATE(TypeCheckBranch<true>(c, 0))) return 0;
+ switch (ref_object.type.kind()) {
+ case kBottom:
+ // We are in unreachable code. Do nothing.
+ DCHECK(!current_code_reachable_and_ok_);
+ break;
+ case kRef:
+ // For a non-nullable value, we always take the branch.
+ if (V8_LIKELY(current_code_reachable_and_ok_)) {
+ CALL_INTERFACE(Forward, ref_object, stack_value(1));
+ CALL_INTERFACE(BrOrRet, imm.depth, 0);
+ c->br_merge()->reached = true;
+ }
+ break;
+ case kOptRef: {
+ if (V8_LIKELY(current_code_reachable_and_ok_)) {
+ CALL_INTERFACE(Forward, ref_object, stack_value(1));
+ CALL_INTERFACE(BrOnNonNull, ref_object, imm.depth);
+ c->br_merge()->reached = true;
}
break;
}
@@ -2679,6 +2716,8 @@ class WasmFullDecoder : public WasmDecoder<validate> {
PopTypeError(0, ref_object, "object reference");
return 0;
}
+    // If the branch is not taken, {ref_object} is null; drop the temporary value.
+ Drop(result);
return 1 + imm.length;
}
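The BrOnNonNull handler added above implements the decoder side of br_on_non_null: the reference is peeked and dropped, a temporary value with the non-nullable type is pushed for branch type-checking, the branch is taken only when the reference is non-null, and on fallthrough the temporary value is dropped again. A behavioral sketch of the instruction's semantics in plain C++ (not decoder code; names are illustrative):

#include <optional>
#include <utility>

template <typename T, typename TakeBranch>
void BrOnNonNullSemantics(std::optional<T> ref, TakeBranch&& take_branch) {
  if (ref.has_value()) {
    // Taken branch: the target receives the value, now known to be non-null.
    std::forward<TakeBranch>(take_branch)(*ref);
    return;
  }
  // Null reference: fall through; nothing is passed to the branch target.
}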
@@ -2751,7 +2790,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
this->DecodeError("else already present for if");
return 0;
}
- if (!TypeCheckFallThru()) return 0;
+ if (!VALIDATE(TypeCheckFallThru())) return 0;
c->kind = kControlIfElse;
CALL_INTERFACE_IF_OK_AND_PARENT_REACHABLE(Else, c);
if (c->reachable()) c->end_merge.reached = true;
@@ -2764,27 +2803,24 @@ class WasmFullDecoder : public WasmDecoder<validate> {
DECODE(End) {
DCHECK(!control_.empty());
Control* c = &control_.back();
- if (!VALIDATE(!c->is_incomplete_try())) {
- this->DecodeError("missing catch or catch-all in try");
- return 0;
- }
- if (c->is_onearmed_if()) {
- if (!VALIDATE(c->end_merge.arity == c->start_merge.arity)) {
- this->DecodeError(
- c->pc(), "start-arity and end-arity of one-armed if must match");
- return 0;
- }
- if (!TypeCheckOneArmedIf(c)) return 0;
- }
if (c->is_try_catch()) {
// Emulate catch-all + re-throw.
- FallThruTo(c);
+ FallThrough();
c->reachability = control_at(1)->innerReachability();
CALL_INTERFACE_IF_OK_AND_PARENT_REACHABLE(CatchAll, c);
current_code_reachable_and_ok_ =
this->ok() && control_.back().reachable();
CALL_INTERFACE_IF_OK_AND_REACHABLE(Rethrow, c);
EndControl();
+ PopControl();
+ return 1;
+ }
+ if (!VALIDATE(!c->is_incomplete_try())) {
+ this->DecodeError("missing catch or catch-all in try");
+ return 0;
+ }
+ if (c->is_onearmed_if()) {
+ if (!VALIDATE(TypeCheckOneArmedIf(c))) return 0;
}
if (c->is_try_unwind()) {
// Unwind implicitly rethrows at the end.
@@ -2798,7 +2834,6 @@ class WasmFullDecoder : public WasmDecoder<validate> {
this->local_types_.begin() + c->locals_count);
this->num_locals_ -= c->locals_count;
}
- if (!TypeCheckFallThru()) return 0;
if (control_.size() == 1) {
// If at the last (implicit) control, check we are at end.
@@ -2809,11 +2844,13 @@ class WasmFullDecoder : public WasmDecoder<validate> {
// The result of the block is the return value.
trace_msg->Append("\n" TRACE_INST_FORMAT, startrel(this->pc_),
"(implicit) return");
- DoReturn();
+ DoReturn<kStrictCounting, kFallthroughMerge>();
control_.clear();
return 1;
}
- PopControl(c);
+
+ if (!VALIDATE(TypeCheckFallThru())) return 0;
+ PopControl();
return 1;
}
@@ -2853,9 +2890,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
BranchDepthImmediate<validate> imm(this, this->pc_ + 1);
if (!this->Validate(this->pc_ + 1, imm, control_.size())) return 0;
Control* c = control_at(imm.depth);
- TypeCheckBranchResult check_result = TypeCheckBranch(c, false, 0);
- if (V8_LIKELY(check_result == kReachableBranch)) {
- CALL_INTERFACE_IF_OK_AND_REACHABLE(BrOrRet, imm.depth, 0);
+ if (!VALIDATE(TypeCheckBranch<false>(c, 0))) return 0;
+ if (V8_LIKELY(current_code_reachable_and_ok_)) {
+ CALL_INTERFACE(BrOrRet, imm.depth, 0);
c->br_merge()->reached = true;
}
EndControl();
@@ -2867,9 +2904,9 @@ class WasmFullDecoder : public WasmDecoder<validate> {
if (!this->Validate(this->pc_ + 1, imm, control_.size())) return 0;
Value cond = Peek(0, 0, kWasmI32);
Control* c = control_at(imm.depth);
- TypeCheckBranchResult check_result = TypeCheckBranch(c, true, 1);
- if (V8_LIKELY(check_result == kReachableBranch)) {
- CALL_INTERFACE_IF_OK_AND_REACHABLE(BrIf, cond, imm.depth);
+ if (!VALIDATE(TypeCheckBranch<true>(c, 1))) return 0;
+ if (V8_LIKELY(current_code_reachable_and_ok_)) {
+ CALL_INTERFACE(BrIf, cond, imm.depth);
c->br_merge()->reached = true;
}
Drop(cond);
@@ -2887,40 +2924,38 @@ class WasmFullDecoder : public WasmDecoder<validate> {
// all branch targets as reachable after the {CALL_INTERFACE} call.
std::vector<bool> br_targets(control_.size());
- // The result types of the br_table instruction. We have to check the
- // stack against these types. Only needed during validation.
- std::vector<ValueType> result_types;
+ uint32_t arity = 0;
while (iterator.has_next()) {
const uint32_t index = iterator.cur_index();
const byte* pos = iterator.pc();
- uint32_t target = iterator.next();
- if (!VALIDATE(ValidateBrTableTarget(target, pos, index))) return 0;
+ const uint32_t target = iterator.next();
+ if (!VALIDATE(target < control_depth())) {
+ this->DecodeError(pos, "invalid branch depth: %u", target);
+ return 0;
+ }
// Avoid redundant branch target checks.
if (br_targets[target]) continue;
br_targets[target] = true;
if (validate) {
if (index == 0) {
- // With the first branch target, initialize the result types.
- result_types = InitializeBrTableResultTypes(target);
- } else if (!UpdateBrTableResultTypes(&result_types, target, pos,
- index)) {
+ arity = control_at(target)->br_merge()->arity;
+ } else if (!VALIDATE(control_at(target)->br_merge()->arity == arity)) {
+ this->DecodeError(
+ pos, "br_table: label arity inconsistent with previous arity %d",
+ arity);
return 0;
}
+ if (!VALIDATE(TypeCheckBranch<false>(control_at(target), 1))) return 0;
}
}
- if (!VALIDATE(TypeCheckBrTable(result_types, 1))) return 0;
-
- DCHECK(this->ok());
-
- if (current_code_reachable_and_ok_) {
- CALL_INTERFACE_IF_OK_AND_REACHABLE(BrTable, imm, key);
+ if (V8_LIKELY(current_code_reachable_and_ok_)) {
+ CALL_INTERFACE(BrTable, imm, key);
- for (int i = 0, e = control_depth(); i < e; ++i) {
- if (!br_targets[i]) continue;
- control_at(i)->br_merge()->reached = true;
+ for (uint32_t i = 0; i < control_depth(); ++i) {
+ control_at(i)->br_merge()->reached |= br_targets[i];
}
}
Drop(key);
@@ -2929,22 +2964,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
DECODE(Return) {
- if (V8_LIKELY(current_code_reachable_and_ok_)) {
- if (!VALIDATE(TypeCheckReturn())) return 0;
- DoReturn();
- } else {
- // We inspect all return values from the stack to check their type.
- // Since we deal with unreachable code, we do not have to keep the
- // values.
- int num_returns = static_cast<int>(this->sig_->return_count());
- for (int i = num_returns - 1, depth = 0; i >= 0; --i, ++depth) {
- Peek(depth, i, this->sig_->GetReturn(i));
- }
- Drop(num_returns);
- }
-
- EndControl();
- return 1;
+ return DoReturn<kNonStrictCounting, kReturnMerge>() ? 1 : 0;
}
DECODE(Unreachable) {
@@ -3409,6 +3429,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
DECODE_IMPL(CatchAll);
DECODE_IMPL(Unwind);
DECODE_IMPL(BrOnNull);
+ DECODE_IMPL(BrOnNonNull);
DECODE_IMPL(Let);
DECODE_IMPL(Loop);
DECODE_IMPL(If);
@@ -3490,6 +3511,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
CALL_INTERFACE_IF_OK_AND_REACHABLE(StartFunctionBody, c);
}
+ first_instruction_offset = this->pc_offset();
// Decode the function body.
while (this->pc_ < this->end_) {
// Most operations only grow the stack by at least one element (unary and
@@ -3526,7 +3548,6 @@ class WasmFullDecoder : public WasmDecoder<validate> {
Control* current = &control_.back();
DCHECK_LE(stack_ + current->stack_depth, stack_end_);
stack_end_ = stack_ + current->stack_depth;
- CALL_INTERFACE_IF_OK_AND_REACHABLE(EndControl, current);
current->reachability = kUnreachable;
current_code_reachable_and_ok_ = false;
}
@@ -3642,11 +3663,12 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return &control_.back();
}
- void PopControl(Control* c) {
+ void PopControl() {
// This cannot be the outermost control block.
DCHECK_LT(1, control_.size());
+ Control* c = &control_.back();
+ DCHECK_LE(stack_ + c->stack_depth, stack_end_);
- DCHECK_EQ(c, &control_.back());
CALL_INTERFACE_IF_OK_AND_PARENT_REACHABLE(PopControl, c);
// A loop just leaves the values on the stack.
@@ -3658,7 +3680,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
// If the parent block was reachable before, but the popped control does not
// return to here, this block becomes "spec only reachable".
if (!parent_reached) SetSucceedingCodeDynamicallyUnreachable();
- current_code_reachable_and_ok_ = control_.back().reachable();
+ current_code_reachable_and_ok_ = this->ok() && control_.back().reachable();
}
int DecodeLoadMem(LoadType type, int prefix_len = 1) {
@@ -3739,92 +3761,6 @@ class WasmFullDecoder : public WasmDecoder<validate> {
return prefix_len + imm.length;
}
- bool ValidateBrTableTarget(uint32_t target, const byte* pos, int index) {
- if (!VALIDATE(target < this->control_.size())) {
- this->DecodeError(pos, "improper branch in br_table target %u (depth %u)",
- index, target);
- return false;
- }
- return true;
- }
-
- std::vector<ValueType> InitializeBrTableResultTypes(uint32_t target) {
- Merge<Value>* merge = control_at(target)->br_merge();
- int br_arity = merge->arity;
- std::vector<ValueType> result(br_arity);
- for (int i = 0; i < br_arity; ++i) {
- result[i] = (*merge)[i].type;
- }
- return result;
- }
-
- bool UpdateBrTableResultTypes(std::vector<ValueType>* result_types,
- uint32_t target, const byte* pos, int index) {
- Merge<Value>* merge = control_at(target)->br_merge();
- int br_arity = merge->arity;
- // First we check if the arities match.
- if (!VALIDATE(br_arity == static_cast<int>(result_types->size()))) {
- this->DecodeError(pos,
- "inconsistent arity in br_table target %u (previous "
- "was %zu, this one is %u)",
- index, result_types->size(), br_arity);
- return false;
- }
-
- for (int i = 0; i < br_arity; ++i) {
- if (this->enabled_.has_reftypes()) {
- // The expected type is the biggest common sub type of all targets.
- (*result_types)[i] =
- CommonSubtype((*result_types)[i], (*merge)[i].type, this->module_);
- } else {
- // All target must have the same signature.
- if (!VALIDATE((*result_types)[i] == (*merge)[i].type)) {
- this->DecodeError(pos,
- "inconsistent type in br_table target %u (previous "
- "was %s, this one is %s)",
- index, (*result_types)[i].name().c_str(),
- (*merge)[i].type.name().c_str());
- return false;
- }
- }
- }
- return true;
- }
-
- bool TypeCheckBrTable(const std::vector<ValueType>& result_types,
- uint32_t drop_values) {
- int br_arity = static_cast<int>(result_types.size());
- if (V8_LIKELY(!control_.back().unreachable())) {
- int available =
- static_cast<int>(stack_size()) - control_.back().stack_depth;
- available -= std::min(available, static_cast<int>(drop_values));
- // There have to be enough values on the stack.
- if (!VALIDATE(available >= br_arity)) {
- this->DecodeError(
- "expected %u elements on the stack for branch to @%d, found %u",
- br_arity, startrel(control_.back().pc()), available);
- return false;
- }
- Value* stack_values = stack_end_ - br_arity - drop_values;
- // Type-check the topmost br_arity values on the stack.
- for (int i = 0; i < br_arity; ++i) {
- Value& val = stack_values[i];
- if (!VALIDATE(IsSubtypeOf(val.type, result_types[i], this->module_))) {
- this->DecodeError("type error in merge[%u] (expected %s, got %s)", i,
- result_types[i].name().c_str(),
- val.type.name().c_str());
- return false;
- }
- }
- } else { // !control_.back().reachable()
- // Type-check the values on the stack.
- for (int i = 0; i < br_arity; ++i) {
- Peek(i + drop_values, i + 1, result_types[i]);
- }
- }
- return this->ok();
- }
-
uint32_t SimdConstOp(uint32_t opcode_length) {
Simd128Immediate<validate> imm(this, this->pc_ + opcode_length);
Value result = CreateValue(kWasmS128);
@@ -4377,7 +4313,6 @@ class WasmFullDecoder : public WasmDecoder<validate> {
CALL_INTERFACE_IF_OK_AND_REACHABLE(Drop);
CALL_INTERFACE_IF_OK_AND_REACHABLE(AssertNull, obj, &value);
} else {
- // TODO(manoskouk): Change the trap label.
CALL_INTERFACE_IF_OK_AND_REACHABLE(Trap,
TrapReason::kTrapIllegalCast);
EndControl();
@@ -4426,23 +4361,21 @@ class WasmFullDecoder : public WasmDecoder<validate> {
? kWasmBottom
: ValueType::Ref(rtt.type.ref_index(), kNonNullable));
Push(result_on_branch);
- TypeCheckBranchResult check_result = TypeCheckBranch(c, true, 0);
- if (V8_LIKELY(check_result == kReachableBranch)) {
- // This logic ensures that code generation can assume that functions
- // can only be cast to function types, and data objects to data types.
- if (V8_LIKELY(ObjectRelatedWithRtt(obj, rtt))) {
- // The {value_on_branch} parameter we pass to the interface must
- // be pointer-identical to the object on the stack, so we can't
- // reuse {result_on_branch} which was passed-by-value to {Push}.
- Value* value_on_branch = stack_value(1);
- CALL_INTERFACE_IF_OK_AND_REACHABLE(
- BrOnCast, obj, rtt, value_on_branch, branch_depth.depth);
+ if (!VALIDATE(TypeCheckBranch<true>(c, 0))) return 0;
+ // This logic ensures that code generation can assume that functions
+ // can only be cast to function types, and data objects to data types.
+ if (V8_LIKELY(ObjectRelatedWithRtt(obj, rtt))) {
+ // The {value_on_branch} parameter we pass to the interface must
+ // be pointer-identical to the object on the stack, so we can't
+ // reuse {result_on_branch} which was passed-by-value to {Push}.
+ Value* value_on_branch = stack_value(1);
+ if (V8_LIKELY(current_code_reachable_and_ok_)) {
+ CALL_INTERFACE(BrOnCast, obj, rtt, value_on_branch,
+ branch_depth.depth);
c->br_merge()->reached = true;
}
- // Otherwise the types are unrelated. Do not branch.
- } else if (check_result == kInvalidStack) {
- return 0;
}
+ // Otherwise the types are unrelated. Do not branch.
Drop(result_on_branch);
Push(obj); // Restore stack state on fallthrough.
return opcode_length + branch_depth.length;
@@ -4510,25 +4443,20 @@ class WasmFullDecoder : public WasmDecoder<validate> {
Value result_on_branch =
CreateValue(ValueType::Ref(heap_type, kNonNullable));
Push(result_on_branch);
- TypeCheckBranchResult check_result = TypeCheckBranch(c, true, 0);
- if (V8_LIKELY(check_result == kReachableBranch)) {
- // The {value_on_branch} parameter we pass to the interface must be
- // pointer-identical to the object on the stack, so we can't reuse
- // {result_on_branch} which was passed-by-value to {Push}.
- Value* value_on_branch = stack_value(1);
+ if (!VALIDATE(TypeCheckBranch<true>(c, 0))) return 0;
+ // The {value_on_branch} parameter we pass to the interface must be
+ // pointer-identical to the object on the stack, so we can't reuse
+ // {result_on_branch} which was passed-by-value to {Push}.
+ Value* value_on_branch = stack_value(1);
+ if (V8_LIKELY(current_code_reachable_and_ok_)) {
if (opcode == kExprBrOnFunc) {
- CALL_INTERFACE_IF_OK_AND_REACHABLE(BrOnFunc, obj, value_on_branch,
- branch_depth.depth);
+ CALL_INTERFACE(BrOnFunc, obj, value_on_branch, branch_depth.depth);
} else if (opcode == kExprBrOnData) {
- CALL_INTERFACE_IF_OK_AND_REACHABLE(BrOnData, obj, value_on_branch,
- branch_depth.depth);
+ CALL_INTERFACE(BrOnData, obj, value_on_branch, branch_depth.depth);
} else {
- CALL_INTERFACE_IF_OK_AND_REACHABLE(BrOnI31, obj, value_on_branch,
- branch_depth.depth);
+ CALL_INTERFACE(BrOnI31, obj, value_on_branch, branch_depth.depth);
}
c->br_merge()->reached = true;
- } else if (check_result == kInvalidStack) {
- return 0;
}
Drop(result_on_branch);
Push(obj); // Restore stack state on fallthrough.
@@ -4714,11 +4642,6 @@ class WasmFullDecoder : public WasmDecoder<validate> {
}
}
- void DoReturn() {
- DCHECK_GE(stack_size(), this->sig_->return_count());
- CALL_INTERFACE_IF_OK_AND_REACHABLE(DoReturn, 0);
- }
-
V8_INLINE void EnsureStackSpace(int slots_needed) {
if (V8_LIKELY(stack_capacity_end_ - stack_end_ >= slots_needed)) return;
GrowStackSpace(slots_needed);
@@ -4842,7 +4765,7 @@ class WasmFullDecoder : public WasmDecoder<validate> {
// TODO(wasm): This check is often redundant.
if (V8_UNLIKELY(stack_size() < limit + count)) {
// Popping past the current control start in reachable code.
- if (!VALIDATE(!control_.back().reachable())) {
+ if (!VALIDATE(!current_code_reachable_and_ok_)) {
NotEnoughArgumentsError(0);
}
// Pop what we can.
@@ -4854,188 +4777,152 @@ class WasmFullDecoder : public WasmDecoder<validate> {
// For more descriptive call sites:
V8_INLINE void Drop(const Value& /* unused */) { Drop(1); }
- // Pops values from the stack, as defined by {merge}. Thereby we type-check
- // unreachable merges. Afterwards the values are pushed again on the stack
- // according to the signature in {merge}. This is done so follow-up validation
- // is possible.
- bool TypeCheckUnreachableMerge(Merge<Value>& merge, bool conditional_branch,
- uint32_t drop_values = 0) {
- int arity = merge.arity;
- // For conditional branches, stack value '0' is the condition of the branch,
- // and the result values start at index '1'.
- int index_offset = conditional_branch ? 1 : 0;
+ enum StackElementsCountMode : bool {
+ kNonStrictCounting = false,
+ kStrictCounting = true
+ };
+
+ enum MergeType { kBranchMerge, kReturnMerge, kFallthroughMerge };
+
+ // - If the current code is reachable, check if the current stack values are
+ // compatible with {merge} based on their number and types. Disregard the
+ // first {drop_values} on the stack. If {strict_count}, check that
+ // #(stack elements) == {merge->arity}, otherwise
+ // #(stack elements) >= {merge->arity}.
+ // - If the current code is unreachable, check if any values that may exist on
+ // top of the stack are compatible with {merge}. If {push_branch_values},
+ // push back to the stack values based on the type of {merge} (this is
+ // needed for conditional branches due to their typing rules, and
+ // fallthroughs so that the outer control finds the expected values on the
+ // stack). TODO(manoskouk): We expect the unreachable-code behavior to
+ // change, either due to relaxation of dead code verification, or the
+ // introduction of subtyping.
+ template <StackElementsCountMode strict_count, bool push_branch_values,
+ MergeType merge_type>
+ bool TypeCheckStackAgainstMerge(uint32_t drop_values, Merge<Value>* merge) {
+ static_assert(validate, "Call this function only within VALIDATE");
+ constexpr const char* merge_description =
+ merge_type == kBranchMerge
+ ? "branch"
+ : merge_type == kReturnMerge ? "return" : "fallthru";
+ uint32_t arity = merge->arity;
+ uint32_t actual = stack_size() - control_.back().stack_depth;
+ if (V8_LIKELY(current_code_reachable_and_ok_)) {
+ if (V8_UNLIKELY(strict_count ? actual != drop_values + arity
+ : actual < drop_values + arity)) {
+ this->DecodeError("expected %u elements on the stack for %s, found %u",
+ arity, merge_description,
+ actual >= drop_values ? actual - drop_values : 0);
+ return false;
+ }
+ // Typecheck the topmost {merge->arity} values on the stack.
+ Value* stack_values = stack_end_ - (arity + drop_values);
+ for (uint32_t i = 0; i < arity; ++i) {
+ Value& val = stack_values[i];
+ Value& old = (*merge)[i];
+ if (!IsSubtypeOf(val.type, old.type, this->module_)) {
+ this->DecodeError("type error in %s[%u] (expected %s, got %s)",
+ merge_description, i, old.type.name().c_str(),
+ val.type.name().c_str());
+ return false;
+ }
+ }
+ return true;
+ }
+ // Unreachable code validation starts here.
+ if (V8_UNLIKELY(strict_count && actual > drop_values + arity)) {
+ this->DecodeError("expected %u elements on the stack for %s, found %u",
+ arity, merge_description,
+ actual >= drop_values ? actual - drop_values : 0);
+ return false;
+ }
+ // TODO(manoskouk): Use similar code as above if we keep unreachable checks.
for (int i = arity - 1, depth = drop_values; i >= 0; --i, ++depth) {
- Peek(depth, index_offset + i, merge[i].type);
- }
- // Push values of the correct type onto the stack.
- Drop(drop_values);
- Drop(arity);
- // {Drop} is adaptive for polymorphic stacks: it might drop fewer values
- // than requested. So ensuring stack space here is not redundant.
- EnsureStackSpace(arity + drop_values);
- for (int i = 0; i < arity; i++) Push(CreateValue(merge[i].type));
- // {drop_values} are about to be dropped anyway, so we can forget their
- // previous types, but we do have to maintain the correct stack height.
- for (uint32_t i = 0; i < drop_values; i++) {
- Push(UnreachableValue(this->pc_));
+ Peek(depth, i, (*merge)[i].type);
+ }
+ if (push_branch_values) {
+ Drop(drop_values);
+ Drop(arity);
+ // {Drop} is adaptive for polymorphic stacks: it might drop fewer values
+ // than requested. So ensuring stack space here is not redundant.
+ EnsureStackSpace(drop_values + arity);
+ // Push values of the correct type onto the stack.
+ for (int i = 0; i < static_cast<int>(arity); i++) {
+ Push(CreateValue((*merge)[i].type));
+ }
+ // {drop_values} are about to be dropped anyway, so we can forget their
+ // previous types, but we do have to maintain the correct stack height.
+ for (uint32_t i = 0; i < drop_values; i++) {
+ Push(UnreachableValue(this->pc_));
+ }
}
return this->ok();
}
+ template <StackElementsCountMode strict_count, MergeType merge_type>
+ bool DoReturn() {
+ if (!VALIDATE((TypeCheckStackAgainstMerge<strict_count, false, merge_type>(
+ 0, &control_.front().end_merge)))) {
+ return false;
+ }
+ DCHECK_IMPLIES(current_code_reachable_and_ok_,
+ stack_size() >= this->sig_->return_count());
+ CALL_INTERFACE_IF_OK_AND_REACHABLE(DoReturn, 0);
+ EndControl();
+ return true;
+ }
+
int startrel(const byte* ptr) { return static_cast<int>(ptr - this->start_); }
- void FallThruTo(Control* c) {
- DCHECK_EQ(c, &control_.back());
+ void FallThrough() {
+ Control* c = &control_.back();
DCHECK_NE(c->kind, kControlLoop);
- if (!TypeCheckFallThru()) return;
+ if (!VALIDATE(TypeCheckFallThru())) return;
CALL_INTERFACE_IF_OK_AND_REACHABLE(FallThruTo, c);
if (c->reachable()) c->end_merge.reached = true;
}
- bool TypeCheckMergeValues(Control* c, uint32_t drop_values,
- Merge<Value>* merge) {
- static_assert(validate, "Call this function only within VALIDATE");
- DCHECK(merge == &c->start_merge || merge == &c->end_merge);
- DCHECK_GE(stack_size() - drop_values, c->stack_depth + merge->arity);
- Value* stack_values = stack_value(merge->arity + drop_values);
- // Typecheck the topmost {merge->arity} values on the stack.
- for (uint32_t i = 0; i < merge->arity; ++i) {
- Value& val = stack_values[i];
- Value& old = (*merge)[i];
- if (!VALIDATE(IsSubtypeOf(val.type, old.type, this->module_))) {
- this->DecodeError("type error in merge[%u] (expected %s, got %s)", i,
- old.type.name().c_str(), val.type.name().c_str());
- return false;
- }
- }
-
- return true;
- }
-
bool TypeCheckOneArmedIf(Control* c) {
static_assert(validate, "Call this function only within VALIDATE");
DCHECK(c->is_onearmed_if());
- DCHECK_EQ(c->start_merge.arity, c->end_merge.arity);
+ if (c->end_merge.arity != c->start_merge.arity) {
+ this->DecodeError(c->pc(),
+ "start-arity and end-arity of one-armed if must match");
+ return false;
+ }
for (uint32_t i = 0; i < c->start_merge.arity; ++i) {
Value& start = c->start_merge[i];
Value& end = c->end_merge[i];
- if (!VALIDATE(IsSubtypeOf(start.type, end.type, this->module_))) {
+ if (!IsSubtypeOf(start.type, end.type, this->module_)) {
this->DecodeError("type error in merge[%u] (expected %s, got %s)", i,
end.type.name().c_str(), start.type.name().c_str());
return false;
}
}
-
return true;
}
bool TypeCheckFallThru() {
static_assert(validate, "Call this function only within VALIDATE");
- Control& c = control_.back();
- if (V8_LIKELY(c.reachable())) {
- uint32_t expected = c.end_merge.arity;
- DCHECK_GE(stack_size(), c.stack_depth);
- uint32_t actual = stack_size() - c.stack_depth;
- // Fallthrus must match the arity of the control exactly.
- if (!VALIDATE(actual == expected)) {
- this->DecodeError(
- "expected %u elements on the stack for fallthru to @%d, found %u",
- expected, startrel(c.pc()), actual);
- return false;
- }
- if (expected == 0) return true; // Fast path.
-
- return TypeCheckMergeValues(&c, 0, &c.end_merge);
- }
-
- // Type-check an unreachable fallthru. First we do an arity check, then a
- // type check. Note that type-checking may require an adjustment of the
- // stack, if some stack values are missing to match the block signature.
- Merge<Value>& merge = c.end_merge;
- int arity = static_cast<int>(merge.arity);
- int available = static_cast<int>(stack_size()) - c.stack_depth;
- // For fallthrus, not more than the needed values should be available.
- if (!VALIDATE(available <= arity)) {
- this->DecodeError(
- "expected %u elements on the stack for fallthru to @%d, found %u",
- arity, startrel(c.pc()), available);
- return false;
- }
- // Pop all values from the stack for type checking of existing stack
- // values.
- return TypeCheckUnreachableMerge(merge, false);
+ return TypeCheckStackAgainstMerge<kStrictCounting, true, kFallthroughMerge>(
+ 0, &control_.back().end_merge);
}
- enum TypeCheckBranchResult {
- kReachableBranch,
- kUnreachableBranch,
- kInvalidStack,
- };
-
- // If the type code is reachable, check if the current stack values are
+ // If the current code is reachable, check if the current stack values are
// compatible with a jump to {c}, based on their number and types.
// Otherwise, we have a polymorphic stack: check if any values that may exist
- // on top of the stack are compatible with {c}, and push back to the stack
- // values based on the type of {c}.
- TypeCheckBranchResult TypeCheckBranch(Control* c, bool conditional_branch,
- uint32_t drop_values) {
- if (V8_LIKELY(control_.back().reachable())) {
- // We only do type-checking here. This is only needed during validation.
- if (!validate) return kReachableBranch;
-
- // Branches must have at least the number of values expected; can have
- // more.
- uint32_t expected = c->br_merge()->arity;
- if (expected == 0) return kReachableBranch; // Fast path.
- uint32_t limit = control_.back().stack_depth;
- if (!VALIDATE(stack_size() >= limit + drop_values + expected)) {
- uint32_t actual = stack_size() - limit;
- actual -= std::min(actual, drop_values);
- this->DecodeError(
- "expected %u elements on the stack for br to @%d, found %u",
- expected, startrel(c->pc()), actual);
- return kInvalidStack;
- }
- return TypeCheckMergeValues(c, drop_values, c->br_merge())
- ? kReachableBranch
- : kInvalidStack;
- }
-
- return TypeCheckUnreachableMerge(*c->br_merge(), conditional_branch,
- drop_values)
- ? kUnreachableBranch
- : kInvalidStack;
- }
-
- bool TypeCheckReturn() {
- int num_returns = static_cast<int>(this->sig_->return_count());
- // No type checking is needed if there are no returns.
- if (num_returns == 0) return true;
-
- // Returns must have at least the number of values expected; can have more.
- int num_available =
- static_cast<int>(stack_size()) - control_.back().stack_depth;
- if (!VALIDATE(num_available >= num_returns)) {
- this->DecodeError(
- "expected %u elements on the stack for return, found %u", num_returns,
- num_available);
- return false;
- }
-
- // Typecheck the topmost {num_returns} values on the stack.
- // This line requires num_returns > 0.
- Value* stack_values = stack_end_ - num_returns;
- for (int i = 0; i < num_returns; ++i) {
- Value& val = stack_values[i];
- ValueType expected_type = this->sig_->GetReturn(i);
- if (!VALIDATE(IsSubtypeOf(val.type, expected_type, this->module_))) {
- this->DecodeError("type error in return[%u] (expected %s, got %s)", i,
- expected_type.name().c_str(),
- val.type.name().c_str());
- return false;
- }
- }
- return true;
+ // on top of the stack are compatible with {c}. If {push_branch_values},
+  // push values back onto the stack, based on the type of {c} (this is needed for
+ // conditional branches due to their typing rules, and fallthroughs so that
+ // the outer control finds enough values on the stack).
+ // {drop_values} is the number of stack values that will be dropped before the
+  // branch is taken. This is currently 1 for br_if (condition), br_table
+ // (index) and br_on_null (reference), and 0 for all other branches.
+ template <bool push_branch_values>
+ bool TypeCheckBranch(Control* c, uint32_t drop_values) {
+ static_assert(validate, "Call this function only within VALIDATE");
+ return TypeCheckStackAgainstMerge<kNonStrictCounting, push_branch_values,
+ kBranchMerge>(drop_values, c->br_merge());
}
void onFirstError() override {
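The merge check that the removed TypeCheckMergeValues performed (and that TypeCheckStackAgainstMerge now subsumes) boils down to a per-slot subtype test on the topmost values below the dropped ones. A minimal standalone sketch, using simplified value types and a simplified subtype relation that are not the real V8 ones:

    // Sketch only: the topmost {merge.size()} stack values, sitting below the
    // {drop_values} values consumed by the branch itself, must each be a
    // subtype of the corresponding merge slot.
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    enum class Ty { kI32, kFuncRef, kAnyRef };   // simplified value types
    bool IsSubtype(Ty sub, Ty super) {           // simplified subtype relation
      return sub == super || (sub == Ty::kFuncRef && super == Ty::kAnyRef);
    }

    bool CheckStackAgainstMerge(const std::vector<Ty>& stack,
                                const std::vector<Ty>& merge,
                                uint32_t drop_values) {
      if (stack.size() < merge.size() + drop_values) return false;
      size_t base = stack.size() - drop_values - merge.size();
      for (size_t i = 0; i < merge.size(); ++i) {
        if (!IsSubtype(stack[base + i], merge[i])) return false;
      }
      return true;
    }
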
diff --git a/chromium/v8/src/wasm/function-compiler.cc b/chromium/v8/src/wasm/function-compiler.cc
index 4303344f136..4a2db3d496e 100644
--- a/chromium/v8/src/wasm/function-compiler.cc
+++ b/chromium/v8/src/wasm/function-compiler.cc
@@ -306,7 +306,8 @@ JSToWasmWrapperCompilationUnit::JSToWasmWrapperCompilationUnit(
Isolate* isolate, WasmEngine* wasm_engine, const FunctionSig* sig,
const WasmModule* module, bool is_import,
const WasmFeatures& enabled_features, AllowGeneric allow_generic)
- : is_import_(is_import),
+ : isolate_(isolate),
+ is_import_(is_import),
sig_(sig),
use_generic_wrapper_(allow_generic && UseGenericWrapper(sig) &&
!is_import),
@@ -326,19 +327,19 @@ void JSToWasmWrapperCompilationUnit::Execute() {
}
}
-Handle<Code> JSToWasmWrapperCompilationUnit::Finalize(Isolate* isolate) {
+Handle<Code> JSToWasmWrapperCompilationUnit::Finalize() {
Handle<Code> code;
if (use_generic_wrapper_) {
code =
- isolate->builtins()->builtin_handle(Builtins::kGenericJSToWasmWrapper);
+ isolate_->builtins()->builtin_handle(Builtins::kGenericJSToWasmWrapper);
} else {
- CompilationJob::Status status = job_->FinalizeJob(isolate);
+ CompilationJob::Status status = job_->FinalizeJob(isolate_);
CHECK_EQ(status, CompilationJob::SUCCEEDED);
code = job_->compilation_info()->code();
}
- if (!use_generic_wrapper_ && must_record_function_compilation(isolate)) {
+ if (!use_generic_wrapper_ && must_record_function_compilation(isolate_)) {
RecordWasmHeapStubCompilation(
- isolate, code, "%s", job_->compilation_info()->GetDebugName().get());
+ isolate_, code, "%s", job_->compilation_info()->GetDebugName().get());
}
return code;
}
@@ -353,7 +354,7 @@ Handle<Code> JSToWasmWrapperCompilationUnit::CompileJSToWasmWrapper(
module, is_import, enabled_features,
kAllowGeneric);
unit.Execute();
- return unit.Finalize(isolate);
+ return unit.Finalize();
}
// static
@@ -366,7 +367,7 @@ Handle<Code> JSToWasmWrapperCompilationUnit::CompileSpecificJSToWasmWrapper(
module, is_import, enabled_features,
kDontAllowGeneric);
unit.Execute();
- return unit.Finalize(isolate);
+ return unit.Finalize();
}
} // namespace wasm
diff --git a/chromium/v8/src/wasm/function-compiler.h b/chromium/v8/src/wasm/function-compiler.h
index f8d1f00a4e4..80cd1a7b679 100644
--- a/chromium/v8/src/wasm/function-compiler.h
+++ b/chromium/v8/src/wasm/function-compiler.h
@@ -127,8 +127,10 @@ class V8_EXPORT_PRIVATE JSToWasmWrapperCompilationUnit final {
AllowGeneric allow_generic);
~JSToWasmWrapperCompilationUnit();
+ Isolate* isolate() const { return isolate_; }
+
void Execute();
- Handle<Code> Finalize(Isolate* isolate);
+ Handle<Code> Finalize();
bool is_import() const { return is_import_; }
const FunctionSig* sig() const { return sig_; }
@@ -146,6 +148,11 @@ class V8_EXPORT_PRIVATE JSToWasmWrapperCompilationUnit final {
const WasmModule* module);
private:
+ // Wrapper compilation is bound to an isolate. Concurrent accesses to the
+ // isolate (during the "Execute" phase) must be audited carefully, i.e. we
+ // should only access immutable information (like the root table). The isolate
+ // is guaranteed to be alive when this unit executes.
+ Isolate* isolate_;
bool is_import_;
const FunctionSig* sig_;
bool use_generic_wrapper_;
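Since the unit now captures its isolate at construction, a call site no longer threads the isolate through Finalize(). A hedged sketch of the intended pattern (hypothetical caller, mirroring the static helpers in function-compiler.cc above):

    // Hypothetical call site; Execute() may run off the main thread but should
    // only touch immutable isolate state (e.g. the root table), while Finalize()
    // runs on the isolate's thread.
    JSToWasmWrapperCompilationUnit unit(isolate, wasm_engine, sig, module,
                                        is_import, enabled_features, kAllowGeneric);
    unit.Execute();
    Handle<Code> wrapper = unit.Finalize();
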
diff --git a/chromium/v8/src/wasm/graph-builder-interface.cc b/chromium/v8/src/wasm/graph-builder-interface.cc
index c856f4d9493..c8b7d1834e4 100644
--- a/chromium/v8/src/wasm/graph-builder-interface.cc
+++ b/chromium/v8/src/wasm/graph-builder-interface.cc
@@ -9,6 +9,7 @@
#include "src/handles/handles.h"
#include "src/objects/objects-inl.h"
#include "src/utils/ostreams.h"
+#include "src/wasm/branch-hint-map.h"
#include "src/wasm/decoder.h"
#include "src/wasm/function-body-decoder-impl.h"
#include "src/wasm/function-body-decoder.h"
@@ -29,7 +30,7 @@ namespace {
// It maintains a control state that tracks whether the environment
// is reachable, has reached a control end, or has been merged.
struct SsaEnv : public ZoneObject {
- enum State { kControlEnd, kUnreachable, kReached, kMerged };
+ enum State { kUnreachable, kReached, kMerged };
State state;
TFNode* control;
@@ -50,11 +51,11 @@ struct SsaEnv : public ZoneObject {
effect(other.effect),
instance_cache(other.instance_cache),
locals(std::move(other.locals)) {
- other.Kill(kUnreachable);
+ other.Kill();
}
- void Kill(State new_state = kControlEnd) {
- state = new_state;
+ void Kill() {
+ state = kUnreachable;
for (TFNode*& local : locals) {
local = nullptr;
}
@@ -67,8 +68,6 @@ struct SsaEnv : public ZoneObject {
}
};
-constexpr uint32_t kNullCatch = static_cast<uint32_t>(-1);
-
class WasmGraphBuildingInterface {
public:
static constexpr Decoder::ValidateFlag validate = Decoder::kFullValidation;
@@ -97,7 +96,7 @@ class WasmGraphBuildingInterface {
};
struct Control : public ControlBase<Value, validate> {
- SsaEnv* end_env = nullptr; // end environment for the construct.
+ SsaEnv* merge_env = nullptr; // merge environment for the construct.
SsaEnv* false_env = nullptr; // false environment (only for if).
TryInfo* try_info = nullptr; // information about try statements.
int32_t previous_catch = -1; // previous Control with a catch.
@@ -110,10 +109,18 @@ class WasmGraphBuildingInterface {
: ControlBase(std::forward<Args>(args)...) {}
};
- explicit WasmGraphBuildingInterface(compiler::WasmGraphBuilder* builder)
- : builder_(builder) {}
+ explicit WasmGraphBuildingInterface(compiler::WasmGraphBuilder* builder,
+ int func_index)
+ : builder_(builder), func_index_(func_index) {}
void StartFunction(FullDecoder* decoder) {
+ // Get the branch hints map for this function (if available)
+ if (decoder->module_) {
+ auto branch_hints_it = decoder->module_->branch_hints.find(func_index_);
+ if (branch_hints_it != decoder->module_->branch_hints.end()) {
+ branch_hints_ = &branch_hints_it->second;
+ }
+ }
// The first '+ 1' is needed by TF Start node, the second '+ 1' is for the
// instance parameter.
builder_->Start(static_cast<int>(decoder->sig_->parameter_count() + 1 + 1));
@@ -156,15 +163,15 @@ class WasmGraphBuildingInterface {
void Block(FullDecoder* decoder, Control* block) {
// The branch environment is the outer environment.
- block->end_env = ssa_env_;
+ block->merge_env = ssa_env_;
SetEnv(Steal(decoder->zone(), ssa_env_));
}
void Loop(FullDecoder* decoder, Control* block) {
- SsaEnv* finish_try_env = Steal(decoder->zone(), ssa_env_);
- block->end_env = finish_try_env;
- SetEnv(finish_try_env);
- // The continue environment is the inner environment.
+ // This is the merge environment at the beginning of the loop.
+ SsaEnv* merge_env = Steal(decoder->zone(), ssa_env_);
+ block->merge_env = merge_env;
+ SetEnv(merge_env);
ssa_env_->state = SsaEnv::kMerged;
@@ -216,15 +223,15 @@ class WasmGraphBuildingInterface {
control());
}
+ // Now we setup a new environment for the inside of the loop.
SetEnv(Split(decoder->zone(), ssa_env_));
builder_->StackCheck(decoder->position());
-
ssa_env_->SetNotMerged();
- if (!decoder->ok()) return;
+
// Wrap input merge into phis.
for (uint32_t i = 0; i < block->start_merge.arity; ++i) {
Value& val = block->start_merge[i];
- TFNode* inputs[] = {val.node, block->end_env->control};
+ TFNode* inputs[] = {val.node, block->merge_env->control};
val.node = builder_->Phi(val.type, 1, inputs);
}
}
@@ -238,22 +245,34 @@ class WasmGraphBuildingInterface {
SsaEnv* try_env = Steal(decoder->zone(), outer_env);
SetEnv(try_env);
TryInfo* try_info = decoder->zone()->New<TryInfo>(catch_env);
- block->end_env = outer_env;
+ block->merge_env = outer_env;
block->try_info = try_info;
- block->previous_catch = current_catch_;
- current_catch_ = static_cast<int32_t>(decoder->control_depth() - 1);
}
void If(FullDecoder* decoder, const Value& cond, Control* if_block) {
TFNode* if_true = nullptr;
TFNode* if_false = nullptr;
- builder_->BranchNoHint(cond.node, &if_true, &if_false);
- SsaEnv* end_env = ssa_env_;
+ WasmBranchHint hint = WasmBranchHint::kNoHint;
+ if (branch_hints_) {
+ hint = branch_hints_->GetHintFor(decoder->pc_relative_offset());
+ }
+ switch (hint) {
+ case WasmBranchHint::kNoHint:
+ builder_->BranchNoHint(cond.node, &if_true, &if_false);
+ break;
+ case WasmBranchHint::kUnlikely:
+ builder_->BranchExpectFalse(cond.node, &if_true, &if_false);
+ break;
+ case WasmBranchHint::kLikely:
+ builder_->BranchExpectTrue(cond.node, &if_true, &if_false);
+ break;
+ }
+ SsaEnv* merge_env = ssa_env_;
SsaEnv* false_env = Split(decoder->zone(), ssa_env_);
false_env->control = if_false;
SsaEnv* true_env = Steal(decoder->zone(), ssa_env_);
true_env->control = if_true;
- if_block->end_env = end_env;
+ if_block->merge_env = merge_env;
if_block->false_env = false_env;
SetEnv(true_env);
}
@@ -294,11 +313,9 @@ class WasmGraphBuildingInterface {
MergeValuesInto(decoder, block, &block->end_merge, values);
}
// Now continue with the merged environment.
- SetEnv(block->end_env);
+ SetEnv(block->merge_env);
}
- void EndControl(FullDecoder* decoder, Control* block) { ssa_env_->Kill(); }
-
void UnOp(FullDecoder* decoder, WasmOpcode opcode, const Value& value,
Value* result) {
result->node = builder_->Unop(opcode, value.node, decoder->position());
@@ -482,7 +499,21 @@ class WasmGraphBuildingInterface {
SsaEnv* fenv = ssa_env_;
SsaEnv* tenv = Split(decoder->zone(), fenv);
fenv->SetNotMerged();
- builder_->BranchNoHint(cond.node, &tenv->control, &fenv->control);
+ WasmBranchHint hint = WasmBranchHint::kNoHint;
+ if (branch_hints_) {
+ hint = branch_hints_->GetHintFor(decoder->pc_relative_offset());
+ }
+ switch (hint) {
+ case WasmBranchHint::kNoHint:
+ builder_->BranchNoHint(cond.node, &tenv->control, &fenv->control);
+ break;
+ case WasmBranchHint::kUnlikely:
+ builder_->BranchExpectFalse(cond.node, &tenv->control, &fenv->control);
+ break;
+ case WasmBranchHint::kLikely:
+ builder_->BranchExpectTrue(cond.node, &tenv->control, &fenv->control);
+ break;
+ }
builder_->SetControl(fenv->control);
SetEnv(tenv);
BrOrRet(decoder, depth, 1);
@@ -639,6 +670,19 @@ class WasmGraphBuildingInterface {
SetEnv(false_env);
}
+ void BrOnNonNull(FullDecoder* decoder, const Value& ref_object,
+ uint32_t depth) {
+ SsaEnv* false_env = ssa_env_;
+ SsaEnv* true_env = Split(decoder->zone(), false_env);
+ false_env->SetNotMerged();
+ builder_->BrOnNull(ref_object.node, &false_env->control,
+ &true_env->control);
+ builder_->SetControl(false_env->control);
+ SetEnv(true_env);
+ BrOrRet(decoder, depth, 0);
+ SetEnv(false_env);
+ }
+
void SimdOp(FullDecoder* decoder, WasmOpcode opcode, Vector<Value> args,
Value* result) {
NodeVector inputs(args.size());
@@ -689,9 +733,6 @@ class WasmGraphBuildingInterface {
const ExceptionIndexImmediate<validate>& imm,
Control* block, Vector<Value> values) {
DCHECK(block->is_try_catch());
-
- current_catch_ = block->previous_catch; // Pop try scope.
-
// The catch block is unreachable if no possible throws in the try block
// exist. We only build a landing pad if some node in the try block can
// (possibly) throw. Otherwise the catch environments remain empty.
@@ -743,7 +784,6 @@ class WasmGraphBuildingInterface {
// and IfFailure nodes.
builder_->Rethrow(block->try_info->exception);
TerminateThrow(decoder);
- current_catch_ = block->previous_catch;
return;
}
DCHECK(decoder->control_at(depth)->is_try());
@@ -765,7 +805,6 @@ class WasmGraphBuildingInterface {
target_try->exception, block->try_info->exception);
}
}
- current_catch_ = block->previous_catch;
}
void CatchAll(FullDecoder* decoder, Control* block) {
@@ -773,8 +812,6 @@ class WasmGraphBuildingInterface {
block->is_try_unwind());
DCHECK_EQ(decoder->control_at(0), block);
- current_catch_ = block->previous_catch; // Pop try scope.
-
// The catch block is unreachable if no possible throws in the try block
// exist. We only build a landing pad if some node in the try block can
// (possibly) throw. Otherwise the catch environments remain empty.
@@ -1080,7 +1117,8 @@ class WasmGraphBuildingInterface {
private:
SsaEnv* ssa_env_ = nullptr;
compiler::WasmGraphBuilder* builder_;
- uint32_t current_catch_ = kNullCatch;
+ int func_index_;
+ const BranchHintMap* branch_hints_ = nullptr;
// Tracks loop data for loop unrolling.
std::vector<compiler::WasmLoopInfo> loop_infos_;
@@ -1088,13 +1126,9 @@ class WasmGraphBuildingInterface {
TFNode* control() { return builder_->control(); }
- uint32_t control_depth_of_current_catch(FullDecoder* decoder) {
- return decoder->control_depth() - 1 - current_catch_;
- }
-
TryInfo* current_try_info(FullDecoder* decoder) {
- DCHECK_LT(current_catch_, decoder->control_depth());
- return decoder->control_at(control_depth_of_current_catch(decoder))
+ DCHECK_LT(decoder->current_catch(), decoder->control_depth());
+ return decoder->control_at(decoder->control_depth_of_current_catch())
->try_info;
}
@@ -1122,9 +1156,6 @@ class WasmGraphBuildingInterface {
case SsaEnv::kMerged:
state = 'M';
break;
- case SsaEnv::kControlEnd:
- state = 'E';
- break;
}
}
PrintF("{set_env = %p, state = %c", env, state);
@@ -1146,7 +1177,7 @@ class WasmGraphBuildingInterface {
V8_INLINE TFNode* CheckForException(FullDecoder* decoder, TFNode* node) {
if (node == nullptr) return nullptr;
- const bool inside_try_scope = current_catch_ != kNullCatch;
+ const bool inside_try_scope = decoder->current_catch() != -1;
if (!inside_try_scope) return node;
return CheckForExceptionImpl(decoder, node);
@@ -1170,7 +1201,7 @@ class WasmGraphBuildingInterface {
TryInfo* try_info = current_try_info(decoder);
if (FLAG_wasm_loop_unrolling) {
ValueVector values;
- BuildNestedLoopExits(decoder, control_depth_of_current_catch(decoder),
+ BuildNestedLoopExits(decoder, decoder->control_depth_of_current_catch(),
true, values, &if_exception);
}
Goto(decoder, try_info->catch_env);
@@ -1218,8 +1249,10 @@ class WasmGraphBuildingInterface {
Value* values) {
DCHECK(merge == &c->start_merge || merge == &c->end_merge);
- SsaEnv* target = c->end_env;
+ SsaEnv* target = c->merge_env;
+ // This has to be computed before calling Goto().
const bool first = target->state == SsaEnv::kUnreachable;
+
Goto(decoder, target);
if (merge->arity == 0) return;
@@ -1327,7 +1360,6 @@ class WasmGraphBuildingInterface {
default:
UNREACHABLE();
}
- return ssa_env_->Kill();
}
// Create a complete copy of {from}.
@@ -1357,11 +1389,6 @@ class WasmGraphBuildingInterface {
return result;
}
- // Create an unreachable environment.
- SsaEnv* UnreachableEnv(Zone* zone) {
- return zone->New<SsaEnv>(zone, SsaEnv::kUnreachable, nullptr, nullptr, 0);
- }
-
void DoCall(FullDecoder* decoder, CallMode call_mode, uint32_t table_index,
CheckForNull null_check, TFNode* caller_node,
const FunctionSig* sig, uint32_t sig_index, const Value args[],
@@ -1523,10 +1550,11 @@ DecodeResult BuildTFGraph(AccountingAllocator* allocator,
compiler::WasmGraphBuilder* builder,
WasmFeatures* detected, const FunctionBody& body,
std::vector<compiler::WasmLoopInfo>* loop_infos,
- compiler::NodeOriginTable* node_origins) {
+ compiler::NodeOriginTable* node_origins,
+ int func_index) {
Zone zone(allocator, ZONE_NAME);
WasmFullDecoder<Decoder::kFullValidation, WasmGraphBuildingInterface> decoder(
- &zone, module, enabled, detected, body, builder);
+ &zone, module, enabled, detected, body, builder, func_index);
if (node_origins) {
builder->AddBytecodePositionDecorator(node_origins, &decoder);
}
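The hint-to-branch switch above appears twice (in If and BrIf). Purely as a sketch, and not how the sources are factored, the duplication could be wrapped in a helper that forwards to the builder methods shown in the diff:

    // Sketch of a helper around the duplicated switch; assumes the
    // BranchNoHint/BranchExpectFalse/BranchExpectTrue methods from the diff.
    void BranchWithHint(compiler::WasmGraphBuilder* builder, WasmBranchHint hint,
                        TFNode* cond, TFNode** true_node, TFNode** false_node) {
      switch (hint) {
        case WasmBranchHint::kNoHint:
          builder->BranchNoHint(cond, true_node, false_node);
          break;
        case WasmBranchHint::kUnlikely:
          builder->BranchExpectFalse(cond, true_node, false_node);
          break;
        case WasmBranchHint::kLikely:
          builder->BranchExpectTrue(cond, true_node, false_node);
          break;
      }
    }
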
diff --git a/chromium/v8/src/wasm/graph-builder-interface.h b/chromium/v8/src/wasm/graph-builder-interface.h
index ce125313e44..6c668e2b0a0 100644
--- a/chromium/v8/src/wasm/graph-builder-interface.h
+++ b/chromium/v8/src/wasm/graph-builder-interface.h
@@ -10,7 +10,6 @@
#define V8_WASM_GRAPH_BUILDER_INTERFACE_H_
#include "src/wasm/decoder.h"
-#include "src/wasm/wasm-opcodes.h"
#include "src/wasm/wasm-result.h"
namespace v8 {
@@ -33,7 +32,7 @@ BuildTFGraph(AccountingAllocator* allocator, const WasmFeatures& enabled,
const WasmModule* module, compiler::WasmGraphBuilder* builder,
WasmFeatures* detected, const FunctionBody& body,
std::vector<compiler::WasmLoopInfo>* loop_infos,
- compiler::NodeOriginTable* node_origins);
+ compiler::NodeOriginTable* node_origins, int func_index);
} // namespace wasm
} // namespace internal
diff --git a/chromium/v8/src/wasm/memory-protection-key.cc b/chromium/v8/src/wasm/memory-protection-key.cc
new file mode 100644
index 00000000000..e8252cd9ce9
--- /dev/null
+++ b/chromium/v8/src/wasm/memory-protection-key.cc
@@ -0,0 +1,189 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/wasm/memory-protection-key.h"
+
+#if defined(V8_OS_LINUX) && defined(V8_HOST_ARCH_X64)
+#include <sys/mman.h> // For {mprotect()} protection macros.
+#undef MAP_TYPE // Conflicts with MAP_TYPE in Torque-generated instance-types.h
+#endif
+
+#include "src/base/build_config.h"
+#include "src/base/logging.h"
+#include "src/base/macros.h"
+#include "src/base/platform/platform.h"
+
+// Runtime-detection of PKU support with {dlsym()}.
+//
+// For now, we support memory protection keys/PKEYs/PKU only for Linux on x64
+// based on glibc functions {pkey_alloc()}, {pkey_free()}, etc.
+// Those functions are only available since glibc version 2.27:
+// https://man7.org/linux/man-pages/man2/pkey_alloc.2.html
+// However, if we check the glibc version with V8_GLIBC_PREREQ here at compile
+// time, this causes two problems due to dynamic linking of glibc:
+// 1) If the compiling system _has_ a new enough glibc, the binary will include
+// calls to {pkey_alloc()} etc., and then the runtime system must supply a
+// new enough glibc version as well. That is, this potentially breaks runtime
+// compatibility on older systems (e.g., Ubuntu 16.04 with glibc 2.23).
+// 2) If the compiling system _does not_ have a new enough glibc, PKU support
+// will not be compiled in, even though the runtime system potentially _does_
+// have support for it due to a new enough Linux kernel and glibc version.
+// That is, this results in non-optimal security (PKU available, but not used).
+// Hence, we do _not_ check the glibc version during compilation, and instead
+// only at runtime try to load {pkey_alloc()} etc. with {dlsym()}.
+// TODO(dlehmann): Move this import and freestanding functions below to
+// base/platform/platform.h {OS} (lower-level functions) and
+// {base::PageAllocator} (exported API).
+#if defined(V8_OS_LINUX) && defined(V8_HOST_ARCH_X64)
+#include <dlfcn.h>
+#endif
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+// TODO(dlehmann) Security: Are there alternatives to disabling CFI altogether
+// for the functions below? Since they are essentially an arbitrary indirect
+// call gadget, disabling CFI should be only a last resort. In Chromium, there
+// was {base::ProtectedMemory} to protect the function pointer from being
+// overwritten, but t seems it was removed to not begin used and AFAICT no such
+// thing exists in V8 to begin with. See
+// https://www.chromium.org/developers/testing/control-flow-integrity and
+// https://crrev.com/c/1884819.
+// What is the general solution for CFI + {dlsym()}?
+// An alternative would be to not rely on glibc and instead implement PKEY
+// directly on top of Linux syscalls + inline asm, but that is quite some low-
+// level code (probably on the order of 100 lines).
+DISABLE_CFI_ICALL
+int AllocateMemoryProtectionKey() {
+// See comment on the import on feature testing for PKEY support.
+#if defined(V8_OS_LINUX) && defined(V8_HOST_ARCH_X64)
+  // Try to find {pkey_alloc()} support in glibc.
+ typedef int (*pkey_alloc_t)(unsigned int, unsigned int);
+ // Cache the {dlsym()} lookup in a {static} variable.
+ static auto* pkey_alloc =
+ bit_cast<pkey_alloc_t>(dlsym(RTLD_DEFAULT, "pkey_alloc"));
+ if (pkey_alloc != nullptr) {
+ // If there is support in glibc, try to allocate a new key.
+ // This might still return -1, e.g., because the kernel does not support
+ // PKU or because there is no more key available.
+ // Different reasons for why {pkey_alloc()} failed could be checked with
+ // errno, e.g., EINVAL vs ENOSPC vs ENOSYS. See manpages and glibc manual
+  // (the latter is the authoritative source):
+ // https://www.gnu.org/software/libc/manual/html_mono/libc.html#Memory-Protection-Keys
+ return pkey_alloc(/* flags, unused */ 0, kDisableAccess);
+ }
+#endif
+ return kNoMemoryProtectionKey;
+}
+
+DISABLE_CFI_ICALL
+void FreeMemoryProtectionKey(int key) {
+ // Only free the key if one was allocated.
+ if (key == kNoMemoryProtectionKey) return;
+
+#if defined(V8_OS_LINUX) && defined(V8_HOST_ARCH_X64)
+ typedef int (*pkey_free_t)(int);
+ static auto* pkey_free =
+ bit_cast<pkey_free_t>(dlsym(RTLD_DEFAULT, "pkey_free"));
+ // If a valid key was allocated, {pkey_free()} must also be available.
+ DCHECK_NOT_NULL(pkey_free);
+
+ int ret = pkey_free(key);
+ CHECK_EQ(/* success */ 0, ret);
+#else
+ // On platforms without PKU support, we should have already returned because
+ // the key must be {kNoMemoryProtectionKey}.
+ UNREACHABLE();
+#endif
+}
+
+#if defined(V8_OS_LINUX) && defined(V8_HOST_ARCH_X64)
+// TODO(dlehmann): Copied from base/platform/platform-posix.cc. Should be
+// removed once this code is integrated in base/platform/platform-linux.cc.
+int GetProtectionFromMemoryPermission(base::OS::MemoryPermission access) {
+ switch (access) {
+ case base::OS::MemoryPermission::kNoAccess:
+ case base::OS::MemoryPermission::kNoAccessWillJitLater:
+ return PROT_NONE;
+ case base::OS::MemoryPermission::kRead:
+ return PROT_READ;
+ case base::OS::MemoryPermission::kReadWrite:
+ return PROT_READ | PROT_WRITE;
+ case base::OS::MemoryPermission::kReadWriteExecute:
+ return PROT_READ | PROT_WRITE | PROT_EXEC;
+ case base::OS::MemoryPermission::kReadExecute:
+ return PROT_READ | PROT_EXEC;
+ }
+ UNREACHABLE();
+}
+#endif
+
+DISABLE_CFI_ICALL
+bool SetPermissionsAndMemoryProtectionKey(
+ PageAllocator* page_allocator, base::AddressRegion region,
+ PageAllocator::Permission page_permissions, int key) {
+ DCHECK_NOT_NULL(page_allocator);
+
+ void* address = reinterpret_cast<void*>(region.begin());
+ size_t size = region.size();
+
+#if defined(V8_OS_LINUX) && defined(V8_HOST_ARCH_X64)
+ typedef int (*pkey_mprotect_t)(void*, size_t, int, int);
+ static auto* pkey_mprotect =
+ bit_cast<pkey_mprotect_t>(dlsym(RTLD_DEFAULT, "pkey_mprotect"));
+
+ if (pkey_mprotect == nullptr) {
+ // If there is no runtime support for {pkey_mprotect()}, no key should have
+ // been allocated in the first place.
+ DCHECK_EQ(kNoMemoryProtectionKey, key);
+
+    // Without PKU support, fall back to regular {mprotect()}.
+ return page_allocator->SetPermissions(address, size, page_permissions);
+ }
+
+ // Copied with slight modifications from base/platform/platform-posix.cc
+ // {OS::SetPermissions()}.
+ // TODO(dlehmann): Move this block into its own function at the right
+ // abstraction boundary (likely some static method in platform.h {OS})
+ // once the whole PKU code is moved into base/platform/.
+ DCHECK_EQ(0, region.begin() % page_allocator->CommitPageSize());
+ DCHECK_EQ(0, size % page_allocator->CommitPageSize());
+
+ int protection = GetProtectionFromMemoryPermission(
+ static_cast<base::OS::MemoryPermission>(page_permissions));
+
+ int ret = pkey_mprotect(address, size, protection, key);
+
+ return ret == /* success */ 0;
+#else
+  // Without PKU support, fall back to regular {mprotect()}.
+ return page_allocator->SetPermissions(address, size, page_permissions);
+#endif
+}
+
+DISABLE_CFI_ICALL
+bool SetPermissionsForMemoryProtectionKey(
+ int key, MemoryProtectionKeyPermission permissions) {
+ if (key == kNoMemoryProtectionKey) return false;
+
+#if defined(V8_OS_LINUX) && defined(V8_HOST_ARCH_X64)
+ typedef int (*pkey_set_t)(int, unsigned int);
+ static auto* pkey_set = bit_cast<pkey_set_t>(dlsym(RTLD_DEFAULT, "pkey_set"));
+ // If a valid key was allocated, {pkey_set()} must also be available.
+ DCHECK_NOT_NULL(pkey_set);
+
+ int ret = pkey_set(key, permissions);
+
+ return ret == /* success */ 0;
+#else
+ // On platforms without PKU support, we should have already returned because
+ // the key must be {kNoMemoryProtectionKey}.
+ UNREACHABLE();
+#endif
+}
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/chromium/v8/src/wasm/memory-protection-key.h b/chromium/v8/src/wasm/memory-protection-key.h
new file mode 100644
index 00000000000..9f9a200cdfe
--- /dev/null
+++ b/chromium/v8/src/wasm/memory-protection-key.h
@@ -0,0 +1,90 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if !V8_ENABLE_WEBASSEMBLY
+#error This header should only be included if WebAssembly is enabled.
+#endif // !V8_ENABLE_WEBASSEMBLY
+
+#ifndef V8_WASM_MEMORY_PROTECTION_KEY_H_
+#define V8_WASM_MEMORY_PROTECTION_KEY_H_
+
+#if defined(V8_OS_LINUX) && defined(V8_HOST_ARCH_X64)
+#include <sys/mman.h> // For STATIC_ASSERT of permission values.
+#undef MAP_TYPE // Conflicts with MAP_TYPE in Torque-generated instance-types.h
+#endif
+
+#include "include/v8-platform.h"
+#include "src/base/address-region.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+// TODO(dlehmann): Move this to base/platform/platform.h {OS} (lower-level API)
+// and {base::PageAllocator} (higher-level, exported API) once the API is more
+// stable and we have converged on a better design (e.g., typed class wrapper
+// around int memory protection key).
+
+// Sentinel value if there is no PKU support or allocation of a key failed.
+// This is also the return value on an error of pkey_alloc() and has the
+// benefit that calling pkey_mprotect() with -1 behaves the same as regular
+// mprotect().
+constexpr int kNoMemoryProtectionKey = -1;
+
+// Permissions for memory protection keys on top of the page's permissions.
+// NOTE: Since there is no executable bit, the executable permission cannot be
+// withdrawn by memory protection keys.
+enum MemoryProtectionKeyPermission {
+ kNoRestrictions = 0,
+ kDisableAccess = 1,
+ kDisableWrite = 2,
+};
+
+// If sys/mman.h has PKEY support (on newer Linux distributions), ensure that
+// our definitions of the permissions are consistent with the ones in glibc.
+#if defined(PKEY_DISABLE_ACCESS)
+STATIC_ASSERT(kDisableAccess == PKEY_DISABLE_ACCESS);
+STATIC_ASSERT(kDisableWrite == PKEY_DISABLE_WRITE);
+#endif
+
+// Allocates a memory protection key on platforms with PKU support, returns
+// {kNoMemoryProtectionKey} on platforms without support or when allocation
+// failed at runtime.
+int AllocateMemoryProtectionKey();
+
+// Frees the given memory protection key, to make it available again for the
+// next call to {AllocateMemoryProtectionKey()}. Note that this does NOT
+// invalidate access rights to pages that are still tied to that key. That is,
+// if the key is reused and pages with that key are still accessible, this might
+// be a security issue. See
+// https://www.gnu.org/software/libc/manual/html_mono/libc.html#Memory-Protection-Keys
+void FreeMemoryProtectionKey(int key);
+
+// Associates a memory protection {key} with the given {region}.
+// If {key} is {kNoMemoryProtectionKey} this behaves like "plain"
+// {SetPermissions()} and associates the default key to the region. That is,
+// explicitly calling with {kNoMemoryProtectionKey} can be used to disassociate
+// any protection key from a region. This also means "plain" {SetPermissions()}
+// disassociates the key from a region, making the key's access restrictions
+// irrelevant/inactive for that region.
+// Returns true if changing permissions and key was successful. (Returns a bool
+// to be consistent with {SetPermissions()}).
+// The {page_permissions} are the permissions of the page, not the key. For
+// changing the permissions of the key, use
+// {SetPermissionsForMemoryProtectionKey()} instead.
+bool SetPermissionsAndMemoryProtectionKey(
+ PageAllocator* page_allocator, base::AddressRegion region,
+ PageAllocator::Permission page_permissions, int key);
+
+// Set the key's permissions and return whether this was successful.
+// Returns false on platforms without PKU support or when the operation failed,
+// e.g., because the key was invalid.
+bool SetPermissionsForMemoryProtectionKey(
+ int key, MemoryProtectionKeyPermission permissions);
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_MEMORY_PROTECTION_KEY_H_
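Taken together, the functions declared above suggest a usage pattern roughly like the following (hypothetical call site; page_allocator, code_region and the chosen permission are placeholders, error handling elided):

    // Hypothetical sketch of the intended API usage:
    int key = wasm::AllocateMemoryProtectionKey();   // kNoMemoryProtectionKey (-1)
                                                     // without PKU support
    wasm::SetPermissionsAndMemoryProtectionKey(
        page_allocator, code_region, PageAllocator::kReadWriteExecute, key);
    // Toggle write access for the current thread via the key, without mprotect():
    wasm::SetPermissionsForMemoryProtectionKey(key, wasm::kDisableWrite);
    // ... run code while the region is write-protected ...
    wasm::SetPermissionsForMemoryProtectionKey(key, wasm::kNoRestrictions);
    wasm::FreeMemoryProtectionKey(key);
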
diff --git a/chromium/v8/src/wasm/memory-tracing.cc b/chromium/v8/src/wasm/memory-tracing.cc
index 0d88c4b461e..68310a03f31 100644
--- a/chromium/v8/src/wasm/memory-tracing.cc
+++ b/chromium/v8/src/wasm/memory-tracing.cc
@@ -50,7 +50,7 @@ void TraceMemoryOperation(base::Optional<ExecutionTier> tier,
}
const char* eng =
tier.has_value() ? ExecutionTierToString(tier.value()) : "?";
- printf("%-11s func:%6d+0x%-6x%s %016" PRIuPTR " val: %s\n", eng, func_index,
+ printf("%-11s func:%6d:0x%-6x%s %016" PRIuPTR " val: %s\n", eng, func_index,
position, info->is_store ? " store to" : "load from", info->offset,
value.begin());
}
diff --git a/chromium/v8/src/wasm/module-compiler.cc b/chromium/v8/src/wasm/module-compiler.cc
index 3b1d8750bac..4742a85070c 100644
--- a/chromium/v8/src/wasm/module-compiler.cc
+++ b/chromium/v8/src/wasm/module-compiler.cc
@@ -32,7 +32,6 @@
#include "src/wasm/wasm-js.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-objects-inl.h"
-#include "src/wasm/wasm-opcodes.h"
#include "src/wasm/wasm-result.h"
#include "src/wasm/wasm-serialization.h"
@@ -531,17 +530,19 @@ class CompilationStateImpl {
CompilationStateImpl(const std::shared_ptr<NativeModule>& native_module,
std::shared_ptr<Counters> async_counters);
~CompilationStateImpl() {
- DCHECK(compile_job_->IsValid());
- compile_job_->CancelAndDetach();
+ if (compile_job_->IsValid()) compile_job_->CancelAndDetach();
}
// Call right after the constructor, after the {compilation_state_} field in
// the {NativeModule} has been initialized.
void InitCompileJob(WasmEngine*);
- // Cancel all background compilation, without waiting for compile tasks to
- // finish.
- void CancelCompilation();
+ // {kCancelUnconditionally}: Cancel all compilation.
+ // {kCancelInitialCompilation}: Cancel all compilation if initial (baseline)
+ // compilation is not finished yet.
+ enum CancellationPolicy { kCancelUnconditionally, kCancelInitialCompilation };
+ void CancelCompilation(CancellationPolicy);
+
bool cancelled() const;
// Initialize compilation progress. Set compilation tiers to expect for
@@ -767,7 +768,6 @@ void UpdateFeatureUseCounts(Isolate* isolate, const WasmFeatures& detected) {
using Feature = v8::Isolate::UseCounterFeature;
constexpr static std::pair<WasmFeature, Feature> kUseCounters[] = {
{kFeature_reftypes, Feature::kWasmRefTypes},
- {kFeature_mv, Feature::kWasmMultiValue},
{kFeature_simd, Feature::kWasmSimdOpcodes},
{kFeature_threads, Feature::kWasmThreadOpcodes},
{kFeature_eh, Feature::kWasmExceptionHandling}};
@@ -791,7 +791,14 @@ void CompilationState::InitCompileJob(WasmEngine* engine) {
Impl(this)->InitCompileJob(engine);
}
-void CompilationState::CancelCompilation() { Impl(this)->CancelCompilation(); }
+void CompilationState::CancelCompilation() {
+ Impl(this)->CancelCompilation(CompilationStateImpl::kCancelUnconditionally);
+}
+
+void CompilationState::CancelInitialCompilation() {
+ Impl(this)->CancelCompilation(
+ CompilationStateImpl::kCancelInitialCompilation);
+}
void CompilationState::SetError() { Impl(this)->SetError(); }
@@ -1202,16 +1209,25 @@ CompilationExecutionResult ExecuteJSToWasmWrapperCompilationUnits(
std::shared_ptr<JSToWasmWrapperCompilationUnit> wrapper_unit = nullptr;
int num_processed_wrappers = 0;
+ OperationsBarrier::Token wrapper_compilation_token;
+ Isolate* isolate;
+
{
BackgroundCompileScope compile_scope(native_module);
if (compile_scope.cancelled()) return kYield;
wrapper_unit = compile_scope.compilation_state()
->GetNextJSToWasmWrapperCompilationUnit();
if (!wrapper_unit) return kNoMoreUnits;
+ isolate = wrapper_unit->isolate();
+ wrapper_compilation_token =
+ compile_scope.native_module()->engine()->StartWrapperCompilation(
+ isolate);
+ if (!wrapper_compilation_token) return kNoMoreUnits;
}
TRACE_EVENT0("v8.wasm", "wasm.JSToWasmWrapperCompilation");
while (true) {
+ DCHECK_EQ(isolate, wrapper_unit->isolate());
wrapper_unit->Execute();
++num_processed_wrappers;
bool yield = delegate && delegate->ShouldYield();
@@ -1829,10 +1845,10 @@ std::shared_ptr<StreamingDecoder> AsyncCompileJob::CreateStreamingDecoder() {
AsyncCompileJob::~AsyncCompileJob() {
// Note: This destructor always runs on the foreground thread of the isolate.
background_task_manager_.CancelAndWait();
- // If the runtime objects were not created yet, then initial compilation did
- // not finish yet. In this case we can abort compilation.
- if (native_module_ && module_object_.is_null()) {
- Impl(native_module_->compilation_state())->CancelCompilation();
+ // If initial compilation did not finish yet we can abort it.
+ if (native_module_) {
+ Impl(native_module_->compilation_state())
+ ->CancelCompilation(CompilationStateImpl::kCancelInitialCompilation);
}
// Tell the streaming decoder that the AsyncCompileJob is not available
// anymore.
@@ -2459,7 +2475,8 @@ void AsyncStreamingProcessor::FinishAsyncCompileJobWithError(
// Check if there is already a CompiledModule, in which case we have to clean
// up the CompilationStateImpl as well.
if (job_->native_module_) {
- Impl(job_->native_module_->compilation_state())->CancelCompilation();
+ Impl(job_->native_module_->compilation_state())
+ ->CancelCompilation(CompilationStateImpl::kCancelUnconditionally);
job_->DoSync<AsyncCompileJob::DecodeFail,
AsyncCompileJob::kUseExistingForegroundTask>(error);
@@ -2783,13 +2800,22 @@ void CompilationStateImpl::InitCompileJob(WasmEngine* engine) {
async_counters_));
}
-void CompilationStateImpl::CancelCompilation() {
+void CompilationStateImpl::CancelCompilation(
+ CompilationStateImpl::CancellationPolicy cancellation_policy) {
+ base::MutexGuard callbacks_guard(&callbacks_mutex_);
+
+ if (cancellation_policy == kCancelInitialCompilation &&
+ finished_events_.contains(
+ CompilationEvent::kFinishedBaselineCompilation)) {
+ // Initial compilation already finished; cannot be cancelled.
+ return;
+ }
+
// std::memory_order_relaxed is sufficient because no other state is
// synchronized with |compile_cancelled_|.
compile_cancelled_.store(true, std::memory_order_relaxed);
// No more callbacks after abort.
- base::MutexGuard callbacks_guard(&callbacks_mutex_);
callbacks_.clear();
}
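The two public entry points introduced above differ only in when cancellation takes effect. A hypothetical pair of call sites (not taken from the patch):

    // Cancels background compilation unconditionally:
    native_module->compilation_state()->CancelCompilation();
    // Cancels only if baseline (initial) compilation has not finished yet,
    // otherwise it is a no-op:
    native_module->compilation_state()->CancelInitialCompilation();
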
@@ -3040,7 +3066,8 @@ void CompilationStateImpl::FinalizeJSToWasmWrappers(
js_to_wasm_wrapper_units_.size());
CodeSpaceMemoryModificationScope modification_scope(isolate->heap());
for (auto& unit : js_to_wasm_wrapper_units_) {
- Handle<Code> code = unit->Finalize(isolate);
+ DCHECK_EQ(isolate, unit->isolate());
+ Handle<Code> code = unit->Finalize();
int wrapper_index =
GetExportWrapperIndex(module, unit->sig(), unit->is_import());
(*export_wrappers_out)->set(wrapper_index, *code);
@@ -3090,7 +3117,8 @@ void CompilationStateImpl::OnFinishedUnits(Vector<WasmCode*> code_vector) {
DCHECK_NOT_NULL(code);
DCHECK_LT(code->index(), native_module_->num_functions());
- if (code->index() < native_module_->num_imported_functions()) {
+ if (code->index() <
+ static_cast<int>(native_module_->num_imported_functions())) {
// Import wrapper.
DCHECK_EQ(code->tier(), ExecutionTier::kTurbofan);
outstanding_baseline_units_--;
@@ -3449,7 +3477,8 @@ void CompileJsToWasmWrappers(Isolate* isolate, const WasmModule* module,
for (auto& pair : compilation_units) {
JSToWasmWrapperKey key = pair.first;
JSToWasmWrapperCompilationUnit* unit = pair.second.get();
- Handle<Code> code = unit->Finalize(isolate);
+ DCHECK_EQ(isolate, unit->isolate());
+ Handle<Code> code = unit->Finalize();
int wrapper_index = GetExportWrapperIndex(module, &key.second, key.first);
(*export_wrappers_out)->set(wrapper_index, *code);
RecordStats(*code, isolate->counters());
diff --git a/chromium/v8/src/wasm/module-decoder.cc b/chromium/v8/src/wasm/module-decoder.cc
index f2c77efb230..be4d8ef833d 100644
--- a/chromium/v8/src/wasm/module-decoder.cc
+++ b/chromium/v8/src/wasm/module-decoder.cc
@@ -34,6 +34,7 @@ namespace {
constexpr char kNameString[] = "name";
constexpr char kSourceMappingURLString[] = "sourceMappingURL";
constexpr char kCompilationHintsString[] = "compilationHints";
+constexpr char kBranchHintsString[] = "branchHints";
constexpr char kDebugInfoString[] = ".debug_info";
constexpr char kExternalDebugInfoString[] = "external_debug_info";
@@ -95,6 +96,8 @@ const char* SectionName(SectionCode code) {
return kExternalDebugInfoString;
case kCompilationHintsSectionCode:
return kCompilationHintsString;
+ case kBranchHintsSectionCode:
+ return kBranchHintsString;
default:
return "<unknown>";
}
@@ -144,6 +147,7 @@ SectionCode IdentifyUnknownSectionInternal(Decoder* decoder) {
{StaticCharVector(kNameString), kNameSectionCode},
{StaticCharVector(kSourceMappingURLString), kSourceMappingURLSectionCode},
{StaticCharVector(kCompilationHintsString), kCompilationHintsSectionCode},
+ {StaticCharVector(kBranchHintsString), kBranchHintsSectionCode},
{StaticCharVector(kDebugInfoString), kDebugInfoSectionCode},
{StaticCharVector(kExternalDebugInfoString),
kExternalDebugInfoSectionCode}};
@@ -432,6 +436,13 @@ class ModuleDecoderImpl : public Decoder {
// first occurrence after function section and before code section are
// ignored.
break;
+ case kBranchHintsSectionCode:
+ // TODO(yuri): report out of place branch hints section as a
+ // warning.
+        // Be lenient with placement of the branch hints section. All except
+ // first occurrence after function section and before code section are
+ // ignored.
+ break;
default:
next_ordered_section_ = section_code + 1;
break;
@@ -498,6 +509,15 @@ class ModuleDecoderImpl : public Decoder {
consume_bytes(static_cast<uint32_t>(end_ - start_), nullptr);
}
break;
+ case kBranchHintsSectionCode:
+ if (enabled_features_.has_branch_hinting()) {
+ DecodeBranchHintsSection();
+ } else {
+ // Ignore this section when feature was disabled. It is an optional
+ // custom section anyways.
+ consume_bytes(static_cast<uint32_t>(end_ - start_), nullptr);
+ }
+ break;
case kDataCountSectionCode:
DecodeDataCountSection();
break;
@@ -884,50 +904,25 @@ class ModuleDecoderImpl : public Decoder {
uint32_t element_count =
consume_count("element count", FLAG_wasm_max_table_size);
- for (uint32_t i = 0; ok() && i < element_count; ++i) {
- const byte* pos = pc();
-
- WasmElemSegment::Status status;
- bool functions_as_elements;
- uint32_t table_index;
- WasmInitExpr offset;
- ValueType type = kWasmBottom;
- consume_element_segment_header(&status, &functions_as_elements, &type,
- &table_index, &offset);
+ for (uint32_t i = 0; i < element_count; ++i) {
+ bool expressions_as_elements;
+ WasmElemSegment segment =
+ consume_element_segment_header(&expressions_as_elements);
if (failed()) return;
- DCHECK_NE(type, kWasmBottom);
-
- if (status == WasmElemSegment::kStatusActive) {
- if (table_index >= module_->tables.size()) {
- errorf(pos, "out of bounds table index %u", table_index);
- break;
- }
- if (!IsSubtypeOf(type, module_->tables[table_index].type,
- this->module_.get())) {
- errorf(pos,
- "Invalid element segment. Table %u is not a super-type of %s",
- table_index, type.name().c_str());
- break;
- }
- }
+ DCHECK_NE(segment.type, kWasmBottom);
uint32_t num_elem =
consume_count("number of elements", max_table_init_entries());
- if (status == WasmElemSegment::kStatusActive) {
- module_->elem_segments.emplace_back(table_index, std::move(offset));
- } else {
- module_->elem_segments.emplace_back(
- status == WasmElemSegment::kStatusDeclarative);
- }
- WasmElemSegment* init = &module_->elem_segments.back();
- init->type = type;
for (uint32_t j = 0; j < num_elem; j++) {
- uint32_t index = functions_as_elements ? consume_element_expr()
- : consume_element_func_index();
- if (failed()) break;
- init->entries.push_back(index);
+ WasmInitExpr init =
+ expressions_as_elements
+ ? consume_element_expr()
+ : WasmInitExpr::RefFuncConst(consume_element_func_index());
+ if (failed()) return;
+ segment.entries.push_back(std::move(init));
}
+ module_->elem_segments.push_back(std::move(segment));
}
}
@@ -1174,6 +1169,82 @@ class ModuleDecoderImpl : public Decoder {
// consume_bytes(static_cast<uint32_t>(end_ - start_), nullptr);
}
+ void DecodeBranchHintsSection() {
+ TRACE("DecodeBranchHints module+%d\n", static_cast<int>(pc_ - start_));
+ if (!has_seen_unordered_section(kBranchHintsSectionCode)) {
+ set_seen_unordered_section(kBranchHintsSectionCode);
+ // Use an inner decoder so that errors don't fail the outer decoder.
+ Decoder inner(start_, pc_, end_, buffer_offset_);
+ BranchHintInfo branch_hints;
+
+ uint32_t func_count = inner.consume_u32v("number of functions");
+ // Keep track of the previous function index to validate the ordering
+ int64_t last_func_idx = -1;
+ for (uint32_t i = 0; i < func_count; i++) {
+ uint32_t func_idx = inner.consume_u32v("function index");
+ if (int64_t(func_idx) <= last_func_idx) {
+ inner.errorf("Invalid function index: %d", func_idx);
+ break;
+ }
+ last_func_idx = func_idx;
+ uint8_t reserved = inner.consume_u8("reserved byte");
+ if (reserved != 0x0) {
+ inner.errorf("Invalid reserved byte: %#x", reserved);
+ break;
+ }
+ uint32_t num_hints = inner.consume_u32v("number of hints");
+ BranchHintMap func_branch_hints;
+ TRACE("DecodeBranchHints[%d] module+%d\n", func_idx,
+ static_cast<int>(inner.pc() - inner.start()));
+ // Keep track of the previous branch offset to validate the ordering
+ int64_t last_br_off = -1;
+ for (uint32_t j = 0; j < num_hints; ++j) {
+ uint32_t br_dir = inner.consume_u32v("branch direction");
+ uint32_t br_off = inner.consume_u32v("branch instruction offset");
+ if (int64_t(br_off) <= last_br_off) {
+ inner.errorf("Invalid branch offset: %d", br_off);
+ break;
+ }
+ last_br_off = br_off;
+ TRACE("DecodeBranchHints[%d][%d] module+%d\n", func_idx, br_off,
+ static_cast<int>(inner.pc() - inner.start()));
+ WasmBranchHint hint;
+ switch (br_dir) {
+ case 0:
+ hint = WasmBranchHint::kUnlikely;
+ break;
+ case 1:
+ hint = WasmBranchHint::kLikely;
+ break;
+ default:
+ hint = WasmBranchHint::kNoHint;
+ inner.errorf(inner.pc(), "Invalid branch hint %#x", br_dir);
+ break;
+ }
+ if (!inner.ok()) {
+ break;
+ }
+ func_branch_hints.insert(br_off, hint);
+ }
+ if (!inner.ok()) {
+ break;
+ }
+ branch_hints.emplace(func_idx, std::move(func_branch_hints));
+ }
+ // Extra unexpected bytes are an error.
+ if (inner.more()) {
+ inner.errorf("Unexpected extra bytes: %d\n",
+ static_cast<int>(inner.pc() - inner.start()));
+ }
+ // If everything went well, accept the hints for the module.
+ if (inner.ok()) {
+ module_->branch_hints = std::move(branch_hints);
+ }
+ }
+ // Skip the whole branch hints section in the outer decoder.
+ consume_bytes(static_cast<uint32_t>(end_ - start_), nullptr);
+ }
+
void DecodeDataCountSection() {
module_->num_declared_data_segments =
consume_count("data segments count", kV8MaxWasmDataSegments);
@@ -1911,10 +1982,8 @@ class ModuleDecoderImpl : public Decoder {
std::vector<ValueType> returns;
// Parse return types.
- const size_t max_return_count = enabled_features_.has_mv()
- ? kV8MaxWasmFunctionMultiReturns
- : kV8MaxWasmFunctionReturns;
- uint32_t return_count = consume_count("return count", max_return_count);
+ uint32_t return_count =
+ consume_count("return count", kV8MaxWasmFunctionReturns);
if (failed()) return nullptr;
for (uint32_t i = 0; ok() && i < return_count; ++i) {
returns.push_back(consume_value_type());
@@ -1967,86 +2036,114 @@ class ModuleDecoderImpl : public Decoder {
return attribute;
}
- void consume_element_segment_header(WasmElemSegment::Status* status,
- bool* functions_as_elements,
- ValueType* type, uint32_t* table_index,
- WasmInitExpr* offset) {
+ WasmElemSegment consume_element_segment_header(
+ bool* expressions_as_elements) {
const byte* pos = pc();
- uint32_t flag = consume_u32v("flag");
// The mask for the bit in the flag which indicates if the segment is
- // active or not.
- constexpr uint8_t kIsPassiveMask = 0x01;
- // The mask for the bit in the flag which indicates if the segment has an
- // explicit table index field.
- constexpr uint8_t kHasTableIndexMask = 0x02;
+ // active or not (0 is active).
+ constexpr uint8_t kNonActiveMask = 1 << 0;
+ // The mask for the bit in the flag which indicates:
+ // - for active tables, if the segment has an explicit table index field.
+ // - for non-active tables, whether the table is declarative (vs. passive).
+ constexpr uint8_t kHasTableIndexOrIsDeclarativeMask = 1 << 1;
// The mask for the bit in the flag which indicates if the functions of this
- // segment are defined as function indices (=0) or elements(=1).
- constexpr uint8_t kFunctionsAsElementsMask = 0x04;
- constexpr uint8_t kFullMask =
- kIsPassiveMask | kHasTableIndexMask | kFunctionsAsElementsMask;
-
- bool is_passive = flag & kIsPassiveMask;
- if (!is_passive) {
- *status = WasmElemSegment::kStatusActive;
- if (module_->tables.size() == 0) {
- error(pc_, "Active element sections require a table");
- }
- } else if ((flag & kHasTableIndexMask)) { // Special bit combination for
- // declarative segments.
- *status = WasmElemSegment::kStatusDeclarative;
- } else {
- *status = WasmElemSegment::kStatusPassive;
+ // segment are defined as function indices (0) or init. expressions (1).
+ constexpr uint8_t kExpressionsAsElementsMask = 1 << 2;
+ constexpr uint8_t kFullMask = kNonActiveMask |
+ kHasTableIndexOrIsDeclarativeMask |
+ kExpressionsAsElementsMask;
+
+ uint32_t flag = consume_u32v("flag");
+ if ((flag & kFullMask) != flag) {
+ errorf(pos, "illegal flag value %u. Must be between 0 and 7", flag);
+ return {};
}
- *functions_as_elements = flag & kFunctionsAsElementsMask;
- bool has_table_index = (flag & kHasTableIndexMask) &&
- *status == WasmElemSegment::kStatusActive;
- if (*status == WasmElemSegment::kStatusDeclarative &&
+ const WasmElemSegment::Status status =
+ (flag & kNonActiveMask) ? (flag & kHasTableIndexOrIsDeclarativeMask)
+ ? WasmElemSegment::kStatusDeclarative
+ : WasmElemSegment::kStatusPassive
+ : WasmElemSegment::kStatusActive;
+ if (status == WasmElemSegment::kStatusDeclarative &&
!enabled_features_.has_reftypes()) {
error(
"Declarative element segments require --experimental-wasm-reftypes");
- return;
- }
- if ((flag & kFullMask) != flag) {
- errorf(pos, "illegal flag value %u. Must be between 0 and 7", flag);
+ return {};
}
+ const bool is_active = status == WasmElemSegment::kStatusActive;
- if (has_table_index) {
- *table_index = consume_u32v("table index");
- } else {
- *table_index = 0;
- }
+ *expressions_as_elements = flag & kExpressionsAsElementsMask;
- if (*status == WasmElemSegment::kStatusActive) {
- *offset = consume_init_expr(module_.get(), kWasmI32,
- module_.get()->globals.size());
- if (offset->kind() == WasmInitExpr::kNone) {
- // Failed to parse offset initializer, return early.
- return;
- }
+ const bool has_table_index =
+ is_active && (flag & kHasTableIndexOrIsDeclarativeMask);
+ uint32_t table_index = has_table_index ? consume_u32v("table index") : 0;
+ if (is_active && table_index >= module_->tables.size()) {
+ errorf(pos, "out of bounds%s table index %u",
+      has_table_index ? "" : " implicit", table_index);
+ return {};
}
-
- if (*status == WasmElemSegment::kStatusActive && !has_table_index) {
- // Active segments without table indices are a special case for backwards
- // compatibility. These cases have an implicit element kind or element
- // type, so we are done already with the segment header.
- *type = kWasmFuncRef;
- return;
+ ValueType table_type =
+ is_active ? module_->tables[table_index].type : kWasmBottom;
+
+ WasmInitExpr offset;
+ if (is_active) {
+ offset = consume_init_expr(module_.get(), kWasmI32,
+ module_.get()->globals.size());
+ // Failed to parse offset initializer, return early.
+ if (failed()) return {};
+ }
+
+ // Denotes an active segment without table index, type, or element kind.
+ const bool backwards_compatible_mode =
+ is_active && !(flag & kHasTableIndexOrIsDeclarativeMask);
+ ValueType type;
+ if (*expressions_as_elements) {
+ type =
+ backwards_compatible_mode ? kWasmFuncRef : consume_reference_type();
+ if (is_active && !IsSubtypeOf(type, table_type, this->module_.get())) {
+ errorf(pos,
+ "Element segment of type %s is not a subtype of referenced "
+ "table %u (of type %s)",
+ type.name().c_str(), table_index, table_type.name().c_str());
+ return {};
+ }
+ } else {
+ if (!backwards_compatible_mode) {
+ // We have to check that there is an element kind of type Function. All
+ // other element kinds are not valid yet.
+ uint8_t val = consume_u8("element kind");
+ if (static_cast<ImportExportKindCode>(val) != kExternalFunction) {
+ errorf(pos, "illegal element kind 0x%x. Must be 0x%x", val,
+ kExternalFunction);
+ return {};
+ }
+ }
+ if (!is_active) {
+ // Declarative and passive segments without explicit type are funcref.
+ type = kWasmFuncRef;
+ } else {
+ type = table_type;
+ // Active segments with function indices must reference a function
+ // table. TODO(7748): Add support for anyref tables when we have them.
+ if (!IsSubtypeOf(table_type, kWasmFuncRef, this->module_.get())) {
+ errorf(pos,
+ "An active element segment with function indices as elements "
+ "must reference a table of %s. Instead, table %u of type %s "
+ "is referenced.",
+ enabled_features_.has_typed_funcref()
+ ? "a subtype of type funcref"
+ : "type funcref",
+ table_index, table_type.name().c_str());
+ return {};
+ }
+ }
}
- if (*functions_as_elements) {
- *type = consume_reference_type();
+ if (is_active) {
+ return {type, table_index, std::move(offset)};
} else {
- // We have to check that there is an element kind of type Function. All
- // other element kinds are not valid yet.
- uint8_t val = consume_u8("element kind");
- ImportExportKindCode kind = static_cast<ImportExportKindCode>(val);
- if (kind != kExternalFunction) {
- errorf(pos, "illegal element kind %x. Must be 0x00", val);
- return;
- }
- *type = kWasmFuncRef;
+ return {type, status == WasmElemSegment::kStatusDeclarative};
}
}
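The three flag bits above admit a compact restatement. A minimal sketch (not V8 code) of how a flag value in [0, 7] maps onto segment kind and element representation:

    #include <cstdint>

    enum class ElemKind { kActive, kPassive, kDeclarative };
    struct ElemFlag {
      ElemKind kind;
      bool has_table_index;          // only meaningful for active segments
      bool expressions_as_elements;  // element expressions vs. function indices
    };

    // Mirrors the masks in consume_element_segment_header above.
    ElemFlag DecodeElemFlag(uint8_t flag) {
      const bool non_active = flag & 1;  // kNonActiveMask
      const bool bit1 = flag & 2;        // table index (active) / declarative (non-active)
      const bool exprs = flag & 4;       // kExpressionsAsElementsMask
      if (non_active) {
        return {bit1 ? ElemKind::kDeclarative : ElemKind::kPassive, false, exprs};
      }
      return {ElemKind::kActive, bit1, exprs};
    }
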
@@ -2091,32 +2188,49 @@ class ModuleDecoderImpl : public Decoder {
func->declared = true;
DCHECK_NE(func, nullptr);
DCHECK_EQ(index, func->func_index);
- DCHECK_NE(index, WasmElemSegment::kNullIndex);
return index;
}
- uint32_t consume_element_expr() {
- uint32_t index = WasmElemSegment::kNullIndex;
+ // TODO(manoskouk): When reftypes lands, remove this and use
+ // consume_init_expr() instead.
+ WasmInitExpr consume_element_expr() {
uint8_t opcode = consume_u8("element opcode");
- if (failed()) return index;
+ if (failed()) return {};
switch (opcode) {
case kExprRefNull: {
HeapTypeImmediate<kFullValidation> imm(WasmFeatures::All(), this,
this->pc(), module_.get());
consume_bytes(imm.length, "ref.null immediate");
- index = WasmElemSegment::kNullIndex;
- break;
+ expect_u8("end opcode", kExprEnd);
+ return WasmInitExpr::RefNullConst(imm.type.representation());
+ }
+ case kExprRefFunc: {
+ uint32_t index = consume_element_func_index();
+ if (failed()) return {};
+ expect_u8("end opcode", kExprEnd);
+ return WasmInitExpr::RefFuncConst(index);
+ }
+ case kExprGlobalGet: {
+ if (!enabled_features_.has_reftypes()) {
+ errorf(
+ "Unexpected opcode 0x%x in element. Enable with "
+ "--experimental-wasm-reftypes",
+ kExprGlobalGet);
+ return {};
+ }
+ uint32_t index = this->consume_u32v("global index");
+ if (failed()) return {};
+ if (index >= module_->globals.size()) {
+ errorf("Out-of-bounds global index %d", index);
+ return {};
+ }
+ expect_u8("end opcode", kExprEnd);
+ return WasmInitExpr::GlobalGet(index);
}
- case kExprRefFunc:
- index = consume_element_func_index();
- if (failed()) return index;
- break;
default:
error("invalid opcode in element");
- break;
+ return {};
}
- expect_u8("end opcode", kExprEnd);
- return index;
}
};
@@ -2374,8 +2488,7 @@ bool FindNameSection(Decoder* decoder) {
} // namespace
void DecodeFunctionNames(const byte* module_start, const byte* module_end,
- std::unordered_map<uint32_t, WireBytesRef>* names,
- const Vector<const WasmExport> export_table) {
+ std::unordered_map<uint32_t, WireBytesRef>* names) {
DCHECK_NOT_NULL(names);
DCHECK(names->empty());
@@ -2407,13 +2520,6 @@ void DecodeFunctionNames(const byte* module_start, const byte* module_end,
}
}
}
-
- // Extract from export table.
- for (const WasmExport& exp : export_table) {
- if (exp.kind == kExternalFunction && names->count(exp.index) == 0) {
- names->insert(std::make_pair(exp.index, exp.name));
- }
- }
}
NameMap DecodeNameMap(Vector<const uint8_t> module_bytes,
diff --git a/chromium/v8/src/wasm/module-decoder.h b/chromium/v8/src/wasm/module-decoder.h
index 2d33f51f319..2af2760ab4e 100644
--- a/chromium/v8/src/wasm/module-decoder.h
+++ b/chromium/v8/src/wasm/module-decoder.h
@@ -187,13 +187,11 @@ V8_EXPORT_PRIVATE std::vector<CustomSectionOffset> DecodeCustomSections(
// function.
AsmJsOffsetsResult DecodeAsmJsOffsets(Vector<const uint8_t> encoded_offsets);
-// Decode the function names from the name section and also look at export
-// table. Returns the result as an unordered map. Only names with valid utf8
-// encoding are stored and conflicts are resolved by choosing the last name
-// read.
+// Decode the function names from the name section. Returns the result as an
+// unordered map. Only names with valid utf8 encoding are stored and conflicts
+// are resolved by choosing the last name read.
void DecodeFunctionNames(const byte* module_start, const byte* module_end,
- std::unordered_map<uint32_t, WireBytesRef>* names,
- const Vector<const WasmExport> export_table);
+ std::unordered_map<uint32_t, WireBytesRef>* names);
// Decode the requested subsection of the name section.
// The result will be empty if no name section is present. On encountering an
diff --git a/chromium/v8/src/wasm/module-instantiate.cc b/chromium/v8/src/wasm/module-instantiate.cc
index f64a657eb8e..7945e798499 100644
--- a/chromium/v8/src/wasm/module-instantiate.cc
+++ b/chromium/v8/src/wasm/module-instantiate.cc
@@ -20,6 +20,7 @@
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-objects-inl.h"
#include "src/wasm/wasm-subtyping.h"
+#include "src/wasm/wasm-value.h"
#define TRACE(...) \
do { \
@@ -122,16 +123,17 @@ Handle<Map> CreateStructMap(Isolate* isolate, const WasmModule* module,
int struct_index, Handle<Map> opt_rtt_parent) {
const wasm::StructType* type = module->struct_type(struct_index);
const int inobject_properties = 0;
- DCHECK_LE(type->total_fields_size(), kMaxInt - WasmStruct::kHeaderSize);
- const int instance_size =
- WasmStruct::kHeaderSize + static_cast<int>(type->total_fields_size());
+ // We have to use the variable size sentinel because the instance size
+ // stored directly in a Map is capped at 255 pointer sizes.
+ const int map_instance_size = kVariableSizeSentinel;
+ const int real_instance_size = WasmStruct::Size(type);
const InstanceType instance_type = WASM_STRUCT_TYPE;
// TODO(jkummerow): If NO_ELEMENTS were supported, we could use that here.
const ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND;
Handle<WasmTypeInfo> type_info = isolate->factory()->NewWasmTypeInfo(
- reinterpret_cast<Address>(type), opt_rtt_parent);
+ reinterpret_cast<Address>(type), opt_rtt_parent, real_instance_size);
Handle<Map> map = isolate->factory()->NewMap(
- instance_type, instance_size, elements_kind, inobject_properties);
+ instance_type, map_instance_size, elements_kind, inobject_properties);
map->set_wasm_type_info(*type_info);
return map;
}
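The CreateStructMap hunk above switches to the variable-size sentinel because the instance-size field stored directly in a Map is capped, and caches the uncapped size on the WasmTypeInfo instead. A hedged sketch of that general pattern follows; all names and the constant values are hypothetical, chosen only to illustrate the sentinel-plus-side-cache idea.

// Hedged sketch: when a size does not fit the small field in one object,
// store a sentinel there and keep the real value on a companion object.
#include <cstdio>

constexpr int kVariableSizeSentinelSketch = 0;      // stand-in for the sentinel
constexpr int kMaxDirectlyStorableSize = 255 * 8;   // e.g. 255 pointer sizes

struct TypeInfoSketch { int real_instance_size; };

struct MapSketch {
  int instance_size_field;    // capped; may hold the sentinel
  TypeInfoSketch type_info;   // carries the uncapped size

  int InstanceSize() const {
    return instance_size_field == kVariableSizeSentinelSketch
               ? type_info.real_instance_size
               : instance_size_field;
  }
};

MapSketch MakeMap(int real_size) {
  if (real_size > kMaxDirectlyStorableSize) {
    return {kVariableSizeSentinelSketch, {real_size}};
  }
  return {real_size, {real_size}};
}

int main() {
  std::printf("small struct: %d bytes, big struct: %d bytes\n",
              MakeMap(64).InstanceSize(), MakeMap(4096).InstanceSize());
}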
@@ -141,10 +143,12 @@ Handle<Map> CreateArrayMap(Isolate* isolate, const WasmModule* module,
const wasm::ArrayType* type = module->array_type(array_index);
const int inobject_properties = 0;
const int instance_size = kVariableSizeSentinel;
+ // Wasm Arrays don't have a static instance size.
+ const int cached_instance_size = 0;
const InstanceType instance_type = WASM_ARRAY_TYPE;
const ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND;
Handle<WasmTypeInfo> type_info = isolate->factory()->NewWasmTypeInfo(
- reinterpret_cast<Address>(type), opt_rtt_parent);
+ reinterpret_cast<Address>(type), opt_rtt_parent, cached_instance_size);
Handle<Map> map = isolate->factory()->NewMap(
instance_type, instance_size, elements_kind, inobject_properties);
map->set_wasm_type_info(*type_info);
@@ -615,7 +619,7 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
// list.
//--------------------------------------------------------------------------
if (enabled_.has_gc()) {
- Handle<FixedArray> maps = isolate_->factory()->NewUninitializedFixedArray(
+ Handle<FixedArray> maps = isolate_->factory()->NewFixedArray(
static_cast<int>(module_->type_kinds.size()));
for (int map_index = 0;
map_index < static_cast<int>(module_->type_kinds.size());
@@ -1325,11 +1329,15 @@ bool InstanceBuilder::ProcessImportedGlobal(Handle<WasmInstanceObject> instance,
// TODO(wasm): Still observable if Function.prototype.valueOf or friends
// are patched, we might need to check for that as well.
if (value->IsJSFunction()) value = isolate_->factory()->nan_value();
- if (value->IsPrimitive() && !value->IsSymbol()) {
- if (global.type == kWasmI32) {
- value = Object::ToInt32(isolate_, value).ToHandleChecked();
- } else {
- value = Object::ToNumber(isolate_, value).ToHandleChecked();
+ if (value->IsPrimitive()) {
+ MaybeHandle<Object> converted = global.type == kWasmI32
+ ? Object::ToInt32(isolate_, value)
+ : Object::ToNumber(isolate_, value);
+ if (!converted.ToHandle(&value)) {
+ // Conversion is known to fail for Symbols and BigInts.
+ ReportLinkError("global import must be a number", import_index,
+ module_name, import_name);
+ return false;
}
}
}
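The ProcessImportedGlobal change above stops assuming the numeric conversion always succeeds and instead reports a link error when it fails (as it does for Symbols and BigInts). A minimal sketch of that control flow, using standard-library types rather than V8 handles:

// Hedged sketch of "try the conversion, turn failure into a link error".
#include <iostream>
#include <optional>
#include <string>
#include <variant>

using JsValue = std::variant<double, std::string, std::monostate>;

std::optional<double> ToNumberSketch(const JsValue& v) {
  if (const double* d = std::get_if<double>(&v)) return *d;
  return std::nullopt;  // analogue of Symbols/BigInts failing to convert
}

bool ProcessImportedGlobalSketch(const JsValue& v) {
  std::optional<double> converted = ToNumberSketch(v);
  if (!converted) {
    std::cerr << "LinkError: global import must be a number\n";
    return false;
  }
  std::cout << "imported global = " << *converted << "\n";
  return true;
}

int main() {
  ProcessImportedGlobalSketch(JsValue{3.14});
  ProcessImportedGlobalSketch(JsValue{std::string("not-a-number")});
}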
@@ -1903,10 +1911,10 @@ bool LoadElemSegmentImpl(Isolate* isolate, Handle<WasmInstanceObject> instance,
const WasmModule* module = instance->module();
for (size_t i = 0; i < count; ++i) {
- uint32_t func_index = elem_segment.entries[src + i];
+ const WasmInitExpr* init = &elem_segment.entries[src + i];
int entry_index = static_cast<int>(dst + i);
- if (func_index == WasmElemSegment::kNullIndex) {
+ if (init->kind() == WasmInitExpr::kRefNullConst) {
if (IsSubtypeOf(table_object->type(), kWasmFuncRef, module)) {
IndirectFunctionTableEntry(instance, table_index, entry_index).clear();
}
@@ -1915,6 +1923,18 @@ bool LoadElemSegmentImpl(Isolate* isolate, Handle<WasmInstanceObject> instance,
continue;
}
+ if (init->kind() == WasmInitExpr::kGlobalGet) {
+ WasmTableObject::Set(
+ isolate, table_object, entry_index,
+ WasmInstanceObject::GetGlobalValue(
+ instance, module->globals[init->immediate().index])
+ .to_ref());
+ continue;
+ }
+
+ DCHECK_EQ(init->kind(), WasmInitExpr::kRefFuncConst);
+
+ const uint32_t func_index = init->immediate().index;
const WasmFunction* function = &module->functions[func_index];
// Update the local dispatch table first if necessary.
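In LoadElemSegmentImpl, element entries are now full init expressions rather than bare function indices, and each entry is dispatched on its kind: null clears the slot, global.get installs the value of a global, ref.func installs a function. A small stand-alone sketch of that dispatch, with hypothetical types:

// Hedged sketch of the per-entry dispatch; types and output are illustrative.
#include <cstdint>
#include <cstdio>

enum class InitKind { kRefNullConst, kGlobalGet, kRefFuncConst };
struct InitExprSketch { InitKind kind; uint32_t index; };

void ApplyElementEntry(const InitExprSketch& init, int entry_index) {
  switch (init.kind) {
    case InitKind::kRefNullConst:
      std::printf("entry %d: clear (null)\n", entry_index);
      break;
    case InitKind::kGlobalGet:
      std::printf("entry %d: value of global %u\n", entry_index, init.index);
      break;
    case InitKind::kRefFuncConst:
      std::printf("entry %d: function %u\n", entry_index, init.index);
      break;
  }
}

int main() {
  ApplyElementEntry({InitKind::kRefFuncConst, 3}, /*entry_index=*/0);
  ApplyElementEntry({InitKind::kRefNullConst, 0}, /*entry_index=*/1);
}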
diff --git a/chromium/v8/src/wasm/value-type.h b/chromium/v8/src/wasm/value-type.h
index 983e2090b64..7895a731f6a 100644
--- a/chromium/v8/src/wasm/value-type.h
+++ b/chromium/v8/src/wasm/value-type.h
@@ -94,7 +94,7 @@ class HeapType {
}
explicit constexpr HeapType(Representation repr) : representation_(repr) {
- CONSTEXPR_DCHECK(is_bottom() || is_valid());
+ DCHECK(is_bottom() || is_valid());
}
explicit constexpr HeapType(uint32_t repr)
: HeapType(static_cast<Representation>(repr)) {}
@@ -116,7 +116,7 @@ class HeapType {
constexpr Representation representation() const { return representation_; }
constexpr uint32_t ref_index() const {
- CONSTEXPR_DCHECK(is_index());
+ DCHECK(is_index());
return representation_;
}
@@ -201,7 +201,7 @@ constexpr int element_size_log2(ValueKind kind) {
};
int size_log_2 = kElementSizeLog2[kind];
- CONSTEXPR_DCHECK(size_log_2 >= 0);
+ DCHECK_LE(0, size_log_2);
return size_log_2;
}
@@ -214,7 +214,7 @@ constexpr int element_size_bytes(ValueKind kind) {
};
int size = kElementSize[kind];
- CONSTEXPR_DCHECK(size > 0);
+ DCHECK_LT(0, size);
return size;
}
@@ -240,7 +240,7 @@ constexpr const char* name(ValueKind kind) {
}
constexpr MachineType machine_type(ValueKind kind) {
- CONSTEXPR_DCHECK(kBottom != kind);
+ DCHECK_NE(kBottom, kind);
constexpr MachineType kMachineType[] = {
#define MACH_TYPE(kind, log2Size, code, machineType, ...) \
@@ -262,7 +262,7 @@ constexpr bool is_rtt(ValueKind kind) {
}
constexpr bool is_defaultable(ValueKind kind) {
- CONSTEXPR_DCHECK(kind != kBottom && kind != kVoid);
+ DCHECK(kind != kBottom && kind != kVoid);
return kind != kRef && !is_rtt(kind);
}
@@ -277,11 +277,11 @@ class ValueType {
/******************************* Constructors *******************************/
constexpr ValueType() : bit_field_(KindField::encode(kVoid)) {}
static constexpr ValueType Primitive(ValueKind kind) {
- CONSTEXPR_DCHECK(kind == kBottom || kind <= kI16);
+ DCHECK(kind == kBottom || kind <= kI16);
return ValueType(KindField::encode(kind));
}
static constexpr ValueType Ref(uint32_t heap_type, Nullability nullability) {
- CONSTEXPR_DCHECK(HeapType(heap_type).is_valid());
+ DCHECK(HeapType(heap_type).is_valid());
return ValueType(
KindField::encode(nullability == kNullable ? kOptRef : kRef) |
HeapTypeField::encode(heap_type));
@@ -291,14 +291,14 @@ class ValueType {
}
static constexpr ValueType Rtt(uint32_t type_index) {
- CONSTEXPR_DCHECK(HeapType(type_index).is_index());
+ DCHECK(HeapType(type_index).is_index());
return ValueType(KindField::encode(kRtt) |
HeapTypeField::encode(type_index));
}
static constexpr ValueType Rtt(uint32_t type_index,
uint8_t inheritance_depth) {
- CONSTEXPR_DCHECK(HeapType(type_index).is_index());
+ DCHECK(HeapType(type_index).is_index());
return ValueType(KindField::encode(kRttWithDepth) |
HeapTypeField::encode(type_index) |
DepthField::encode(inheritance_depth));
@@ -340,27 +340,34 @@ class ValueType {
return is_packed() ? Primitive(kI32) : *this;
}
+ // Returns the version of this type that does not allow null values. Handles
+ // bottom.
+ constexpr ValueType AsNonNull() const {
+ DCHECK(is_object_reference() || is_bottom());
+ return is_nullable() ? Ref(heap_type(), kNonNullable) : *this;
+ }
+
/***************************** Field Accessors ******************************/
constexpr ValueKind kind() const { return KindField::decode(bit_field_); }
constexpr HeapType::Representation heap_representation() const {
- CONSTEXPR_DCHECK(is_object_reference());
+ DCHECK(is_object_reference());
return static_cast<HeapType::Representation>(
HeapTypeField::decode(bit_field_));
}
constexpr HeapType heap_type() const {
- CONSTEXPR_DCHECK(is_object_reference());
+ DCHECK(is_object_reference());
return HeapType(heap_representation());
}
constexpr uint8_t depth() const {
- CONSTEXPR_DCHECK(has_depth());
+ DCHECK(has_depth());
return DepthField::decode(bit_field_);
}
constexpr uint32_t ref_index() const {
- CONSTEXPR_DCHECK(has_index());
+ DCHECK(has_index());
return HeapTypeField::decode(bit_field_);
}
constexpr Nullability nullability() const {
- CONSTEXPR_DCHECK(is_object_reference());
+ DCHECK(is_object_reference());
return kind() == kOptRef ? kNullable : kNonNullable;
}
@@ -426,7 +433,7 @@ class ValueType {
// (e.g., Ref(HeapType::kFunc, kNullable).value_type_code will return
// kFuncrefCode and not kOptRefCode).
constexpr ValueTypeCode value_type_code() const {
- CONSTEXPR_DCHECK(kind() != kBottom);
+ DCHECK_NE(kBottom, kind());
switch (kind()) {
case kOptRef:
switch (heap_representation()) {
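The value-type.h hunks above add ValueType::AsNonNull(), which maps a nullable reference to its non-nullable counterpart and leaves everything else unchanged. A tiny sketch of that contract, with hypothetical types:

// Hedged sketch of the AsNonNull() contract.
#include <cassert>

enum class NullabilitySketch { kNullable, kNonNullable };
struct RefTypeSketch {
  int heap_type;
  NullabilitySketch nullability;
  RefTypeSketch AsNonNull() const {
    return {heap_type, NullabilitySketch::kNonNullable};
  }
};

int main() {
  RefTypeSketch opt_ref{/*heap_type=*/0, NullabilitySketch::kNullable};
  assert(opt_ref.AsNonNull().nullability == NullabilitySketch::kNonNullable);
}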
diff --git a/chromium/v8/src/wasm/wasm-code-manager.cc b/chromium/v8/src/wasm/wasm-code-manager.cc
index d9225103bbd..8907cbab313 100644
--- a/chromium/v8/src/wasm/wasm-code-manager.cc
+++ b/chromium/v8/src/wasm/wasm-code-manager.cc
@@ -26,6 +26,7 @@
#include "src/wasm/compilation-environment.h"
#include "src/wasm/function-compiler.h"
#include "src/wasm/jump-table-assembler.h"
+#include "src/wasm/memory-protection-key.h"
#include "src/wasm/module-compiler.h"
#include "src/wasm/wasm-debug.h"
#include "src/wasm/wasm-engine.h"
@@ -225,8 +226,8 @@ void WasmCode::LogCode(Isolate* isolate, const char* source_url,
ModuleWireBytes wire_bytes(native_module_->wire_bytes());
const WasmModule* module = native_module_->module();
- WireBytesRef name_ref = module->lazily_generated_names.LookupFunctionName(
- wire_bytes, index(), VectorOf(module->export_table));
+ WireBytesRef name_ref =
+ module->lazily_generated_names.LookupFunctionName(wire_bytes, index());
WasmName name = wire_bytes.GetNameOrNull(name_ref);
const WasmDebugSymbols& debug_symbols = module->debug_symbols;
@@ -502,28 +503,14 @@ int WasmCode::GetSourcePositionBefore(int offset) {
return position;
}
-WasmCodeAllocator::OptionalLock::~OptionalLock() {
- if (allocator_) allocator_->mutex_.Unlock();
-}
-
-void WasmCodeAllocator::OptionalLock::Lock(WasmCodeAllocator* allocator) {
- DCHECK(!is_locked());
- allocator_ = allocator;
- allocator->mutex_.Lock();
-}
-
// static
constexpr size_t WasmCodeAllocator::kMaxCodeSpaceSize;
WasmCodeAllocator::WasmCodeAllocator(WasmCodeManager* code_manager,
- VirtualMemory code_space,
std::shared_ptr<Counters> async_counters)
: code_manager_(code_manager),
- free_code_space_(code_space.region()),
async_counters_(std::move(async_counters)) {
owned_code_space_.reserve(4);
- owned_code_space_.emplace_back(std::move(code_space));
- async_counters_->wasm_module_num_code_spaces()->AddSample(1);
}
WasmCodeAllocator::~WasmCodeAllocator() {
@@ -531,9 +518,12 @@ WasmCodeAllocator::~WasmCodeAllocator() {
committed_code_space());
}
-void WasmCodeAllocator::Init(NativeModule* native_module) {
- DCHECK_EQ(1, owned_code_space_.size());
- native_module->AddCodeSpace(owned_code_space_[0].region(), {});
+void WasmCodeAllocator::Init(VirtualMemory code_space) {
+ DCHECK(owned_code_space_.empty());
+ DCHECK(free_code_space_.IsEmpty());
+ free_code_space_.Merge(code_space.region());
+ owned_code_space_.emplace_back(std::move(code_space));
+ async_counters_->wasm_module_num_code_spaces()->AddSample(1);
}
namespace {
@@ -625,18 +615,11 @@ size_t ReservationSize(size_t code_size_estimate, int num_declared_functions,
Vector<byte> WasmCodeAllocator::AllocateForCode(NativeModule* native_module,
size_t size) {
- return AllocateForCodeInRegion(native_module, size, kUnrestrictedRegion,
- WasmCodeAllocator::OptionalLock{});
+ return AllocateForCodeInRegion(native_module, size, kUnrestrictedRegion);
}
Vector<byte> WasmCodeAllocator::AllocateForCodeInRegion(
- NativeModule* native_module, size_t size, base::AddressRegion region,
- const WasmCodeAllocator::OptionalLock& optional_lock) {
- OptionalLock new_lock;
- if (!optional_lock.is_locked()) new_lock.Lock(this);
- const auto& locked_lock =
- optional_lock.is_locked() ? optional_lock : new_lock;
- DCHECK(locked_lock.is_locked());
+ NativeModule* native_module, size_t size, base::AddressRegion region) {
DCHECK_EQ(code_manager_, native_module->engine()->code_manager());
DCHECK_LT(0, size);
v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
@@ -667,7 +650,7 @@ Vector<byte> WasmCodeAllocator::AllocateForCodeInRegion(
code_manager_->AssignRange(new_region, native_module);
free_code_space_.Merge(new_region);
owned_code_space_.emplace_back(std::move(new_mem));
- native_module->AddCodeSpace(new_region, locked_lock);
+ native_module->AddCodeSpaceLocked(new_region);
code_space = free_code_space_.Allocate(size);
DCHECK(!code_space.is_empty());
@@ -703,16 +686,40 @@ Vector<byte> WasmCodeAllocator::AllocateForCodeInRegion(
return {reinterpret_cast<byte*>(code_space.begin()), code_space.size()};
}
-bool WasmCodeAllocator::SetExecutable(bool executable) {
- base::MutexGuard lock(&mutex_);
- if (is_executable_ == executable) return true;
- TRACE_HEAP("Setting module %p as executable: %d.\n", this, executable);
-
- v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
+// TODO(dlehmann): Do not return the success as a bool, but instead fail hard.
+// That is, pull the CHECK from {NativeModuleModificationScope} in here and
+// return void.
+// TODO(dlehmann): Ensure {SetWritable(true)} is always paired up with a
+// {SetWritable(false)}, such that eventually the code space is write protected.
+// One solution is to make the API foolproof by hiding {SetWritable()} and
+// allowing change of permissions only through {NativeModuleModificationScope}.
+// TODO(dlehmann): Add tests that ensure the code space is eventually write-
+// protected.
+bool WasmCodeAllocator::SetWritable(bool writable) {
+ // Invariant: `writers_count_ > 0` iff `code space has W permission`.
+ // TODO(dlehmann): This is currently not fulfilled before the first call
+ // to SetWritable(false), because initial permissions are RWX.
+ // Fix by setting initial permissions to RX and adding writable permission
+ // where appropriate. See also {WasmCodeManager::Commit()}.
+ if (writable) {
+ if (++writers_count_ > 1) return true;
+ } else {
+ DCHECK_GT(writers_count_, 0);
+ if (--writers_count_ > 0) return true;
+ }
+ writable = writers_count_ > 0;
+ TRACE_HEAP("Setting module %p as writable: %d.\n", this, writable);
if (FLAG_wasm_write_protect_code_memory) {
+ v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
+
+ // Due to concurrent compilation and execution, we always need the execute
+ // permission; however, during codegen we additionally need to write.
+ // Hence this does not actually achieve write-xor-execute, but merely
+ // "always-execute" with "eventually no-write".
PageAllocator::Permission permission =
- executable ? PageAllocator::kReadExecute : PageAllocator::kReadWrite;
+ writable ? PageAllocator::kReadWriteExecute
+ : PageAllocator::kReadExecute;
#if V8_OS_WIN
// On windows, we need to switch permissions per separate virtual memory
// reservation.
@@ -725,8 +732,8 @@ bool WasmCodeAllocator::SetExecutable(bool executable) {
permission)) {
return false;
}
- TRACE_HEAP("Set %p:%p to executable:%d\n", vmem.address(), vmem.end(),
- executable);
+ TRACE_HEAP("Set %p:%p to writable:%d\n", vmem.address(), vmem.end(),
+ writable);
}
#else // V8_OS_WIN
size_t commit_page_size = page_allocator->CommitPageSize();
@@ -738,21 +745,46 @@ bool WasmCodeAllocator::SetExecutable(bool executable) {
permission)) {
return false;
}
- TRACE_HEAP("Set 0x%" PRIxPTR ":0x%" PRIxPTR " to executable:%d\n",
- region.begin(), region.end(), executable);
+ TRACE_HEAP("Set 0x%" PRIxPTR ":0x%" PRIxPTR " to writable:%d\n",
+ region.begin(), region.end(), writable);
}
#endif // V8_OS_WIN
}
- is_executable_ = executable;
return true;
}
+bool WasmCodeAllocator::SetThreadWritable(bool writable) {
+ static thread_local int writable_nesting_level = 0;
+ if (writable) {
+ if (++writable_nesting_level > 1) return true;
+ } else {
+ DCHECK_GT(writable_nesting_level, 0);
+ if (--writable_nesting_level > 0) return true;
+ }
+ writable = writable_nesting_level > 0;
+
+ int key = code_manager_->memory_protection_key_;
+
+ MemoryProtectionKeyPermission permissions =
+ writable ? kNoRestrictions : kDisableWrite;
+
+ TRACE_HEAP("Setting memory protection key %d to writable: %d.\n", key,
+ writable);
+ return SetPermissionsForMemoryProtectionKey(key, permissions);
+}
+
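Both SetWritable() (a shared count, guarded by the allocation mutex) and SetThreadWritable() (a thread_local nesting level) above use the same nesting-counter idea: only the 0 to 1 and 1 to 0 transitions actually change permissions. A minimal sketch of that pattern, independent of V8:

// Hedged sketch of a nesting counter for permission toggling.
#include <cassert>
#include <cstdio>

class WriteScopeCounterSketch {
 public:
  // Returns true when the permission change must actually be applied.
  bool Enter() { return ++depth_ == 1; }
  bool Exit() {
    assert(depth_ > 0);
    return --depth_ == 0;
  }

 private:
  int depth_ = 0;
};

int main() {
  WriteScopeCounterSketch counter;
  if (counter.Enter()) std::puts("make code space writable");   // outermost scope
  if (counter.Enter()) std::puts("(not reached: already writable)");
  if (counter.Exit()) std::puts("(not reached: still nested)");
  if (counter.Exit()) std::puts("make code space read-execute again");
}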
void WasmCodeAllocator::FreeCode(Vector<WasmCode* const> codes) {
// Zap code area and collect freed code regions.
DisjointAllocationPool freed_regions;
size_t code_size = 0;
CODE_SPACE_WRITE_SCOPE
for (WasmCode* code : codes) {
+ // TODO(dlehmann): Pull the {NativeModuleModificationScope} out of the loop.
+ // However, its constructor requires a {NativeModule}.
+ // Can be fixed if {NativeModuleModificationScope()} is changed to take
+ // only a {WasmCodeAllocator} in its constructor.
+ NativeModuleModificationScope native_module_modification_scope(
+ code->native_module());
ZapCode(code->instruction_start(), code->instructions().size());
FlushInstructionCache(code->instruction_start(),
code->instructions().size());
@@ -768,19 +800,16 @@ void WasmCodeAllocator::FreeCode(Vector<WasmCode* const> codes) {
DisjointAllocationPool regions_to_decommit;
PageAllocator* allocator = GetPlatformPageAllocator();
size_t commit_page_size = allocator->CommitPageSize();
- {
- base::MutexGuard guard(&mutex_);
- for (auto region : freed_regions.regions()) {
- auto merged_region = freed_code_space_.Merge(region);
- Address discard_start =
- std::max(RoundUp(merged_region.begin(), commit_page_size),
- RoundDown(region.begin(), commit_page_size));
- Address discard_end =
- std::min(RoundDown(merged_region.end(), commit_page_size),
- RoundUp(region.end(), commit_page_size));
- if (discard_start >= discard_end) continue;
- regions_to_decommit.Merge({discard_start, discard_end - discard_start});
- }
+ for (auto region : freed_regions.regions()) {
+ auto merged_region = freed_code_space_.Merge(region);
+ Address discard_start =
+ std::max(RoundUp(merged_region.begin(), commit_page_size),
+ RoundDown(region.begin(), commit_page_size));
+ Address discard_end =
+ std::min(RoundDown(merged_region.end(), commit_page_size),
+ RoundUp(region.end(), commit_page_size));
+ if (discard_start >= discard_end) continue;
+ regions_to_decommit.Merge({discard_start, discard_end - discard_start});
}
for (auto region : regions_to_decommit.regions()) {
@@ -795,7 +824,6 @@ void WasmCodeAllocator::FreeCode(Vector<WasmCode* const> codes) {
}
size_t WasmCodeAllocator::GetNumCodeSpaces() const {
- base::MutexGuard lock(&mutex_);
return owned_code_space_.size();
}
@@ -809,8 +837,7 @@ NativeModule::NativeModule(WasmEngine* engine, const WasmFeatures& enabled,
std::shared_ptr<NativeModule>* shared_this)
: engine_(engine),
engine_scope_(engine->GetBarrierForBackgroundCompile()->TryLock()),
- code_allocator_(engine->code_manager(), std::move(code_space),
- async_counters),
+ code_allocator_(engine->code_manager(), async_counters),
enabled_features_(enabled),
module_(std::move(module)),
import_wrapper_cache_(std::unique_ptr<WasmImportWrapperCache>(
@@ -838,7 +865,14 @@ NativeModule::NativeModule(WasmEngine* engine, const WasmFeatures& enabled,
std::fill_n(num_liftoff_function_calls_.get(),
module_->num_declared_functions, kCounterStart);
}
- code_allocator_.Init(this);
+ // Even though there cannot be another thread using this object (since we are
+ // just constructing it), we need to hold the mutex to fulfill the
+ // precondition of {WasmCodeAllocator::Init}, which calls
+ // {NativeModule::AddCodeSpaceLocked}.
+ base::RecursiveMutexGuard guard{&allocation_mutex_};
+ auto initial_region = code_space.region();
+ code_allocator_.Init(std::move(code_space));
+ AddCodeSpaceLocked(initial_region);
}
void NativeModule::ReserveCodeTableForTesting(uint32_t max_functions) {
@@ -852,16 +886,13 @@ void NativeModule::ReserveCodeTableForTesting(uint32_t max_functions) {
code_table_ = std::move(new_table);
base::AddressRegion single_code_space_region;
- {
- base::MutexGuard guard(&allocation_mutex_);
- CHECK_EQ(1, code_space_data_.size());
- single_code_space_region = code_space_data_[0].region;
- }
+ base::RecursiveMutexGuard guard(&allocation_mutex_);
+ CHECK_EQ(1, code_space_data_.size());
+ single_code_space_region = code_space_data_[0].region;
// Re-allocate jump table.
- main_jump_table_ = CreateEmptyJumpTableInRegion(
+ main_jump_table_ = CreateEmptyJumpTableInRegionLocked(
JumpTableAssembler::SizeForNumberOfSlots(max_functions),
- single_code_space_region, WasmCodeAllocator::OptionalLock{});
- base::MutexGuard guard(&allocation_mutex_);
+ single_code_space_region);
code_space_data_[0].jump_table = main_jump_table_;
}
@@ -879,7 +910,7 @@ void NativeModule::LogWasmCodes(Isolate* isolate, Script script) {
// Log all owned code, not just the current entries in the code table. This
// will also include import wrappers.
- base::MutexGuard lock(&allocation_mutex_);
+ base::RecursiveMutexGuard lock(&allocation_mutex_);
for (auto& owned_entry : owned_code_) {
owned_entry.second->LogCode(isolate, source_url.get(), script.id());
}
@@ -890,11 +921,12 @@ void NativeModule::LogWasmCodes(Isolate* isolate, Script script) {
CompilationEnv NativeModule::CreateCompilationEnv() const {
return {module(), use_trap_handler_, kRuntimeExceptionSupport,
- enabled_features_, kNoLowerSimd};
+ enabled_features_};
}
WasmCode* NativeModule::AddCodeForTesting(Handle<Code> code) {
CODE_SPACE_WRITE_SCOPE
+ NativeModuleModificationScope native_module_modification_scope(this);
const size_t relocation_size = code->relocation_size();
OwnedVector<byte> reloc_info;
if (relocation_size > 0) {
@@ -929,6 +961,7 @@ WasmCode* NativeModule::AddCodeForTesting(Handle<Code> code) {
const int constant_pool_offset = base_offset + code->constant_pool_offset();
const int code_comments_offset = base_offset + code->code_comments_offset();
+ base::RecursiveMutexGuard guard{&allocation_mutex_};
Vector<uint8_t> dst_code_bytes =
code_allocator_.AllocateForCode(this, instructions.size());
base::Memcpy(dst_code_bytes.begin(), instructions.begin(),
@@ -940,7 +973,7 @@ WasmCode* NativeModule::AddCodeForTesting(Handle<Code> code) {
int mode_mask =
RelocInfo::kApplyMask | RelocInfo::ModeMask(RelocInfo::WASM_STUB_CALL);
auto jump_tables_ref =
- FindJumpTablesForRegion(base::AddressRegionOf(dst_code_bytes));
+ FindJumpTablesForRegionLocked(base::AddressRegionOf(dst_code_bytes));
Address dst_code_addr = reinterpret_cast<Address>(dst_code_bytes.begin());
Address constant_pool_start = dst_code_addr + constant_pool_offset;
RelocIterator orig_it(*code, mode_mask);
@@ -982,7 +1015,7 @@ WasmCode* NativeModule::AddCodeForTesting(Handle<Code> code) {
new_code->MaybePrint();
new_code->Validate();
- return PublishCode(std::move(new_code));
+ return PublishCodeLocked(std::move(new_code));
}
void NativeModule::UseLazyStub(uint32_t func_index) {
@@ -990,25 +1023,24 @@ void NativeModule::UseLazyStub(uint32_t func_index) {
DCHECK_LT(func_index,
module_->num_imported_functions + module_->num_declared_functions);
+ base::RecursiveMutexGuard guard(&allocation_mutex_);
if (!lazy_compile_table_) {
uint32_t num_slots = module_->num_declared_functions;
WasmCodeRefScope code_ref_scope;
CODE_SPACE_WRITE_SCOPE
- base::AddressRegion single_code_space_region;
- {
- base::MutexGuard guard(&allocation_mutex_);
- DCHECK_EQ(1, code_space_data_.size());
- single_code_space_region = code_space_data_[0].region;
- }
- lazy_compile_table_ = CreateEmptyJumpTableInRegion(
+ NativeModuleModificationScope native_module_modification_scope(this);
+ DCHECK_EQ(1, code_space_data_.size());
+ base::AddressRegion single_code_space_region = code_space_data_[0].region;
+ lazy_compile_table_ = CreateEmptyJumpTableInRegionLocked(
JumpTableAssembler::SizeForNumberOfLazyFunctions(num_slots),
- single_code_space_region, WasmCodeAllocator::OptionalLock{});
+ single_code_space_region);
JumpTableAssembler::GenerateLazyCompileTable(
lazy_compile_table_->instruction_start(), num_slots,
module_->num_imported_functions,
- GetNearRuntimeStubEntry(WasmCode::kWasmCompileLazy,
- FindJumpTablesForRegion(base::AddressRegionOf(
- lazy_compile_table_->instructions()))));
+ GetNearRuntimeStubEntry(
+ WasmCode::kWasmCompileLazy,
+ FindJumpTablesForRegionLocked(
+ base::AddressRegionOf(lazy_compile_table_->instructions()))));
}
// Add jump table entry for jump to the lazy compile stub.
@@ -1017,7 +1049,6 @@ void NativeModule::UseLazyStub(uint32_t func_index) {
Address lazy_compile_target =
lazy_compile_table_->instruction_start() +
JumpTableAssembler::LazyCompileSlotIndexToOffset(slot_index);
- base::MutexGuard guard(&allocation_mutex_);
PatchJumpTablesLocked(slot_index, lazy_compile_target);
}
@@ -1026,10 +1057,14 @@ std::unique_ptr<WasmCode> NativeModule::AddCode(
int tagged_parameter_slots, Vector<const byte> protected_instructions_data,
Vector<const byte> source_position_table, WasmCode::Kind kind,
ExecutionTier tier, ForDebugging for_debugging) {
- Vector<byte> code_space =
- code_allocator_.AllocateForCode(this, desc.instr_size);
- auto jump_table_ref =
- FindJumpTablesForRegion(base::AddressRegionOf(code_space));
+ Vector<byte> code_space;
+ NativeModule::JumpTablesRef jump_table_ref;
+ {
+ base::RecursiveMutexGuard guard{&allocation_mutex_};
+ code_space = code_allocator_.AllocateForCode(this, desc.instr_size);
+ jump_table_ref =
+ FindJumpTablesForRegionLocked(base::AddressRegionOf(code_space));
+ }
return AddCodeWithCodeSpace(index, desc, stack_slots, tagged_parameter_slots,
protected_instructions_data,
source_position_table, kind, tier, for_debugging,
@@ -1057,6 +1092,7 @@ std::unique_ptr<WasmCode> NativeModule::AddCodeWithCodeSpace(
const int instr_size = desc.instr_size;
CODE_SPACE_WRITE_SCOPE
+ NativeModuleModificationScope native_module_modification_scope(this);
base::Memcpy(dst_code_bytes.begin(), desc.buffer,
static_cast<size_t>(desc.instr_size));
@@ -1107,7 +1143,7 @@ std::unique_ptr<WasmCode> NativeModule::AddCodeWithCodeSpace(
WasmCode* NativeModule::PublishCode(std::unique_ptr<WasmCode> code) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
"wasm.PublishCode");
- base::MutexGuard lock(&allocation_mutex_);
+ base::RecursiveMutexGuard lock(&allocation_mutex_);
return PublishCodeLocked(std::move(code));
}
@@ -1117,7 +1153,7 @@ std::vector<WasmCode*> NativeModule::PublishCode(
"wasm.PublishCode", "number", codes.size());
std::vector<WasmCode*> published_code;
published_code.reserve(codes.size());
- base::MutexGuard lock(&allocation_mutex_);
+ base::RecursiveMutexGuard lock(&allocation_mutex_);
// The published code is put into the top-most surrounding {WasmCodeRefScope}.
for (auto& code : codes) {
published_code.push_back(PublishCodeLocked(std::move(code)));
@@ -1138,8 +1174,7 @@ WasmCode::Kind GetCodeKind(const WasmCompilationResult& result) {
WasmCode* NativeModule::PublishCodeLocked(
std::unique_ptr<WasmCode> owned_code) {
- // The caller must hold the {allocation_mutex_}, thus we fail to lock it here.
- DCHECK(!allocation_mutex_.TryLock());
+ allocation_mutex_.AssertHeld();
WasmCode* code = owned_code.get();
new_owned_code_.emplace_back(std::move(owned_code));
@@ -1148,7 +1183,7 @@ WasmCode* NativeModule::PublishCodeLocked(
// guaranteed to be valid.
WasmCodeRefScope::AddRef(code);
- if (code->IsAnonymous() || code->index() < module_->num_imported_functions) {
+ if (code->index() < static_cast<int>(module_->num_imported_functions)) {
return code;
}
@@ -1208,7 +1243,7 @@ WasmCode* NativeModule::PublishCodeLocked(
}
void NativeModule::ReinstallDebugCode(WasmCode* code) {
- base::MutexGuard lock(&allocation_mutex_);
+ base::RecursiveMutexGuard lock(&allocation_mutex_);
DCHECK_EQ(this, code->native_module());
DCHECK_EQ(kWithBreakpoints, code->for_debugging());
@@ -1230,9 +1265,14 @@ void NativeModule::ReinstallDebugCode(WasmCode* code) {
PatchJumpTablesLocked(slot_idx, code->instruction_start());
}
-Vector<uint8_t> NativeModule::AllocateForDeserializedCode(
- size_t total_code_size) {
- return code_allocator_.AllocateForCode(this, total_code_size);
+std::pair<Vector<uint8_t>, NativeModule::JumpTablesRef>
+NativeModule::AllocateForDeserializedCode(size_t total_code_size) {
+ base::RecursiveMutexGuard guard{&allocation_mutex_};
+ Vector<uint8_t> code_space =
+ code_allocator_.AllocateForCode(this, total_code_size);
+ auto jump_tables =
+ FindJumpTablesForRegionLocked(base::AddressRegionOf(code_space));
+ return {code_space, jump_tables};
}
std::unique_ptr<WasmCode> NativeModule::AddDeserializedCode(
@@ -1253,7 +1293,7 @@ std::unique_ptr<WasmCode> NativeModule::AddDeserializedCode(
}
std::vector<WasmCode*> NativeModule::SnapshotCodeTable() const {
- base::MutexGuard lock(&allocation_mutex_);
+ base::RecursiveMutexGuard lock(&allocation_mutex_);
WasmCode** start = code_table_.get();
WasmCode** end = start + module_->num_declared_functions;
for (WasmCode* code : VectorOf(start, end - start)) {
@@ -1263,19 +1303,19 @@ std::vector<WasmCode*> NativeModule::SnapshotCodeTable() const {
}
WasmCode* NativeModule::GetCode(uint32_t index) const {
- base::MutexGuard guard(&allocation_mutex_);
+ base::RecursiveMutexGuard guard(&allocation_mutex_);
WasmCode* code = code_table_[declared_function_index(module(), index)];
if (code) WasmCodeRefScope::AddRef(code);
return code;
}
bool NativeModule::HasCode(uint32_t index) const {
- base::MutexGuard guard(&allocation_mutex_);
+ base::RecursiveMutexGuard guard(&allocation_mutex_);
return code_table_[declared_function_index(module(), index)] != nullptr;
}
bool NativeModule::HasCodeWithTier(uint32_t index, ExecutionTier tier) const {
- base::MutexGuard guard(&allocation_mutex_);
+ base::RecursiveMutexGuard guard(&allocation_mutex_);
return code_table_[declared_function_index(module(), index)] != nullptr &&
code_table_[declared_function_index(module(), index)]->tier() == tier;
}
@@ -1289,16 +1329,17 @@ WasmModuleSourceMap* NativeModule::GetWasmSourceMap() const {
return source_map_.get();
}
-WasmCode* NativeModule::CreateEmptyJumpTableInRegion(
- int jump_table_size, base::AddressRegion region,
- const WasmCodeAllocator::OptionalLock& allocator_lock) {
+WasmCode* NativeModule::CreateEmptyJumpTableInRegionLocked(
+ int jump_table_size, base::AddressRegion region) {
+ allocation_mutex_.AssertHeld();
// Only call this if we really need a jump table.
DCHECK_LT(0, jump_table_size);
- Vector<uint8_t> code_space = code_allocator_.AllocateForCodeInRegion(
- this, jump_table_size, region, allocator_lock);
+ Vector<uint8_t> code_space =
+ code_allocator_.AllocateForCodeInRegion(this, jump_table_size, region);
DCHECK(!code_space.empty());
UpdateCodeSize(jump_table_size, ExecutionTier::kNone, kNoDebugging);
CODE_SPACE_WRITE_SCOPE
+ NativeModuleModificationScope native_module_modification_scope(this);
ZapCode(reinterpret_cast<Address>(code_space.begin()), code_space.size());
std::unique_ptr<WasmCode> code{
new WasmCode{this, // native_module
@@ -1317,7 +1358,7 @@ WasmCode* NativeModule::CreateEmptyJumpTableInRegion(
WasmCode::kJumpTable, // kind
ExecutionTier::kNone, // tier
kNoDebugging}}; // for_debugging
- return PublishCode(std::move(code));
+ return PublishCodeLocked(std::move(code));
}
void NativeModule::UpdateCodeSize(size_t size, ExecutionTier tier,
@@ -1330,10 +1371,10 @@ void NativeModule::UpdateCodeSize(size_t size, ExecutionTier tier,
}
void NativeModule::PatchJumpTablesLocked(uint32_t slot_index, Address target) {
- // The caller must hold the {allocation_mutex_}, thus we fail to lock it here.
- DCHECK(!allocation_mutex_.TryLock());
+ allocation_mutex_.AssertHeld();
CODE_SPACE_WRITE_SCOPE
+ NativeModuleModificationScope native_module_modification_scope(this);
for (auto& code_space_data : code_space_data_) {
DCHECK_IMPLIES(code_space_data.jump_table, code_space_data.far_jump_table);
if (!code_space_data.jump_table) continue;
@@ -1343,8 +1384,7 @@ void NativeModule::PatchJumpTablesLocked(uint32_t slot_index, Address target) {
void NativeModule::PatchJumpTableLocked(const CodeSpaceData& code_space_data,
uint32_t slot_index, Address target) {
- // The caller must hold the {allocation_mutex_}, thus we fail to lock it here.
- DCHECK(!allocation_mutex_.TryLock());
+ allocation_mutex_.AssertHeld();
DCHECK_NOT_NULL(code_space_data.jump_table);
DCHECK_NOT_NULL(code_space_data.far_jump_table);
@@ -1369,9 +1409,9 @@ void NativeModule::PatchJumpTableLocked(const CodeSpaceData& code_space_data,
target);
}
-void NativeModule::AddCodeSpace(
- base::AddressRegion region,
- const WasmCodeAllocator::OptionalLock& allocator_lock) {
+void NativeModule::AddCodeSpaceLocked(base::AddressRegion region) {
+ allocation_mutex_.AssertHeld();
+
// Each code space must be at least twice as large as the overhead per code
// space. Otherwise, we are wasting too much memory.
DCHECK_GE(region.size(),
@@ -1387,8 +1427,8 @@ void NativeModule::AddCodeSpace(
->CanRegisterUnwindInfoForNonABICompliantCodeRange()) {
size_t size = Heap::GetCodeRangeReservedAreaSize();
DCHECK_LT(0, size);
- Vector<byte> padding = code_allocator_.AllocateForCodeInRegion(
- this, size, region, allocator_lock);
+ Vector<byte> padding =
+ code_allocator_.AllocateForCodeInRegion(this, size, region);
CHECK_EQ(reinterpret_cast<Address>(padding.begin()), region.begin());
win64_unwindinfo::RegisterNonABICompliantCodeRange(
reinterpret_cast<void*>(region.begin()), region.size());
@@ -1397,28 +1437,29 @@ void NativeModule::AddCodeSpace(
WasmCodeRefScope code_ref_scope;
CODE_SPACE_WRITE_SCOPE
+ NativeModuleModificationScope native_module_modification_scope(this);
WasmCode* jump_table = nullptr;
WasmCode* far_jump_table = nullptr;
const uint32_t num_wasm_functions = module_->num_declared_functions;
const bool is_first_code_space = code_space_data_.empty();
// We always need a far jump table, because it contains the runtime stubs.
- const bool needs_far_jump_table = !FindJumpTablesForRegion(region).is_valid();
+ const bool needs_far_jump_table =
+ !FindJumpTablesForRegionLocked(region).is_valid();
const bool needs_jump_table = num_wasm_functions > 0 && needs_far_jump_table;
if (needs_jump_table) {
- jump_table = CreateEmptyJumpTableInRegion(
- JumpTableAssembler::SizeForNumberOfSlots(num_wasm_functions), region,
- allocator_lock);
+ jump_table = CreateEmptyJumpTableInRegionLocked(
+ JumpTableAssembler::SizeForNumberOfSlots(num_wasm_functions), region);
CHECK(region.contains(jump_table->instruction_start()));
}
if (needs_far_jump_table) {
int num_function_slots = NumWasmFunctionsInFarJumpTable(num_wasm_functions);
- far_jump_table = CreateEmptyJumpTableInRegion(
+ far_jump_table = CreateEmptyJumpTableInRegionLocked(
JumpTableAssembler::SizeForNumberOfFarJumpSlots(
WasmCode::kRuntimeStubCount,
NumWasmFunctionsInFarJumpTable(num_function_slots)),
- region, allocator_lock);
+ region);
CHECK(region.contains(far_jump_table->instruction_start()));
EmbeddedData embedded_data = EmbeddedData::FromBlob();
#define RUNTIME_STUB(Name) Builtins::k##Name,
@@ -1446,7 +1487,6 @@ void NativeModule::AddCodeSpace(
main_far_jump_table_ = far_jump_table;
}
- base::MutexGuard guard(&allocation_mutex_);
code_space_data_.push_back(CodeSpaceData{region, jump_table, far_jump_table});
if (jump_table && !is_first_code_space) {
@@ -1499,8 +1539,7 @@ void NativeModule::SetWireBytes(OwnedVector<const uint8_t> wire_bytes) {
}
void NativeModule::TransferNewOwnedCodeLocked() const {
- // The caller holds the allocation mutex.
- DCHECK(!allocation_mutex_.TryLock());
+ allocation_mutex_.AssertHeld();
DCHECK(!new_owned_code_.empty());
// Sort the {new_owned_code_} vector reversed, such that the position of the
// previously inserted element can be used as a hint for the next element. If
@@ -1524,8 +1563,7 @@ void NativeModule::TransferNewOwnedCodeLocked() const {
}
void NativeModule::InsertToCodeCache(WasmCode* code) {
- // The caller holds {allocation_mutex_}.
- DCHECK(!allocation_mutex_.TryLock());
+ allocation_mutex_.AssertHeld();
DCHECK_NOT_NULL(cached_code_);
if (code->IsAnonymous()) return;
// Only cache Liftoff debugging code or TurboFan code (no breakpoints or
@@ -1541,7 +1579,7 @@ void NativeModule::InsertToCodeCache(WasmCode* code) {
}
WasmCode* NativeModule::Lookup(Address pc) const {
- base::MutexGuard lock(&allocation_mutex_);
+ base::RecursiveMutexGuard lock(&allocation_mutex_);
if (!new_owned_code_.empty()) TransferNewOwnedCodeLocked();
auto iter = owned_code_.upper_bound(pc);
if (iter == owned_code_.begin()) return nullptr;
@@ -1566,8 +1604,9 @@ Address NativeModule::GetCallTargetForFunction(uint32_t func_index) const {
return main_jump_table_->instruction_start() + slot_offset;
}
-NativeModule::JumpTablesRef NativeModule::FindJumpTablesForRegion(
+NativeModule::JumpTablesRef NativeModule::FindJumpTablesForRegionLocked(
base::AddressRegion code_region) const {
+ allocation_mutex_.AssertHeld();
auto jump_table_usable = [code_region](const WasmCode* jump_table) {
Address table_start = jump_table->instruction_start();
Address table_end = table_start + jump_table->instructions().size();
@@ -1583,18 +1622,6 @@ NativeModule::JumpTablesRef NativeModule::FindJumpTablesForRegion(
return max_distance <= WasmCodeAllocator::kMaxCodeSpaceSize;
};
- // Fast path: Try to use {main_jump_table_} and {main_far_jump_table_}.
- // Access to these fields is possible without locking, since these fields are
- // initialized on construction of the {NativeModule}.
- if (main_far_jump_table_ && jump_table_usable(main_far_jump_table_) &&
- (main_jump_table_ == nullptr || jump_table_usable(main_jump_table_))) {
- return {
- main_jump_table_ ? main_jump_table_->instruction_start() : kNullAddress,
- main_far_jump_table_->instruction_start()};
- }
-
- // Otherwise, take the mutex and look for another suitable jump table.
- base::MutexGuard guard(&allocation_mutex_);
for (auto& code_space_data : code_space_data_) {
DCHECK_IMPLIES(code_space_data.jump_table, code_space_data.far_jump_table);
if (!code_space_data.far_jump_table) continue;
@@ -1645,7 +1672,7 @@ uint32_t NativeModule::GetFunctionIndexFromJumpTableSlot(
}
WasmCode::RuntimeStubId NativeModule::GetRuntimeStubId(Address target) const {
- base::MutexGuard guard(&allocation_mutex_);
+ base::RecursiveMutexGuard guard(&allocation_mutex_);
for (auto& code_space_data : code_space_data_) {
if (code_space_data.far_jump_table != nullptr &&
@@ -1679,10 +1706,22 @@ NativeModule::~NativeModule() {
WasmCodeManager::WasmCodeManager(size_t max_committed)
: max_committed_code_space_(max_committed),
- critical_committed_code_space_(max_committed / 2) {
+ critical_committed_code_space_(max_committed / 2),
+ memory_protection_key_(FLAG_wasm_memory_protection_keys
+ ? AllocateMemoryProtectionKey()
+ : kNoMemoryProtectionKey) {
DCHECK_LE(max_committed, FLAG_wasm_max_code_space * MB);
}
+WasmCodeManager::~WasmCodeManager() {
+ // No more committed code space.
+ DCHECK_EQ(0, total_committed_code_space_.load());
+
+ if (FLAG_wasm_memory_protection_keys) {
+ FreeMemoryProtectionKey(memory_protection_key_);
+ }
+}
+
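AllocateMemoryProtectionKey, FreeMemoryProtectionKey, and SetPermissionsForMemoryProtectionKey above wrap a platform facility; on Linux with MPK-capable hardware the underlying primitives are the pkey_* calls. The sketch below shows those raw primitives, not V8's wrappers, and assumes glibc 2.27 or newer, hardware support, and compilation with -D_GNU_SOURCE.

// Hedged sketch of the Linux memory-protection-key primitives.
#include <sys/mman.h>   // mmap, pkey_alloc, pkey_mprotect, pkey_set (needs _GNU_SOURCE)
#include <cstdio>

int main() {
  int pkey = pkey_alloc(/*flags=*/0, /*access_rights=*/0);
  if (pkey < 0) { std::perror("pkey_alloc"); return 1; }

  void* mem = mmap(nullptr, 4096, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  // Tag the pages with the key; the key's per-thread rights now gate access.
  pkey_mprotect(mem, 4096, PROT_READ | PROT_WRITE, pkey);

  // Per-thread, very cheap permission switch (analogue of SetThreadWritable).
  pkey_set(pkey, PKEY_DISABLE_WRITE);   // this thread: no writes
  pkey_set(pkey, 0);                    // this thread: restore write access

  munmap(mem, 4096);
  pkey_free(pkey);
  return 0;
}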
#if defined(V8_OS_WIN64)
bool WasmCodeManager::CanRegisterUnwindInfoForNonABICompliantCodeRange() const {
return win64_unwindinfo::CanRegisterUnwindInfoForNonABICompliantCodeRange() &&
@@ -1711,16 +1750,38 @@ void WasmCodeManager::Commit(base::AddressRegion region) {
break;
}
}
- PageAllocator::Permission permission = FLAG_wasm_write_protect_code_memory
- ? PageAllocator::kReadWrite
- : PageAllocator::kReadWriteExecute;
-
- TRACE_HEAP("Setting rw permissions for 0x%" PRIxPTR ":0x%" PRIxPTR "\n",
- region.begin(), region.end());
+ // Even when we employ W^X with FLAG_wasm_write_protect_code_memory == true,
+ // code pages need to be initially allocated with RWX permission because of
+ // concurrent compilation/execution. For this reason there is no distinction
+ // here based on FLAG_wasm_write_protect_code_memory.
+ // TODO(dlehmann): This allocates initially as writable and executable, and
+ // as such is not safe-by-default. In particular, if
+ // {WasmCodeAllocator::SetWritable(false)} is never called afterwards (e.g.,
+ // because no {NativeModuleModificationScope} is created), the writable
+ // permission is never withdrawn.
+ // One potential fix is to allocate initially with kReadExecute only, which
+ // forces all compilation threads to add the missing
+ // {NativeModuleModificationScope}s before modification; and/or to add
+ // DCHECKs that a {NativeModuleModificationScope} is open when calling this
+ // method.
+ PageAllocator::Permission permission = PageAllocator::kReadWriteExecute;
+
+ bool success;
+ if (FLAG_wasm_memory_protection_keys) {
+ TRACE_HEAP(
+ "Setting rwx permissions and memory protection key %d for 0x%" PRIxPTR
+ ":0x%" PRIxPTR "\n",
+ memory_protection_key_, region.begin(), region.end());
+ success = SetPermissionsAndMemoryProtectionKey(
+ GetPlatformPageAllocator(), region, permission, memory_protection_key_);
+ } else {
+ TRACE_HEAP("Setting rwx permissions for 0x%" PRIxPTR ":0x%" PRIxPTR "\n",
+ region.begin(), region.end());
+ success = SetPermissions(GetPlatformPageAllocator(), region.begin(),
+ region.size(), permission);
+ }
- if (!SetPermissions(GetPlatformPageAllocator(), region.begin(), region.size(),
- permission)) {
- // Highly unlikely.
+ if (V8_UNLIKELY(!success)) {
V8::FatalProcessOutOfMemory(
nullptr,
"WasmCodeManager::Commit: Cannot make pre-reserved region writable");
@@ -1739,8 +1800,13 @@ void WasmCodeManager::Decommit(base::AddressRegion region) {
USE(old_committed);
TRACE_HEAP("Discarding system pages 0x%" PRIxPTR ":0x%" PRIxPTR "\n",
region.begin(), region.end());
- CHECK(allocator->SetPermissions(reinterpret_cast<void*>(region.begin()),
- region.size(), PageAllocator::kNoAccess));
+ if (FLAG_wasm_memory_protection_keys) {
+ CHECK(SetPermissionsAndMemoryProtectionKey(
+ allocator, region, PageAllocator::kNoAccess, kNoMemoryProtectionKey));
+ } else {
+ CHECK(SetPermissions(allocator, region.begin(), region.size(),
+ PageAllocator::kNoAccess));
+ }
}
void WasmCodeManager::AssignRange(base::AddressRegion region,
@@ -2002,10 +2068,15 @@ std::vector<std::unique_ptr<WasmCode>> NativeModule::AddCompiledCode(
DCHECK(result.succeeded());
total_code_space += RoundUp<kCodeAlignment>(result.code_desc.instr_size);
}
- Vector<byte> code_space =
- code_allocator_.AllocateForCode(this, total_code_space);
- // Lookup the jump tables to use once, then use for all code objects.
- auto jump_tables = FindJumpTablesForRegion(base::AddressRegionOf(code_space));
+ Vector<byte> code_space;
+ NativeModule::JumpTablesRef jump_tables;
+ {
+ base::RecursiveMutexGuard guard{&allocation_mutex_};
+ code_space = code_allocator_.AllocateForCode(this, total_code_space);
+ // Lookup the jump tables to use once, then use for all code objects.
+ jump_tables =
+ FindJumpTablesForRegionLocked(base::AddressRegionOf(code_space));
+ }
// If we happen to have a {total_code_space} which is bigger than
// {kMaxCodeSpaceSize}, we would not find valid jump tables for the whole
// region. If this ever happens, we need to handle this case (by splitting the
@@ -2017,6 +2088,7 @@ std::vector<std::unique_ptr<WasmCode>> NativeModule::AddCompiledCode(
// Now copy the generated code into the code space and relocate it.
CODE_SPACE_WRITE_SCOPE
+ NativeModuleModificationScope native_module_modification_scope(this);
for (auto& result : results) {
DCHECK_EQ(result.code_desc.buffer, result.instr_buffer.get());
size_t code_size = RoundUp<kCodeAlignment>(result.code_desc.instr_size);
@@ -2039,12 +2111,12 @@ void NativeModule::SetTieringState(TieringState new_tiering_state) {
// Do not tier down asm.js (just never change the tiering state).
if (module()->origin != kWasmOrigin) return;
- base::MutexGuard lock(&allocation_mutex_);
+ base::RecursiveMutexGuard lock(&allocation_mutex_);
tiering_state_ = new_tiering_state;
}
bool NativeModule::IsTieredDown() {
- base::MutexGuard lock(&allocation_mutex_);
+ base::RecursiveMutexGuard lock(&allocation_mutex_);
return tiering_state_ == kTieredDown;
}
@@ -2054,7 +2126,7 @@ void NativeModule::RecompileForTiering() {
// compilation units finish, code installation will handle that correctly.
TieringState current_state;
{
- base::MutexGuard lock(&allocation_mutex_);
+ base::RecursiveMutexGuard lock(&allocation_mutex_);
current_state = tiering_state_;
// Initialize {cached_code_} to signal that this cache should get filled
@@ -2074,7 +2146,7 @@ void NativeModule::RecompileForTiering() {
std::vector<int> NativeModule::FindFunctionsToRecompile(
TieringState new_tiering_state) {
WasmCodeRefScope code_ref_scope;
- base::MutexGuard guard(&allocation_mutex_);
+ base::RecursiveMutexGuard guard(&allocation_mutex_);
std::vector<int> function_indexes;
int imported = module()->num_imported_functions;
int declared = module()->num_declared_functions;
@@ -2110,19 +2182,16 @@ std::vector<int> NativeModule::FindFunctionsToRecompile(
}
void NativeModule::FreeCode(Vector<WasmCode* const> codes) {
+ base::RecursiveMutexGuard guard(&allocation_mutex_);
// Free the code space.
code_allocator_.FreeCode(codes);
- DebugInfo* debug_info = nullptr;
- {
- base::MutexGuard guard(&allocation_mutex_);
- if (!new_owned_code_.empty()) TransferNewOwnedCodeLocked();
- debug_info = debug_info_.get();
- // Free the {WasmCode} objects. This will also unregister trap handler data.
- for (WasmCode* code : codes) {
- DCHECK_EQ(1, owned_code_.count(code->instruction_start()));
- owned_code_.erase(code->instruction_start());
- }
+ if (!new_owned_code_.empty()) TransferNewOwnedCodeLocked();
+ DebugInfo* debug_info = debug_info_.get();
+ // Free the {WasmCode} objects. This will also unregister trap handler data.
+ for (WasmCode* code : codes) {
+ DCHECK_EQ(1, owned_code_.count(code->instruction_start()));
+ owned_code_.erase(code->instruction_start());
}
// Remove debug side tables for all removed code objects, after releasing our
// lock. This is to avoid lock order inversion.
@@ -2130,16 +2199,17 @@ void NativeModule::FreeCode(Vector<WasmCode* const> codes) {
}
size_t NativeModule::GetNumberOfCodeSpacesForTesting() const {
+ base::RecursiveMutexGuard guard{&allocation_mutex_};
return code_allocator_.GetNumCodeSpaces();
}
bool NativeModule::HasDebugInfo() const {
- base::MutexGuard guard(&allocation_mutex_);
+ base::RecursiveMutexGuard guard(&allocation_mutex_);
return debug_info_ != nullptr;
}
DebugInfo* NativeModule::GetDebugInfo() {
- base::MutexGuard guard(&allocation_mutex_);
+ base::RecursiveMutexGuard guard(&allocation_mutex_);
if (!debug_info_) debug_info_ = std::make_unique<DebugInfo>(this);
return debug_info_.get();
}
@@ -2200,17 +2270,30 @@ WasmCode* WasmCodeManager::LookupCode(Address pc) const {
NativeModuleModificationScope::NativeModuleModificationScope(
NativeModule* native_module)
: native_module_(native_module) {
- if (FLAG_wasm_write_protect_code_memory && native_module_ &&
- (native_module_->modification_scope_depth_++) == 0) {
- bool success = native_module_->SetExecutable(false);
+ DCHECK_NOT_NULL(native_module_);
+ if (FLAG_wasm_memory_protection_keys) {
+ bool success = native_module_->SetThreadWritable(true);
+ if (!success && FLAG_wasm_write_protect_code_memory) {
+ // Fall back to mprotect-based write protection (much slower).
+ success = native_module_->SetWritable(true);
+ CHECK(success);
+ }
+ } else if (FLAG_wasm_write_protect_code_memory) {
+ bool success = native_module_->SetWritable(true);
CHECK(success);
}
}
NativeModuleModificationScope::~NativeModuleModificationScope() {
- if (FLAG_wasm_write_protect_code_memory && native_module_ &&
- (native_module_->modification_scope_depth_--) == 1) {
- bool success = native_module_->SetExecutable(true);
+ if (FLAG_wasm_memory_protection_keys) {
+ bool success = native_module_->SetThreadWritable(false);
+ if (!success && FLAG_wasm_write_protect_code_memory) {
+ // Fall back to mprotect-based write protection (much slower).
+ success = native_module_->SetWritable(false);
+ CHECK(success);
+ }
+ } else if (FLAG_wasm_write_protect_code_memory) {
+ bool success = native_module_->SetWritable(false);
CHECK(success);
}
}
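The constructor/destructor pair above makes NativeModuleModificationScope a RAII guard: construction lifts write protection (via the protection key or, as a fallback, mprotect), destruction restores it, and nesting is safe because of the counters sketched earlier. A generic sketch of how such a scope is used; the class here is hypothetical:

// Hedged sketch of a RAII code-write scope.
#include <cstdio>

class CodeWriteScopeSketch {
 public:
  CodeWriteScopeSketch()  { std::puts("code space -> writable"); }
  ~CodeWriteScopeSketch() { std::puts("code space -> read-execute"); }
};

void PatchJumpTableSketch() {
  CodeWriteScopeSketch scope;   // opened for the duration of the patch
  std::puts("patching jump table slot");
}                               // permission restored here, even on early return

int main() { PatchJumpTableSketch(); }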
diff --git a/chromium/v8/src/wasm/wasm-code-manager.h b/chromium/v8/src/wasm/wasm-code-manager.h
index 2996a6e2c67..312f5346b44 100644
--- a/chromium/v8/src/wasm/wasm-code-manager.h
+++ b/chromium/v8/src/wasm/wasm-code-manager.h
@@ -26,6 +26,7 @@
#include "src/trap-handler/trap-handler.h"
#include "src/utils/vector.h"
#include "src/wasm/compilation-environment.h"
+#include "src/wasm/memory-protection-key.h"
#include "src/wasm/wasm-features.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-module-sourcemap.h"
@@ -94,7 +95,8 @@ struct WasmModule;
V(WasmAllocateArrayWithRtt) \
V(WasmAllocateRtt) \
V(WasmAllocateStructWithRtt) \
- V(WasmSubtypeCheck)
+ V(WasmSubtypeCheck) \
+ V(WasmOnStackReplace)
// Sorted, disjoint and non-overlapping memory regions. A region is of the
// form [start, end). So there's no [start, end), [end, other_end),
@@ -160,11 +162,7 @@ class V8_EXPORT_PRIVATE WasmCode final {
return {reloc_info().end(), static_cast<size_t>(source_positions_size_)};
}
- // TODO(clemensb): Make this return int.
- uint32_t index() const {
- DCHECK_LE(0, index_);
- return index_;
- }
+ int index() const { return index_; }
// Anonymous functions are functions that don't carry an index.
bool IsAnonymous() const { return index_ == kAnonymousFuncIndex; }
Kind kind() const { return KindField::decode(flags_); }
@@ -397,31 +395,11 @@ class WasmCodeAllocator {
static constexpr size_t kMaxCodeSpaceSize = 1024 * MB;
#endif
- // {OptionalLock} is passed between {WasmCodeAllocator} and {NativeModule} to
- // indicate that the lock on the {WasmCodeAllocator} is already taken. It's
- // optional to allow to also call methods without holding the lock.
- class OptionalLock {
- public:
- // External users can only instantiate a non-locked {OptionalLock}.
- OptionalLock() = default;
- ~OptionalLock();
- bool is_locked() const { return allocator_ != nullptr; }
-
- private:
- friend class WasmCodeAllocator;
- // {Lock} is called from the {WasmCodeAllocator} if no locked {OptionalLock}
- // is passed.
- void Lock(WasmCodeAllocator*);
-
- WasmCodeAllocator* allocator_ = nullptr;
- };
-
- WasmCodeAllocator(WasmCodeManager*, VirtualMemory code_space,
- std::shared_ptr<Counters> async_counters);
+ WasmCodeAllocator(WasmCodeManager*, std::shared_ptr<Counters> async_counters);
~WasmCodeAllocator();
// Call before use, after the {NativeModule} is set up completely.
- void Init(NativeModule*);
+ void Init(VirtualMemory code_space);
size_t committed_code_space() const {
return committed_code_space_.load(std::memory_order_acquire);
@@ -434,22 +412,32 @@ class WasmCodeAllocator {
}
// Allocate code space. Returns a valid buffer or fails with OOM (crash).
+ // Hold the {NativeModule}'s {allocation_mutex_} when calling this method.
Vector<byte> AllocateForCode(NativeModule*, size_t size);
// Allocate code space within a specific region. Returns a valid buffer or
// fails with OOM (crash).
+ // Hold the {NativeModule}'s {allocation_mutex_} when calling this method.
Vector<byte> AllocateForCodeInRegion(NativeModule*, size_t size,
- base::AddressRegion,
- const WasmCodeAllocator::OptionalLock&);
+ base::AddressRegion);
+
+ // Sets permissions of all owned code space to read-write or read-only (if
+ // {writable} is false). Returns true on success.
+ // Hold the {NativeModule}'s {allocation_mutex_} when calling this method.
+ V8_EXPORT_PRIVATE bool SetWritable(bool writable);
- // Sets permissions of all owned code space to executable, or read-write (if
- // {executable} is false). Returns true on success.
- V8_EXPORT_PRIVATE bool SetExecutable(bool executable);
+ // Set this thread's permission of all owned code space to read-write or
+ // read-only (if {writable} is false). Uses memory protection keys.
+ // Returns true on success. Since the permission is thread-local, there is no
+ // requirement to hold any lock when calling this method.
+ bool SetThreadWritable(bool writable);
// Free memory pages of all given code objects. Used for wasm code GC.
+ // Hold the {NativeModule}'s {allocation_mutex_} when calling this method.
void FreeCode(Vector<WasmCode* const>);
// Retrieve the number of separately reserved code spaces.
+ // Hold the {NativeModule}'s {allocation_mutex_} when calling this method.
size_t GetNumCodeSpaces() const;
private:
@@ -461,10 +449,8 @@ class WasmCodeAllocator {
// The engine-wide wasm code manager.
WasmCodeManager* const code_manager_;
- mutable base::Mutex mutex_;
-
//////////////////////////////////////////////////////////////////////////////
- // Protected by {mutex_}:
+ // These fields are protected by the mutex in {NativeModule}.
// Code space that was reserved and is available for allocations (subset of
// {owned_code_space_}).
@@ -476,6 +462,8 @@ class WasmCodeAllocator {
DisjointAllocationPool freed_code_space_;
std::vector<VirtualMemory> owned_code_space_;
+ int writers_count_{0};
+
// End of fields protected by {mutex_}.
//////////////////////////////////////////////////////////////////////////////
@@ -483,8 +471,6 @@ class WasmCodeAllocator {
std::atomic<size_t> generated_code_size_{0};
std::atomic<size_t> freed_code_size_{0};
- bool is_executable_ = false;
-
std::shared_ptr<Counters> async_counters_;
};
@@ -523,7 +509,15 @@ class V8_EXPORT_PRIVATE NativeModule final {
// table and jump table via another {PublishCode}.
void ReinstallDebugCode(WasmCode*);
- Vector<uint8_t> AllocateForDeserializedCode(size_t total_code_size);
+ struct JumpTablesRef {
+ Address jump_table_start = kNullAddress;
+ Address far_jump_table_start = kNullAddress;
+
+ bool is_valid() const { return far_jump_table_start != kNullAddress; }
+ };
+
+ std::pair<Vector<uint8_t>, JumpTablesRef> AllocateForDeserializedCode(
+ size_t total_code_size);
std::unique_ptr<WasmCode> AddDeserializedCode(
int index, Vector<byte> instructions, int stack_slots,
@@ -564,26 +558,19 @@ class V8_EXPORT_PRIVATE NativeModule final {
// the first jump table).
Address GetCallTargetForFunction(uint32_t func_index) const;
- struct JumpTablesRef {
- Address jump_table_start = kNullAddress;
- Address far_jump_table_start = kNullAddress;
-
- bool is_valid() const { return far_jump_table_start != kNullAddress; }
- };
-
// Finds the jump tables that should be used for given code region. This
// information is then passed to {GetNearCallTargetForFunction} and
// {GetNearRuntimeStubEntry} to avoid the overhead of looking this information
// up there. Return an empty struct if no suitable jump tables exist.
- JumpTablesRef FindJumpTablesForRegion(base::AddressRegion) const;
+ JumpTablesRef FindJumpTablesForRegionLocked(base::AddressRegion) const;
// Similarly to {GetCallTargetForFunction}, but uses the jump table previously
- // looked up via {FindJumpTablesForRegion}.
+ // looked up via {FindJumpTablesForRegionLocked}.
Address GetNearCallTargetForFunction(uint32_t func_index,
const JumpTablesRef&) const;
// Get a runtime stub entry (which is a far jump table slot) in the jump table
- // previously looked up via {FindJumpTablesForRegion}.
+ // previously looked up via {FindJumpTablesForRegionLocked}.
Address GetNearRuntimeStubEntry(WasmCode::RuntimeStubId index,
const JumpTablesRef&) const;
@@ -591,8 +578,13 @@ class V8_EXPORT_PRIVATE NativeModule final {
// to a function index.
uint32_t GetFunctionIndexFromJumpTableSlot(Address slot_address) const;
- bool SetExecutable(bool executable) {
- return code_allocator_.SetExecutable(executable);
+ bool SetWritable(bool writable) {
+ base::RecursiveMutexGuard guard{&allocation_mutex_};
+ return code_allocator_.SetWritable(writable);
+ }
+
+ bool SetThreadWritable(bool writable) {
+ return code_allocator_.SetThreadWritable(writable);
}
// For cctests, where we build both WasmModule and the runtime objects
@@ -726,9 +718,8 @@ class V8_EXPORT_PRIVATE NativeModule final {
ExecutionTier tier, ForDebugging for_debugging,
Vector<uint8_t> code_space, const JumpTablesRef& jump_tables_ref);
- WasmCode* CreateEmptyJumpTableInRegion(
- int jump_table_size, base::AddressRegion,
- const WasmCodeAllocator::OptionalLock&);
+ WasmCode* CreateEmptyJumpTableInRegionLocked(int jump_table_size,
+ base::AddressRegion);
void UpdateCodeSize(size_t, ExecutionTier, ForDebugging);
@@ -740,8 +731,7 @@ class V8_EXPORT_PRIVATE NativeModule final {
Address target);
// Called by the {WasmCodeAllocator} to register a new code space.
- void AddCodeSpace(base::AddressRegion,
- const WasmCodeAllocator::OptionalLock&);
+ void AddCodeSpaceLocked(base::AddressRegion);
// Hold the {allocation_mutex_} when calling {PublishCodeLocked}.
WasmCode* PublishCodeLocked(std::unique_ptr<WasmCode>);
@@ -806,7 +796,12 @@ class V8_EXPORT_PRIVATE NativeModule final {
std::unique_ptr<uint32_t[]> num_liftoff_function_calls_;
// This mutex protects concurrent calls to {AddCode} and friends.
- mutable base::Mutex allocation_mutex_;
+ // TODO(dlehmann): Revert this to a regular {Mutex} again.
+ // This needs to be a {RecursiveMutex} only because of
+ // {NativeModuleModificationScope} usages, which either (1) occur at places
+ // that already hold the {allocation_mutex_}, or (2) open multiple nested
+ // {NativeModuleModificationScope}s in the call hierarchy. Both are fixable.
+ mutable base::RecursiveMutex allocation_mutex_;
//////////////////////////////////////////////////////////////////////////////
// Protected by {allocation_mutex_}:
@@ -847,7 +842,6 @@ class V8_EXPORT_PRIVATE NativeModule final {
// End of fields protected by {allocation_mutex_}.
//////////////////////////////////////////////////////////////////////////////
- int modification_scope_depth_ = 0;
UseTrapHandler use_trap_handler_ = kNoTrapHandler;
bool lazy_compile_frozen_ = false;
std::atomic<size_t> liftoff_bailout_count_{0};
@@ -861,12 +855,7 @@ class V8_EXPORT_PRIVATE WasmCodeManager final {
WasmCodeManager(const WasmCodeManager&) = delete;
WasmCodeManager& operator=(const WasmCodeManager&) = delete;
-#ifdef DEBUG
- ~WasmCodeManager() {
- // No more committed code space.
- DCHECK_EQ(0, total_committed_code_space_.load());
- }
-#endif
+ ~WasmCodeManager();
#if defined(V8_OS_WIN64)
bool CanRegisterUnwindInfoForNonABICompliantCodeRange() const;
@@ -922,6 +911,8 @@ class V8_EXPORT_PRIVATE WasmCodeManager final {
// and updated after each GC.
std::atomic<size_t> critical_committed_code_space_;
+ const int memory_protection_key_;
+
mutable base::Mutex native_modules_mutex_;
//////////////////////////////////////////////////////////////////////////////
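The thread-local permission switching behind {SetThreadWritable} and the new {memory_protection_key_} member relies on hardware memory protection keys (Intel MPK / Linux pkeys): the page permissions stay the same for the whole process, while each thread flips only its own PKRU register. A rough sketch of the mechanism using the raw Linux/glibc primitives follows; the V8 wrappers in src/wasm/memory-protection-key.h are not part of this hunk, so the snippet only illustrates why no lock is needed.

// Illustration only: per-thread write access to shared code pages via Linux
// memory protection keys (glibc >= 2.27). Not the actual V8 wrapper code.
#define _GNU_SOURCE
#include <sys/mman.h>
#include <cassert>
#include <cstddef>

void* ReserveCodeRegion(size_t size, int* out_pkey) {
  void* region = mmap(nullptr, size, PROT_READ | PROT_EXEC,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  // A return value of -1 means no key is available (cf. kNoMemoryProtectionKey).
  int pkey = pkey_alloc(/*flags=*/0, /*access_rights=*/PKEY_DISABLE_WRITE);
  assert(region != MAP_FAILED && pkey != -1);
  // Tag the pages with the key; writability is now governed per thread.
  pkey_mprotect(region, size, PROT_READ | PROT_WRITE | PROT_EXEC, pkey);
  *out_pkey = pkey;
  return region;
}

// Rough equivalent of SetThreadWritable(): flips only the calling thread's
// PKRU register, so no mutex is required and other threads are unaffected.
void SetThreadWritable(int pkey, bool writable) {
  pkey_set(pkey, writable ? 0 : PKEY_DISABLE_WRITE);
}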
diff --git a/chromium/v8/src/wasm/wasm-constants.h b/chromium/v8/src/wasm/wasm-constants.h
index a02ea78e850..f960e7c201a 100644
--- a/chromium/v8/src/wasm/wasm-constants.h
+++ b/chromium/v8/src/wasm/wasm-constants.h
@@ -101,10 +101,11 @@ enum SectionCode : int8_t {
kDebugInfoSectionCode, // DWARF section .debug_info
kExternalDebugInfoSectionCode, // Section encoding the external symbol path
kCompilationHintsSectionCode, // Compilation hints section
+ kBranchHintsSectionCode, // Branch hints section
// Helper values
kFirstSectionInModule = kTypeSectionCode,
- kLastKnownModuleSection = kCompilationHintsSectionCode,
+ kLastKnownModuleSection = kBranchHintsSectionCode,
kFirstUnorderedSection = kDataCountSectionCode,
};
@@ -156,6 +157,10 @@ constexpr int kAnonymousFuncIndex = -1;
// often enough.
constexpr uint32_t kGenericWrapperBudget = 1000;
+#if V8_TARGET_ARCH_X64
+constexpr int32_t kOSRTargetOffset = 3 * kSystemPointerSize;
+#endif
+
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/wasm/wasm-debug.cc b/chromium/v8/src/wasm/wasm-debug.cc
index 22872f5d884..ad4e7853aa1 100644
--- a/chromium/v8/src/wasm/wasm-debug.cc
+++ b/chromium/v8/src/wasm/wasm-debug.cc
@@ -697,13 +697,19 @@ class DebugInfoImpl {
DCHECK_EQ(frame->function_index(), new_code->index());
DCHECK_EQ(frame->native_module(), new_code->native_module());
DCHECK(frame->wasm_code()->is_liftoff());
+ Address new_pc =
+ FindNewPC(frame, new_code, frame->byte_offset(), return_location);
#ifdef DEBUG
int old_position = frame->position();
#endif
- Address new_pc =
- FindNewPC(frame, new_code, frame->byte_offset(), return_location);
+#if V8_TARGET_ARCH_X64
+ if (frame->wasm_code()->for_debugging()) {
+ base::Memory<Address>(frame->fp() - kOSRTargetOffset) = new_pc;
+ }
+#else
PointerAuthentication::ReplacePC(frame->pc_address(), new_pc,
kSystemPointerSize);
+#endif
// The frame position should still be the same after OSR.
DCHECK_EQ(old_position, frame->position());
}
diff --git a/chromium/v8/src/wasm/wasm-engine.cc b/chromium/v8/src/wasm/wasm-engine.cc
index ed19f89a5e8..c38236dc789 100644
--- a/chromium/v8/src/wasm/wasm-engine.cc
+++ b/chromium/v8/src/wasm/wasm-engine.cc
@@ -18,6 +18,7 @@
#include "src/strings/string-hasher-inl.h"
#include "src/utils/ostreams.h"
#include "src/wasm/function-compiler.h"
+#include "src/wasm/memory-protection-key.h"
#include "src/wasm/module-compiler.h"
#include "src/wasm/module-decoder.h"
#include "src/wasm/module-instantiate.h"
@@ -347,7 +348,8 @@ struct WasmEngine::CurrentGCInfo {
struct WasmEngine::IsolateInfo {
explicit IsolateInfo(Isolate* isolate)
: log_codes(WasmCode::ShouldBeLogged(isolate)),
- async_counters(isolate->async_counters()) {
+ async_counters(isolate->async_counters()),
+ wrapper_compilation_barrier_(std::make_shared<OperationsBarrier>()) {
v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
v8::Platform* platform = V8::GetCurrentPlatform();
foreground_task_runner = platform->GetForegroundTaskRunner(v8_isolate);
@@ -398,6 +400,12 @@ struct WasmEngine::IsolateInfo {
int throw_count = 0;
int rethrow_count = 0;
int catch_count = 0;
+
+ // Operations barrier used to synchronize wrapper compilation with isolate
+ // shutdown.
+ // TODO(wasm): Remove this once we can use the generic js-to-wasm wrapper
+ // everywhere.
+ std::shared_ptr<OperationsBarrier> wrapper_compilation_barrier_;
};
struct WasmEngine::NativeModuleInfo {
@@ -934,9 +942,10 @@ void WasmEngine::DeleteCompileJobsOnIsolate(Isolate* isolate) {
// Under the mutex get all jobs to delete. Then delete them without holding
// the mutex, such that deletion can reenter the WasmEngine.
std::vector<std::unique_ptr<AsyncCompileJob>> jobs_to_delete;
+ std::vector<std::weak_ptr<NativeModule>> modules_in_isolate;
+ std::shared_ptr<OperationsBarrier> wrapper_compilation_barrier;
{
base::MutexGuard guard(&mutex_);
- DCHECK_EQ(1, isolates_.count(isolate));
for (auto it = async_compile_jobs_.begin();
it != async_compile_jobs_.end();) {
if (it->first->isolate() != isolate) {
@@ -946,7 +955,34 @@ void WasmEngine::DeleteCompileJobsOnIsolate(Isolate* isolate) {
jobs_to_delete.push_back(std::move(it->second));
it = async_compile_jobs_.erase(it);
}
+ DCHECK_EQ(1, isolates_.count(isolate));
+ auto* isolate_info = isolates_[isolate].get();
+ wrapper_compilation_barrier = isolate_info->wrapper_compilation_barrier_;
+ for (auto* native_module : isolate_info->native_modules) {
+ DCHECK_EQ(1, native_modules_.count(native_module));
+ modules_in_isolate.emplace_back(native_modules_[native_module]->weak_ptr);
+ }
}
+
+ // All modules that have not finished their initial compilation yet cannot
+ // be shared with other isolates, so we cancel their compilation. In
+ // particular, this cancels wrapper compilation, which is bound to this
+ // isolate (otherwise this would be a use-after-free).
+ for (auto& weak_module : modules_in_isolate) {
+ if (auto shared_module = weak_module.lock()) {
+ shared_module->compilation_state()->CancelInitialCompilation();
+ }
+ }
+
+ // After cancelling, wait for all current wrapper compilation to actually
+ // finish.
+ wrapper_compilation_barrier->CancelAndWait();
+}
+
+OperationsBarrier::Token WasmEngine::StartWrapperCompilation(Isolate* isolate) {
+ base::MutexGuard guard(&mutex_);
+ DCHECK_EQ(1, isolates_.count(isolate));
+ return isolates_[isolate]->wrapper_compilation_barrier_->TryLock();
}
void WasmEngine::AddIsolate(Isolate* isolate) {
@@ -954,6 +990,15 @@ void WasmEngine::AddIsolate(Isolate* isolate) {
DCHECK_EQ(0, isolates_.count(isolate));
isolates_.emplace(isolate, std::make_unique<IsolateInfo>(isolate));
+ // Record memory protection key support.
+ if (FLAG_wasm_memory_protection_keys) {
+ auto* histogram =
+ isolate->counters()->wasm_memory_protection_keys_support();
+ bool has_mpk =
+ code_manager()->memory_protection_key_ != kNoMemoryProtectionKey;
+ histogram->AddSample(has_mpk ? 1 : 0);
+ }
+
// Install sampling GC callback.
// TODO(v8:7424): For now we sample module sizes in a GC callback. This will
// bias samples towards apps with high memory pressure. We should switch to
@@ -1281,6 +1326,18 @@ void WasmEngine::ReportLiveCodeFromStackForGC(Isolate* isolate) {
StackFrame* const frame = it.frame();
if (frame->type() != StackFrame::WASM) continue;
live_wasm_code.insert(WasmFrame::cast(frame)->wasm_code());
+#if V8_TARGET_ARCH_X64
+ if (WasmFrame::cast(frame)->wasm_code()->for_debugging()) {
+ Address osr_target = base::Memory<Address>(WasmFrame::cast(frame)->fp() -
+ kOSRTargetOffset);
+ if (osr_target) {
+ WasmCode* osr_code =
+ isolate->wasm_engine()->code_manager()->LookupCode(osr_target);
+ DCHECK_NOT_NULL(osr_code);
+ live_wasm_code.insert(osr_code);
+ }
+ }
+#endif
}
CheckNoArchivedThreads(isolate);
@@ -1514,24 +1571,29 @@ void WasmEngine::PotentiallyFinishCurrentGC() {
namespace {
-DEFINE_LAZY_LEAKY_OBJECT_GETTER(std::shared_ptr<WasmEngine>,
- GetSharedWasmEngine)
+WasmEngine* global_wasm_engine = nullptr;
} // namespace
// static
void WasmEngine::InitializeOncePerProcess() {
- *GetSharedWasmEngine() = std::make_shared<WasmEngine>();
+ DCHECK_NULL(global_wasm_engine);
+ global_wasm_engine = new WasmEngine();
}
// static
void WasmEngine::GlobalTearDown() {
- GetSharedWasmEngine()->reset();
+ // Note: This can be called multiple times in a row (see
+ // test-api/InitializeAndDisposeMultiple). This is fine, as
+ // {global_wasm_engine} will be nullptr then.
+ delete global_wasm_engine;
+ global_wasm_engine = nullptr;
}
// static
-std::shared_ptr<WasmEngine> WasmEngine::GetWasmEngine() {
- return *GetSharedWasmEngine();
+WasmEngine* WasmEngine::GetWasmEngine() {
+ DCHECK_NOT_NULL(global_wasm_engine);
+ return global_wasm_engine;
}
// {max_mem_pages} is declared in wasm-limits.h.
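The new {StartWrapperCompilation} pairs with the {CancelAndWait} call in {DeleteCompileJobsOnIsolate}: background work first tries to acquire a token, and shutdown waits until all outstanding tokens have been released. A minimal sketch of the expected call pattern in a background wrapper-compilation task; {CompileExportWrapper} is a placeholder, not a function from this patch.

// Sketch of a background task guarding its work with the per-isolate
// wrapper-compilation barrier added above.
#include "src/wasm/wasm-engine.h"

namespace v8 {
namespace internal {
namespace wasm {

void CompileExportWrapper(Isolate*, const FunctionSig*);  // placeholder

void CompileWrapperInBackground(Isolate* isolate, WasmEngine* engine,
                                const FunctionSig* sig) {
  // TryLock() yields an invalid token once the isolate is shutting down;
  // in that case the task must not touch isolate state at all.
  OperationsBarrier::Token token = engine->StartWrapperCompilation(isolate);
  if (!token) return;
  CompileExportWrapper(isolate, sig);
  // The token is released when it goes out of scope; this is what
  // DeleteCompileJobsOnIsolate()'s CancelAndWait() waits for.
}

}  // namespace wasm
}  // namespace internal
}  // namespace v8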
diff --git a/chromium/v8/src/wasm/wasm-engine.h b/chromium/v8/src/wasm/wasm-engine.h
index 5a26bd44577..d4736036cba 100644
--- a/chromium/v8/src/wasm/wasm-engine.h
+++ b/chromium/v8/src/wasm/wasm-engine.h
@@ -246,6 +246,12 @@ class V8_EXPORT_PRIVATE WasmEngine {
// for tearing down an isolate, or to clean it up to be reused.
void DeleteCompileJobsOnIsolate(Isolate* isolate);
+ // Get a token for compiling wrappers for an Isolate. The token is used to
+ // synchronize background tasks on isolate shutdown. The caller should only
+ // hold the token while compiling export wrappers. If the isolate is already
+ // shutting down, this method will return an invalid token.
+ OperationsBarrier::Token StartWrapperCompilation(Isolate*);
+
// Manage the set of Isolates that use this WasmEngine.
void AddIsolate(Isolate* isolate);
void RemoveIsolate(Isolate* isolate);
@@ -351,10 +357,8 @@ class V8_EXPORT_PRIVATE WasmEngine {
static void InitializeOncePerProcess();
static void GlobalTearDown();
- // Returns a reference to the WasmEngine shared by the entire process. Try to
- // use {Isolate::wasm_engine} instead if it is available, which encapsulates
- // engine lifetime decisions during Isolate bootstrapping.
- static std::shared_ptr<WasmEngine> GetWasmEngine();
+ // Returns a reference to the WasmEngine shared by the entire process.
+ static WasmEngine* GetWasmEngine();
private:
struct CurrentGCInfo;
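With the switch from a lazily created shared_ptr to a plain process-wide pointer, the engine's lifetime is now bounded explicitly by the two static calls. Reduced to its essentials, the lifecycle looks as follows; the surrounding function is illustrative, real embedders go through the usual V8 initialization entry points.

// Illustrative only: the process-wide engine lifecycle after this change.
void ProcessLifetimeSketch() {
  v8::internal::wasm::WasmEngine::InitializeOncePerProcess();
  // ... create isolates, compile and run wasm modules; all users obtain the
  //     engine via WasmEngine::GetWasmEngine(), now a raw pointer whose
  //     lifetime is bounded by the two calls in this function ...
  v8::internal::wasm::WasmEngine::GlobalTearDown();  // safe to call repeatedly
}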
diff --git a/chromium/v8/src/wasm/wasm-feature-flags.h b/chromium/v8/src/wasm/wasm-feature-flags.h
index 9c790d7c67e..9adf3d662f1 100644
--- a/chromium/v8/src/wasm/wasm-feature-flags.h
+++ b/chromium/v8/src/wasm/wasm-feature-flags.h
@@ -37,7 +37,12 @@
/* Relaxed SIMD proposal. */ \
/* https://github.com/WebAssembly/relaxed-simd */ \
/* V8 side owner: zhin */ \
- V(relaxed_simd, "relaxed simd", false)
+ V(relaxed_simd, "relaxed simd", false) \
+ \
+ /* Branch Hinting proposal. */ \
+ /* https://github.com/WebAssembly/branch-hinting */ \
+ /* V8 side owner: jkummerow */ \
+ V(branch_hinting, "branch hinting", false)
// #############################################################################
// Staged features (disabled by default, but enabled via --wasm-staging (also
@@ -76,13 +81,6 @@
// Shipped features (enabled by default). Remove the feature flag once they hit
// stable and are expected to stay enabled.
#define FOREACH_WASM_SHIPPED_FEATURE_FLAG(V) /* (force 80 columns) */ \
- /* Multi-value proposal. */ \
- /* https://github.com/WebAssembly/multi-value */ \
- /* V8 side owner: thibaudm */ \
- /* Shipped in v8.6. */ \
- /* ITS: https://groups.google.com/g/v8-users/c/pv2E4yFWeF0 */ \
- V(mv, "multi-value support", true) \
- \
/* Fixed-width SIMD operations. */ \
/* https://github.com/webassembly/simd */ \
/* V8 side owner: gdeepti, zhin */ \
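The feature lists in this header are X-macros: every consumer passes its own V(name, description, default) and expands the whole list, which is how the new branch_hinting entry automatically becomes a flag, a feature-set field, and so on. A generic illustration of the pattern; the consumer below is hypothetical and only mirrors the two experimental entries shown above.

// Hypothetical consumer of an X-macro feature list, mirroring the
// V(branch_hinting, "branch hinting", false) entry added above.
#define FOREACH_EXPERIMENTAL_FEATURE(V)   \
  V(relaxed_simd, "relaxed simd", false)  \
  V(branch_hinting, "branch hinting", false)

struct Features {
#define DECL_FIELD(name, desc, default_value) bool name = default_value;
  FOREACH_EXPERIMENTAL_FEATURE(DECL_FIELD)
#undef DECL_FIELD
};

// Expands to: struct Features { bool relaxed_simd = false;
//                               bool branch_hinting = false; };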
diff --git a/chromium/v8/src/wasm/wasm-init-expr.cc b/chromium/v8/src/wasm/wasm-init-expr.cc
new file mode 100644
index 00000000000..6348c581936
--- /dev/null
+++ b/chromium/v8/src/wasm/wasm-init-expr.cc
@@ -0,0 +1,57 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/wasm/wasm-init-expr.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+std::ostream& operator<<(std::ostream& os, const WasmInitExpr& expr) {
+ os << "(";
+ switch (expr.kind()) {
+ case WasmInitExpr::kNone:
+ UNREACHABLE();
+ case WasmInitExpr::kGlobalGet:
+ os << "global.get " << expr.immediate().index;
+ break;
+ case WasmInitExpr::kI32Const:
+ os << "i32.const " << expr.immediate().i32_const;
+ break;
+ case WasmInitExpr::kI64Const:
+ os << "i64.const " << expr.immediate().i64_const;
+ break;
+ case WasmInitExpr::kF32Const:
+ os << "f32.const " << expr.immediate().f32_const;
+ break;
+ case WasmInitExpr::kF64Const:
+ os << "f64.const " << expr.immediate().f64_const;
+ break;
+ case WasmInitExpr::kS128Const:
+ os << "s128.const 0x" << std::hex;
+ for (uint8_t b : expr.immediate().s128_const) {
+ os << static_cast<int>(b);  // print the byte as a hex number, not a char
+ }
+ os << std::dec;
+ break;
+ case WasmInitExpr::kRefNullConst:
+ os << "ref.null " << expr.immediate().heap_type;
+ break;
+ case WasmInitExpr::kRefFuncConst:
+ os << "ref.func " << expr.immediate().index;
+ break;
+ case WasmInitExpr::kRttCanon:
+ os << "rtt.canon " << expr.immediate().heap_type;
+ break;
+ case WasmInitExpr::kRttSub:
+ os << "rtt.sub " << *expr.operand();
+ break;
+ }
+ os << ")";
+ return os;
+}
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/chromium/v8/src/wasm/wasm-init-expr.h b/chromium/v8/src/wasm/wasm-init-expr.h
new file mode 100644
index 00000000000..39fc1a7ee6a
--- /dev/null
+++ b/chromium/v8/src/wasm/wasm-init-expr.h
@@ -0,0 +1,150 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if !V8_ENABLE_WEBASSEMBLY
+#error This header should only be included if WebAssembly is enabled.
+#endif // !V8_ENABLE_WEBASSEMBLY
+
+#ifndef V8_WASM_WASM_INIT_EXPR_H_
+#define V8_WASM_WASM_INIT_EXPR_H_
+
+#include <memory>
+
+#include "src/wasm/value-type.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+// Representation of an initializer expression.
+class WasmInitExpr {
+ public:
+ enum Operator {
+ kNone,
+ kGlobalGet,
+ kI32Const,
+ kI64Const,
+ kF32Const,
+ kF64Const,
+ kS128Const,
+ kRefNullConst,
+ kRefFuncConst,
+ kRttCanon,
+ kRttSub
+ };
+
+ union Immediate {
+ int32_t i32_const;
+ int64_t i64_const;
+ float f32_const;
+ double f64_const;
+ std::array<uint8_t, kSimd128Size> s128_const;
+ uint32_t index;
+ HeapType::Representation heap_type;
+ };
+
+ WasmInitExpr() : kind_(kNone) { immediate_.i32_const = 0; }
+ explicit WasmInitExpr(int32_t v) : kind_(kI32Const) {
+ immediate_.i32_const = v;
+ }
+ explicit WasmInitExpr(int64_t v) : kind_(kI64Const) {
+ immediate_.i64_const = v;
+ }
+ explicit WasmInitExpr(float v) : kind_(kF32Const) {
+ immediate_.f32_const = v;
+ }
+ explicit WasmInitExpr(double v) : kind_(kF64Const) {
+ immediate_.f64_const = v;
+ }
+ explicit WasmInitExpr(uint8_t v[kSimd128Size]) : kind_(kS128Const) {
+ base::Memcpy(immediate_.s128_const.data(), v, kSimd128Size);
+ }
+
+ MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(WasmInitExpr);
+
+ static WasmInitExpr GlobalGet(uint32_t index) {
+ WasmInitExpr expr;
+ expr.kind_ = kGlobalGet;
+ expr.immediate_.index = index;
+ return expr;
+ }
+
+ static WasmInitExpr RefFuncConst(uint32_t index) {
+ WasmInitExpr expr;
+ expr.kind_ = kRefFuncConst;
+ expr.immediate_.index = index;
+ return expr;
+ }
+
+ static WasmInitExpr RefNullConst(HeapType::Representation heap_type) {
+ WasmInitExpr expr;
+ expr.kind_ = kRefNullConst;
+ expr.immediate_.heap_type = heap_type;
+ return expr;
+ }
+
+ static WasmInitExpr RttCanon(uint32_t index) {
+ WasmInitExpr expr;
+ expr.kind_ = kRttCanon;
+ expr.immediate_.index = index;
+ return expr;
+ }
+
+ static WasmInitExpr RttSub(uint32_t index, WasmInitExpr supertype) {
+ WasmInitExpr expr;
+ expr.kind_ = kRttSub;
+ expr.immediate_.index = index;
+ expr.operand_ = std::make_unique<WasmInitExpr>(std::move(supertype));
+ return expr;
+ }
+
+ Immediate immediate() const { return immediate_; }
+ Operator kind() const { return kind_; }
+ WasmInitExpr* operand() const { return operand_.get(); }
+
+ bool operator==(const WasmInitExpr& other) const {
+ if (kind() != other.kind()) return false;
+ switch (kind()) {
+ case kNone:
+ return true;
+ case kGlobalGet:
+ case kRefFuncConst:
+ case kRttCanon:
+ return immediate().index == other.immediate().index;
+ case kI32Const:
+ return immediate().i32_const == other.immediate().i32_const;
+ case kI64Const:
+ return immediate().i64_const == other.immediate().i64_const;
+ case kF32Const:
+ return immediate().f32_const == other.immediate().f32_const;
+ case kF64Const:
+ return immediate().f64_const == other.immediate().f64_const;
+ case kS128Const:
+ return immediate().s128_const == other.immediate().s128_const;
+ case kRefNullConst:
+ return immediate().heap_type == other.immediate().heap_type;
+ case kRttSub:
+ return immediate().index == other.immediate().index &&
+ *operand() == *other.operand();
+ }
+ }
+
+ V8_INLINE bool operator!=(const WasmInitExpr& other) {
+ return !(*this == other);
+ }
+
+ private:
+ Immediate immediate_;
+ Operator kind_;
+ std::unique_ptr<WasmInitExpr> operand_ = nullptr;
+};
+
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
+ const WasmInitExpr& expr);
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_WASM_INIT_EXPR_H_
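Since {WasmInitExpr} is move-only and a nested {RttSub} owns its supertype operand through a unique_ptr, building initializer expressions looks like the following usage sketch, based purely on the interface declared above; the indices are arbitrary.

// Usage sketch for the WasmInitExpr interface above; not code from the patch.
#include "src/wasm/wasm-init-expr.h"

namespace v8 {
namespace internal {
namespace wasm {

void BuildInitExprs() {
  WasmInitExpr i32(int32_t{42});                     // (i32.const 42)
  WasmInitExpr global = WasmInitExpr::GlobalGet(3);  // (global.get 3)
  // RttSub takes its supertype by value and moves it into the owned
  // operand_ pointer of the outer expression.
  WasmInitExpr rtt = WasmInitExpr::RttSub(7, WasmInitExpr::RttCanon(5));
  (void)i32; (void)global; (void)rtt;  // silence unused-variable warnings
}

}  // namespace wasm
}  // namespace internal
}  // namespace v8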
diff --git a/chromium/v8/src/wasm/wasm-js.cc b/chromium/v8/src/wasm/wasm-js.cc
index 7f1d8e261ff..70492135dec 100644
--- a/chromium/v8/src/wasm/wasm-js.cc
+++ b/chromium/v8/src/wasm/wasm-js.cc
@@ -1475,9 +1475,7 @@ void WebAssemblyFunction(const v8::FunctionCallbackInfo<v8::Value>& args) {
thrower.TypeError("Argument 0 contains results without 'length'");
return;
}
- if (results_len > (enabled_features.has_mv()
- ? i::wasm::kV8MaxWasmFunctionMultiReturns
- : i::wasm::kV8MaxWasmFunctionReturns)) {
+ if (results_len > i::wasm::kV8MaxWasmFunctionReturns) {
thrower.TypeError("Argument 0 contains too many results");
return;
}
@@ -2034,7 +2032,8 @@ Handle<JSFunction> InstallConstructorFunc(Isolate* isolate,
Handle<JSObject> object,
const char* str,
FunctionCallback func) {
- return InstallFunc(isolate, object, str, func, 1, true, DONT_ENUM);
+ return InstallFunc(isolate, object, str, func, 1, true, DONT_ENUM,
+ SideEffectType::kHasNoSideEffect);
}
Handle<String> GetterName(Isolate* isolate, Handle<String> name) {
@@ -2064,7 +2063,8 @@ void InstallGetterSetter(Isolate* isolate, Handle<JSObject> object,
FunctionCallback setter) {
Handle<String> name = v8_str(isolate, str);
Handle<JSFunction> getter_func =
- CreateFunc(isolate, GetterName(isolate, name), getter, false);
+ CreateFunc(isolate, GetterName(isolate, name), getter, false,
+ SideEffectType::kHasNoSideEffect);
Handle<JSFunction> setter_func =
CreateFunc(isolate, SetterName(isolate, name), setter, false);
setter_func->shared().set_length(1);
@@ -2148,11 +2148,12 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
JSFunction::SetInitialMap(isolate, module_constructor, module_map,
module_proto);
InstallFunc(isolate, module_constructor, "imports", WebAssemblyModuleImports,
- 1);
+ 1, false, NONE, SideEffectType::kHasNoSideEffect);
InstallFunc(isolate, module_constructor, "exports", WebAssemblyModuleExports,
- 1);
+ 1, false, NONE, SideEffectType::kHasNoSideEffect);
InstallFunc(isolate, module_constructor, "customSections",
- WebAssemblyModuleCustomSections, 2);
+ WebAssemblyModuleCustomSections, 2, false, NONE,
+ SideEffectType::kHasNoSideEffect);
JSObject::AddProperty(isolate, module_proto, factory->to_string_tag_symbol(),
v8_str(isolate, "WebAssembly.Module"), ro_attributes);
@@ -2192,7 +2193,8 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
JSFunction::SetInitialMap(isolate, table_constructor, table_map, table_proto);
InstallGetter(isolate, table_proto, "length", WebAssemblyTableGetLength);
InstallFunc(isolate, table_proto, "grow", WebAssemblyTableGrow, 1);
- InstallFunc(isolate, table_proto, "get", WebAssemblyTableGet, 1);
+ InstallFunc(isolate, table_proto, "get", WebAssemblyTableGet, 1, false, NONE,
+ SideEffectType::kHasNoSideEffect);
InstallFunc(isolate, table_proto, "set", WebAssemblyTableSet, 2);
if (enabled_features.has_type_reflection()) {
InstallFunc(isolate, table_constructor, "type", WebAssemblyTableType, 1);
@@ -2232,7 +2234,8 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
i::WASM_GLOBAL_OBJECT_TYPE, WasmGlobalObject::kHeaderSize);
JSFunction::SetInitialMap(isolate, global_constructor, global_map,
global_proto);
- InstallFunc(isolate, global_proto, "valueOf", WebAssemblyGlobalValueOf, 0);
+ InstallFunc(isolate, global_proto, "valueOf", WebAssemblyGlobalValueOf, 0,
+ false, NONE, SideEffectType::kHasNoSideEffect);
InstallGetterSetter(isolate, global_proto, "value", WebAssemblyGlobalGetValue,
WebAssemblyGlobalSetValue);
if (enabled_features.has_type_reflection()) {
@@ -2243,15 +2246,8 @@ void WasmJs::Install(Isolate* isolate, bool exposed_on_global_object) {
// Setup Exception
if (enabled_features.has_eh()) {
- Handle<String> exception_name = v8_str(isolate, "Exception");
- Handle<JSFunction> exception_constructor =
- CreateFunc(isolate, exception_name, WebAssemblyException, true,
- SideEffectType::kHasSideEffect);
- exception_constructor->shared().set_length(1);
- JSObject::AddProperty(isolate, webassembly, exception_name,
- exception_constructor, DONT_ENUM);
- // Install the constructor on the context unconditionally so that it is also
- // available when the feature is enabled via the origin trial.
+ Handle<JSFunction> exception_constructor = InstallConstructorFunc(
+ isolate, webassembly, "Exception", WebAssemblyException);
context->set_wasm_exception_constructor(*exception_constructor);
SetDummyInstanceTemplate(isolate, exception_constructor);
JSFunction::EnsureHasInitialMap(exception_constructor);
@@ -2332,7 +2328,6 @@ void WasmJs::InstallConditionalFeatures(Isolate* isolate,
Handle<JSObject> webassembly = Handle<JSObject>::cast(webassembly_obj);
// Setup Exception
Handle<String> exception_name = v8_str(isolate, "Exception");
-
if (JSObject::HasOwnProperty(webassembly, exception_name).FromMaybe(true)) {
// The {Exception} constructor already exists, there is nothing more to
// do.
diff --git a/chromium/v8/src/wasm/wasm-limits.h b/chromium/v8/src/wasm/wasm-limits.h
index 78a8f0afd43..9e565db0e8b 100644
--- a/chromium/v8/src/wasm/wasm-limits.h
+++ b/chromium/v8/src/wasm/wasm-limits.h
@@ -43,8 +43,7 @@ constexpr size_t kV8MaxWasmModuleSize = 1024 * 1024 * 1024; // = 1 GiB
constexpr size_t kV8MaxWasmFunctionSize = 7654321;
constexpr size_t kV8MaxWasmFunctionLocals = 50000;
constexpr size_t kV8MaxWasmFunctionParams = 1000;
-constexpr size_t kV8MaxWasmFunctionMultiReturns = 1000;
-constexpr size_t kV8MaxWasmFunctionReturns = 1;
+constexpr size_t kV8MaxWasmFunctionReturns = 1000;
constexpr size_t kV8MaxWasmFunctionBrTableSize = 65520;
// Don't use this limit directly, but use the value of FLAG_wasm_max_table_size.
constexpr size_t kV8MaxWasmTableSize = 10000000;
@@ -57,7 +56,7 @@ constexpr size_t kV8MaxWasmStructFields = 999;
constexpr uint32_t kV8MaxRttSubtypingDepth = 31;
// Maximum supported by implementation: ((1<<27)-3).
// Reason: total object size in bytes must fit into a Smi, for filler objects.
-constexpr size_t kV8MaxWasmArrayLength = 1u << 24;
+constexpr size_t kV8MaxWasmArrayLength = 1u << 26;
static_assert(kV8MaxWasmTableSize <= 4294967295, // 2^32 - 1
"v8 should not exceed WebAssembly's non-web embedding limits");
diff --git a/chromium/v8/src/wasm/wasm-module-builder.cc b/chromium/v8/src/wasm/wasm-module-builder.cc
index 67f826f2fd9..41fa4f6b6bd 100644
--- a/chromium/v8/src/wasm/wasm-module-builder.cc
+++ b/chromium/v8/src/wasm/wasm-module-builder.cc
@@ -13,7 +13,6 @@
#include "src/wasm/leb-helper.h"
#include "src/wasm/wasm-constants.h"
#include "src/wasm/wasm-module.h"
-#include "src/wasm/wasm-opcodes.h"
#include "src/zone/zone-containers.h"
namespace v8 {
@@ -301,6 +300,11 @@ uint32_t WasmModuleBuilder::AddArrayType(ArrayType* type) {
return index;
}
+// static
+const uint32_t WasmModuleBuilder::kNullIndex =
+ std::numeric_limits<uint32_t>::max();
+
+// TODO(9495): Add support for typed function tables and more init. expressions.
uint32_t WasmModuleBuilder::AllocateIndirectFunctions(uint32_t count) {
DCHECK(allocating_indirect_functions_allowed_);
uint32_t index = static_cast<uint32_t>(indirect_functions_.size());
@@ -310,7 +314,7 @@ uint32_t WasmModuleBuilder::AllocateIndirectFunctions(uint32_t count) {
}
uint32_t new_size = static_cast<uint32_t>(indirect_functions_.size()) + count;
DCHECK(max_table_size_ == 0 || new_size <= max_table_size_);
- indirect_functions_.resize(new_size, WasmElemSegment::kNullIndex);
+ indirect_functions_.resize(new_size, kNullIndex);
uint32_t max = max_table_size_ > 0 ? max_table_size_ : new_size;
if (tables_.empty()) {
// This cannot use {AddTable} because that would flip the
@@ -710,13 +714,13 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer* buffer) const {
buffer->write_u8(0); // table index
uint32_t first_element = 0;
while (first_element < indirect_functions_.size() &&
- indirect_functions_[first_element] == WasmElemSegment::kNullIndex) {
+ indirect_functions_[first_element] == kNullIndex) {
first_element++;
}
uint32_t last_element =
static_cast<uint32_t>(indirect_functions_.size() - 1);
while (last_element >= first_element &&
- indirect_functions_[last_element] == WasmElemSegment::kNullIndex) {
+ indirect_functions_[last_element] == kNullIndex) {
last_element--;
}
buffer->write_u8(kExprI32Const); // offset
diff --git a/chromium/v8/src/wasm/wasm-module-builder.h b/chromium/v8/src/wasm/wasm-module-builder.h
index f7b5ff1b768..c1d15a834ea 100644
--- a/chromium/v8/src/wasm/wasm-module-builder.h
+++ b/chromium/v8/src/wasm/wasm-module-builder.h
@@ -297,6 +297,8 @@ class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
return types_[exceptions_[index]].sig;
}
+ static const uint32_t kNullIndex;
+
private:
struct Type {
enum Kind { kFunctionSig, kStructType, kArrayType };
diff --git a/chromium/v8/src/wasm/wasm-module.cc b/chromium/v8/src/wasm/wasm-module.cc
index c336dc5f7d1..acebe8d0e5e 100644
--- a/chromium/v8/src/wasm/wasm-module.cc
+++ b/chromium/v8/src/wasm/wasm-module.cc
@@ -21,25 +21,23 @@
#include "src/snapshot/snapshot.h"
#include "src/wasm/module-decoder.h"
#include "src/wasm/wasm-code-manager.h"
+#include "src/wasm/wasm-init-expr.h"
#include "src/wasm/wasm-js.h"
#include "src/wasm/wasm-objects-inl.h"
#include "src/wasm/wasm-result.h"
+#include "src/wasm/wasm-subtyping.h"
namespace v8 {
namespace internal {
namespace wasm {
-// static
-const uint32_t WasmElemSegment::kNullIndex;
-
WireBytesRef LazilyGeneratedNames::LookupFunctionName(
- const ModuleWireBytes& wire_bytes, uint32_t function_index,
- Vector<const WasmExport> export_table) const {
+ const ModuleWireBytes& wire_bytes, uint32_t function_index) const {
base::MutexGuard lock(&mutex_);
if (!function_names_) {
function_names_.reset(new std::unordered_map<uint32_t, WireBytesRef>());
DecodeFunctionNames(wire_bytes.start(), wire_bytes.end(),
- function_names_.get(), export_table);
+ function_names_.get());
}
auto it = function_names_->find(function_index);
if (it == function_names_->end()) return WireBytesRef();
@@ -180,7 +178,7 @@ WasmName ModuleWireBytes::GetNameOrNull(WireBytesRef ref) const {
WasmName ModuleWireBytes::GetNameOrNull(const WasmFunction* function,
const WasmModule* module) const {
return GetNameOrNull(module->lazily_generated_names.LookupFunctionName(
- *this, function->func_index, VectorOf(module->export_table)));
+ *this, function->func_index));
}
std::ostream& operator<<(std::ostream& os, const WasmFunctionName& name) {
@@ -199,6 +197,8 @@ std::ostream& operator<<(std::ostream& os, const WasmFunctionName& name) {
WasmModule::WasmModule(std::unique_ptr<Zone> signature_zone)
: signature_zone(std::move(signature_zone)) {}
+WasmModule::~WasmModule() { DeleteCachedTypeJudgementsForModule(this); }
+
bool IsWasmCodegenAllowed(Isolate* isolate, Handle<Context> context) {
// TODO(wasm): Once wasm has its own CSP policy, we should introduce a
// separate callback that includes information about the module about to be
diff --git a/chromium/v8/src/wasm/wasm-module.h b/chromium/v8/src/wasm/wasm-module.h
index bbfcf9623b5..d185e673412 100644
--- a/chromium/v8/src/wasm/wasm-module.h
+++ b/chromium/v8/src/wasm/wasm-module.h
@@ -16,10 +16,11 @@
#include "src/common/globals.h"
#include "src/handles/handles.h"
#include "src/utils/vector.h"
+#include "src/wasm/branch-hint-map.h"
#include "src/wasm/signature-map.h"
#include "src/wasm/struct-types.h"
#include "src/wasm/wasm-constants.h"
-#include "src/wasm/wasm-opcodes.h"
+#include "src/wasm/wasm-init-expr.h"
namespace v8 {
@@ -107,30 +108,34 @@ struct WasmDataSegment {
// Static representation of wasm element segment (table initializer).
struct WasmElemSegment {
- MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(WasmElemSegment);
-
// Construct an active segment.
- WasmElemSegment(uint32_t table_index, WasmInitExpr offset)
- : type(kWasmFuncRef),
+ WasmElemSegment(ValueType type, uint32_t table_index, WasmInitExpr offset)
+ : type(type),
table_index(table_index),
offset(std::move(offset)),
status(kStatusActive) {}
// Construct a passive or declarative segment, which has no table index or
// offset.
- explicit WasmElemSegment(bool declarative)
- : type(kWasmFuncRef),
+ WasmElemSegment(ValueType type, bool declarative)
+ : type(type),
table_index(0),
status(declarative ? kStatusDeclarative : kStatusPassive) {}
- // Used in the {entries} vector to represent a `ref.null` entry in a passive
- // segment.
- V8_EXPORT_PRIVATE static const uint32_t kNullIndex = ~0u;
+ // Default constructor: constructs a placeholder segment (bottom type) that
+ // is expected to be overwritten before use.
+ WasmElemSegment()
+ : type(kWasmBottom), table_index(0), status(kStatusActive) {}
+
+ WasmElemSegment(const WasmElemSegment&) = delete;
+ WasmElemSegment(WasmElemSegment&&) V8_NOEXCEPT = default;
+ WasmElemSegment& operator=(const WasmElemSegment&) = delete;
+ WasmElemSegment& operator=(WasmElemSegment&&) V8_NOEXCEPT = default;
ValueType type;
uint32_t table_index;
WasmInitExpr offset;
- std::vector<uint32_t> entries;
+ std::vector<WasmInitExpr> entries;
enum Status {
kStatusActive, // copied automatically during instantiation.
kStatusPassive, // copied explicitly after instantiation.
@@ -188,8 +193,7 @@ struct ModuleWireBytes;
class V8_EXPORT_PRIVATE LazilyGeneratedNames {
public:
WireBytesRef LookupFunctionName(const ModuleWireBytes& wire_bytes,
- uint32_t function_index,
- Vector<const WasmExport> export_table) const;
+ uint32_t function_index) const;
void AddForTesting(int function_index, WireBytesRef name);
@@ -335,6 +339,7 @@ struct V8_EXPORT_PRIVATE WasmModule {
std::vector<WasmException> exceptions;
std::vector<WasmElemSegment> elem_segments;
std::vector<WasmCompilationHint> compilation_hints;
+ BranchHintInfo branch_hints;
SignatureMap signature_map; // canonicalizing map for signature indexes.
ModuleOrigin origin = kWasmOrigin; // origin of the module
@@ -347,6 +352,7 @@ struct V8_EXPORT_PRIVATE WasmModule {
explicit WasmModule(std::unique_ptr<Zone> signature_zone = nullptr);
WasmModule(const WasmModule&) = delete;
+ ~WasmModule();
WasmModule& operator=(const WasmModule&) = delete;
};
diff --git a/chromium/v8/src/wasm/wasm-objects-inl.h b/chromium/v8/src/wasm/wasm-objects-inl.h
index 3da7e1650a1..e102fbd97fe 100644
--- a/chromium/v8/src/wasm/wasm-objects-inl.h
+++ b/chromium/v8/src/wasm/wasm-objects-inl.h
@@ -36,13 +36,14 @@ namespace internal {
OBJECT_CONSTRUCTORS_IMPL(WasmExceptionObject, JSObject)
TQ_OBJECT_CONSTRUCTORS_IMPL(WasmExceptionTag)
-OBJECT_CONSTRUCTORS_IMPL(WasmExportedFunctionData, Struct)
+OBJECT_CONSTRUCTORS_IMPL(WasmExportedFunctionData, WasmFunctionData)
OBJECT_CONSTRUCTORS_IMPL(WasmGlobalObject, JSObject)
OBJECT_CONSTRUCTORS_IMPL(WasmInstanceObject, JSObject)
OBJECT_CONSTRUCTORS_IMPL(WasmMemoryObject, JSObject)
OBJECT_CONSTRUCTORS_IMPL(WasmModuleObject, JSObject)
OBJECT_CONSTRUCTORS_IMPL(WasmTableObject, JSObject)
OBJECT_CONSTRUCTORS_IMPL(AsmWasmData, Struct)
+TQ_OBJECT_CONSTRUCTORS_IMPL(WasmFunctionData)
TQ_OBJECT_CONSTRUCTORS_IMPL(WasmTypeInfo)
TQ_OBJECT_CONSTRUCTORS_IMPL(WasmStruct)
TQ_OBJECT_CONSTRUCTORS_IMPL(WasmArray)
@@ -55,6 +56,7 @@ CAST_ACCESSOR(WasmMemoryObject)
CAST_ACCESSOR(WasmModuleObject)
CAST_ACCESSOR(WasmTableObject)
CAST_ACCESSOR(AsmWasmData)
+CAST_ACCESSOR(WasmFunctionData)
CAST_ACCESSOR(WasmTypeInfo)
CAST_ACCESSOR(WasmStruct)
CAST_ACCESSOR(WasmArray)
@@ -329,18 +331,17 @@ WasmExportedFunction::WasmExportedFunction(Address ptr) : JSFunction(ptr) {
}
CAST_ACCESSOR(WasmExportedFunction)
+// WasmFunctionData
+ACCESSORS(WasmFunctionData, ref, Object, kRefOffset)
+
// WasmExportedFunctionData
ACCESSORS(WasmExportedFunctionData, wrapper_code, Code, kWrapperCodeOffset)
ACCESSORS(WasmExportedFunctionData, instance, WasmInstanceObject,
kInstanceOffset)
-SMI_ACCESSORS(WasmExportedFunctionData, jump_table_offset,
- kJumpTableOffsetOffset)
SMI_ACCESSORS(WasmExportedFunctionData, function_index, kFunctionIndexOffset)
ACCESSORS(WasmExportedFunctionData, signature, Foreign, kSignatureOffset)
SMI_ACCESSORS(WasmExportedFunctionData, wrapper_budget, kWrapperBudgetOffset)
ACCESSORS(WasmExportedFunctionData, c_wrapper_code, Object, kCWrapperCodeOffset)
-ACCESSORS(WasmExportedFunctionData, wasm_call_target, Object,
- kWasmCallTargetOffset)
SMI_ACCESSORS(WasmExportedFunctionData, packed_args_size, kPackedArgsSizeOffset)
wasm::FunctionSig* WasmExportedFunctionData::sig() const {
@@ -354,7 +355,7 @@ WasmJSFunction::WasmJSFunction(Address ptr) : JSFunction(ptr) {
CAST_ACCESSOR(WasmJSFunction)
// WasmJSFunctionData
-OBJECT_CONSTRUCTORS_IMPL(WasmJSFunctionData, Struct)
+OBJECT_CONSTRUCTORS_IMPL(WasmJSFunctionData, WasmFunctionData)
CAST_ACCESSOR(WasmJSFunctionData)
SMI_ACCESSORS(WasmJSFunctionData, serialized_return_count,
kSerializedReturnCountOffset)
@@ -362,7 +363,6 @@ SMI_ACCESSORS(WasmJSFunctionData, serialized_parameter_count,
kSerializedParameterCountOffset)
ACCESSORS(WasmJSFunctionData, serialized_signature, PodArray<wasm::ValueType>,
kSerializedSignatureOffset)
-ACCESSORS(WasmJSFunctionData, callable, JSReceiver, kCallableOffset)
ACCESSORS(WasmJSFunctionData, wrapper_code, Code, kWrapperCodeOffset)
ACCESSORS(WasmJSFunctionData, wasm_to_js_wrapper_code, Code,
kWasmToJsWrapperCodeOffset)
@@ -416,13 +416,27 @@ wasm::StructType* WasmStruct::type(Map map) {
wasm::StructType* WasmStruct::GcSafeType(Map map) {
DCHECK_EQ(WASM_STRUCT_TYPE, map.instance_type());
HeapObject raw = HeapObject::cast(map.constructor_or_back_pointer());
- MapWord map_word = raw.map_word();
+ MapWord map_word = raw.map_word(kRelaxedLoad);
HeapObject forwarded =
map_word.IsForwardingAddress() ? map_word.ToForwardingAddress() : raw;
Foreign foreign = Foreign::cast(forwarded);
return reinterpret_cast<wasm::StructType*>(foreign.foreign_address());
}
+int WasmStruct::Size(const wasm::StructType* type) {
+ // Object size must fit into a Smi (because of filler objects), and its
+ // computation must not overflow.
+ STATIC_ASSERT(Smi::kMaxValue <= kMaxInt);
+ DCHECK_LE(type->total_fields_size(), Smi::kMaxValue - kHeaderSize);
+ return std::max(kHeaderSize + static_cast<int>(type->total_fields_size()),
+ Heap::kMinObjectSizeInTaggedWords * kTaggedSize);
+}
+
+int WasmStruct::GcSafeSize(Map map) {
+ wasm::StructType* type = GcSafeType(map);
+ return Size(type);
+}
+
wasm::StructType* WasmStruct::type() const { return type(map()); }
ObjectSlot WasmStruct::RawField(int raw_offset) {
@@ -439,7 +453,7 @@ wasm::ArrayType* WasmArray::type(Map map) {
wasm::ArrayType* WasmArray::GcSafeType(Map map) {
DCHECK_EQ(WASM_ARRAY_TYPE, map.instance_type());
HeapObject raw = HeapObject::cast(map.constructor_or_back_pointer());
- MapWord map_word = raw.map_word();
+ MapWord map_word = raw.map_word(kRelaxedLoad);
HeapObject forwarded =
map_word.IsForwardingAddress() ? map_word.ToForwardingAddress() : raw;
Foreign foreign = Foreign::cast(forwarded);
@@ -460,12 +474,6 @@ int WasmArray::GcSafeSizeFor(Map map, int length) {
void WasmTypeInfo::clear_foreign_address(Isolate* isolate) {
#ifdef V8_HEAP_SANDBOX
-
- // TODO(syg): V8_HEAP_SANDBOX doesn't work with pointer cage
-#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
-#error "V8_HEAP_SANDBOX requires per-Isolate pointer compression cage"
-#endif
-
// Due to the type-specific pointer tags for external pointers, we need to
// allocate an entry in the table here even though it will just store nullptr.
AllocateExternalPointerEntries(isolate);
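The {WasmStruct::Size} helper added in this hunk clamps tiny structs to the minimum object size so the resulting object can always be replaced by a filler during GC, while the DCHECK plus STATIC_ASSERT guard against the size overflowing the Smi range. A small worked model, with example constants that are not necessarily the values of an actual build.

// Worked model of the rounding in WasmStruct::Size(); constants are examples.
#include <algorithm>

constexpr int kTaggedSize = 8;
constexpr int kHeaderSize = 16;                 // map word + type information
constexpr int kMinObjectSizeInTaggedWords = 2;  // smallest GC-visible object

constexpr int StructSize(int total_fields_size) {
  return std::max(kHeaderSize + total_fields_size,
                  kMinObjectSizeInTaggedWords * kTaggedSize);
}

static_assert(StructSize(0) == 16, "empty struct is padded to the header size");
static_assert(StructSize(4) == 20, "a single i32 field adds four bytes");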
diff --git a/chromium/v8/src/wasm/wasm-objects.cc b/chromium/v8/src/wasm/wasm-objects.cc
index d4e7cb65a13..718124debf1 100644
--- a/chromium/v8/src/wasm/wasm-objects.cc
+++ b/chromium/v8/src/wasm/wasm-objects.cc
@@ -226,33 +226,21 @@ MaybeHandle<String> WasmModuleObject::GetFunctionNameOrNull(
wasm::WireBytesRef name =
module_object->module()->lazily_generated_names.LookupFunctionName(
wasm::ModuleWireBytes(module_object->native_module()->wire_bytes()),
- func_index, VectorOf(module_object->module()->export_table));
+ func_index);
if (!name.is_set()) return {};
return ExtractUtf8StringFromModuleBytes(isolate, module_object, name,
kNoInternalize);
}
-Handle<String> WasmModuleObject::GetFunctionName(
- Isolate* isolate, Handle<WasmModuleObject> module_object,
- uint32_t func_index) {
- MaybeHandle<String> name =
- GetFunctionNameOrNull(isolate, module_object, func_index);
- if (!name.is_null()) return name.ToHandleChecked();
- EmbeddedVector<char, 32> buffer;
- DCHECK_GE(func_index, module_object->module()->num_imported_functions);
- int length = SNPrintF(buffer, "func%u", func_index);
- return isolate->factory()
- ->NewStringFromOneByte(Vector<uint8_t>::cast(buffer.SubVector(0, length)))
- .ToHandleChecked();
-}
-
-Vector<const uint8_t> WasmModuleObject::GetRawFunctionName(
- uint32_t func_index) {
+Vector<const uint8_t> WasmModuleObject::GetRawFunctionName(int func_index) {
+ if (func_index == wasm::kAnonymousFuncIndex) {
+ return Vector<const uint8_t>({nullptr, 0});
+ }
DCHECK_GT(module()->functions.size(), func_index);
wasm::ModuleWireBytes wire_bytes(native_module()->wire_bytes());
wasm::WireBytesRef name_ref =
- module()->lazily_generated_names.LookupFunctionName(
- wire_bytes, func_index, VectorOf(module()->export_table));
+ module()->lazily_generated_names.LookupFunctionName(wire_bytes,
+ func_index);
wasm::WasmName name = wire_bytes.GetNameOrNull(name_ref);
return Vector<const uint8_t>::cast(name);
}
@@ -454,21 +442,18 @@ void WasmTableObject::Set(Isolate* isolate, Handle<WasmTableObject> table,
case wasm::HeapType::kEq:
case wasm::HeapType::kData:
case wasm::HeapType::kI31:
- // TODO(7748): Implement once we have a story for struct/arrays/i31ref in
- // JS.
- UNIMPLEMENTED();
+ // TODO(7748): Implement once we have struct/arrays/i31ref tables.
+ UNREACHABLE();
case wasm::HeapType::kBottom:
UNREACHABLE();
default:
DCHECK(!table->instance().IsUndefined());
- if (WasmInstanceObject::cast(table->instance())
- .module()
- ->has_signature(entry_index)) {
- SetFunctionTableEntry(isolate, table, entries, entry_index, entry);
- return;
- }
- // TODO(7748): Implement once we have a story for struct/arrays in JS.
- UNIMPLEMENTED();
+ // TODO(7748): Relax this once we have struct/array/i31ref tables.
+ DCHECK(WasmInstanceObject::cast(table->instance())
+ .module()
+ ->has_signature(table->type().ref_index()));
+ SetFunctionTableEntry(isolate, table, entries, entry_index, entry);
+ return;
}
}
@@ -509,18 +494,16 @@ Handle<Object> WasmTableObject::Get(Isolate* isolate,
UNREACHABLE();
default:
DCHECK(!table->instance().IsUndefined());
- if (WasmInstanceObject::cast(table->instance())
- .module()
- ->has_signature(entry_index)) {
- if (WasmExportedFunction::IsWasmExportedFunction(*entry) ||
- WasmJSFunction::IsWasmJSFunction(*entry) ||
- WasmCapiFunction::IsWasmCapiFunction(*entry)) {
- return entry;
- }
- break;
+ // TODO(7748): Relax this once we have struct/array/i31ref tables.
+ DCHECK(WasmInstanceObject::cast(table->instance())
+ .module()
+ ->has_signature(table->type().ref_index()));
+ if (WasmExportedFunction::IsWasmExportedFunction(*entry) ||
+ WasmJSFunction::IsWasmJSFunction(*entry) ||
+ WasmCapiFunction::IsWasmCapiFunction(*entry)) {
+ return entry;
}
- // TODO(7748): Implement once we have a story for struct/arrays in JS.
- UNIMPLEMENTED();
+ break;
}
// {entry} is not a valid entry in the table. It has to be a placeholder
@@ -1898,30 +1881,18 @@ Handle<WasmExportedFunction> WasmExportedFunction::New(
(export_wrapper->is_builtin() &&
export_wrapper->builtin_index() == Builtins::kGenericJSToWasmWrapper));
int num_imported_functions = instance->module()->num_imported_functions;
- int jump_table_offset = -1;
- if (func_index >= num_imported_functions) {
- uint32_t jump_table_diff =
- instance->module_object().native_module()->GetJumpTableOffset(
- func_index);
- DCHECK_GE(kMaxInt, jump_table_diff);
- jump_table_offset = static_cast<int>(jump_table_diff);
- }
+ Handle<Object> ref =
+ func_index >= num_imported_functions
+ ? instance
+ : handle(instance->imported_function_refs().get(func_index), isolate);
+
Factory* factory = isolate->factory();
const wasm::FunctionSig* sig = instance->module()->functions[func_index].sig;
- Handle<Foreign> sig_foreign =
- factory->NewForeign(reinterpret_cast<Address>(sig));
+ Address call_target = instance->GetCallTarget(func_index);
Handle<WasmExportedFunctionData> function_data =
- Handle<WasmExportedFunctionData>::cast(factory->NewStruct(
- WASM_EXPORTED_FUNCTION_DATA_TYPE, AllocationType::kOld));
- function_data->set_wrapper_code(*export_wrapper);
- function_data->set_instance(*instance);
- function_data->set_jump_table_offset(jump_table_offset);
- function_data->set_function_index(func_index);
- function_data->set_signature(*sig_foreign);
- function_data->set_wrapper_budget(wasm::kGenericWrapperBudget);
- function_data->set_c_wrapper_code(Smi::zero(), SKIP_WRITE_BARRIER);
- function_data->set_wasm_call_target(Smi::zero(), SKIP_WRITE_BARRIER);
- function_data->set_packed_args_size(0);
+ factory->NewWasmExportedFunctionData(
+ export_wrapper, instance, call_target, ref, func_index,
+ reinterpret_cast<Address>(sig), wasm::kGenericWrapperBudget);
MaybeHandle<String> maybe_name;
bool is_asm_js_module = instance->module_object().is_asm_js();
@@ -2042,18 +2013,18 @@ Handle<WasmJSFunction> WasmJSFunction::New(Isolate* isolate,
Handle<Code> wrapper_code =
compiler::CompileJSToJSWrapper(isolate, sig, nullptr).ToHandleChecked();
+ // WasmJSFunctions use on-heap Code objects as call targets, so we can't
+ // cache the target address, unless the WasmJSFunction wraps a
+ // WasmExportedFunction.
+ Address call_target = kNullAddress;
+ if (WasmExportedFunction::IsWasmExportedFunction(*callable)) {
+ call_target = WasmExportedFunction::cast(*callable).GetWasmCallTarget();
+ }
+
Factory* factory = isolate->factory();
- Handle<WasmJSFunctionData> function_data = Handle<WasmJSFunctionData>::cast(
- factory->NewStruct(WASM_JS_FUNCTION_DATA_TYPE, AllocationType::kOld));
- function_data->set_serialized_return_count(return_count);
- function_data->set_serialized_parameter_count(parameter_count);
- function_data->set_serialized_signature(*serialized_sig);
- function_data->set_callable(*callable);
- function_data->set_wrapper_code(*wrapper_code);
- // Use Abort() as a default value (it will never be called if not overwritten
- // below).
- function_data->set_wasm_to_js_wrapper_code(
- isolate->heap()->builtin(Builtins::kAbort));
+ Handle<WasmJSFunctionData> function_data = factory->NewWasmJSFunctionData(
+ call_target, callable, return_count, parameter_count, serialized_sig,
+ wrapper_code);
if (wasm::WasmFeatures::FromIsolate(isolate).has_typed_funcref()) {
using CK = compiler::WasmImportCallKind;
@@ -2094,7 +2065,8 @@ Handle<WasmJSFunction> WasmJSFunction::New(Isolate* isolate,
}
JSReceiver WasmJSFunction::GetCallable() const {
- return shared().wasm_js_function_data().callable();
+ return JSReceiver::cast(
+ Tuple2::cast(shared().wasm_js_function_data().ref()).value2());
}
const wasm::FunctionSig* WasmJSFunction::GetSignature(Zone* zone) {
@@ -2256,7 +2228,8 @@ bool TypecheckJSObject(Isolate* isolate, const WasmModule* module,
if (WasmJSFunction::IsWasmJSFunction(*value)) {
// Since a WasmJSFunction cannot refer to indexed types (definable
- // only in a module), we do not need to use EquivalentTypes().
+ // only in a module), we do not need full function subtyping.
+ // TODO(manoskouk): Change this if wasm types can be exported.
if (!WasmJSFunction::cast(*value).MatchesSignature(
module->signature(expected.ref_index()))) {
*error_message =
@@ -2268,11 +2241,12 @@ bool TypecheckJSObject(Isolate* isolate, const WasmModule* module,
}
if (WasmCapiFunction::IsWasmCapiFunction(*value)) {
+ // Since a WasmCapiFunction cannot refer to indexed types
+ // (definable only in a module), we do not need full function
+ // subtyping.
+ // TODO(manoskouk): Change this if wasm types can be exported.
if (!WasmCapiFunction::cast(*value).MatchesSignature(
module->signature(expected.ref_index()))) {
- // Since a WasmCapiFunction cannot refer to indexed types
- // (definable in a module), we don't need to invoke
- // IsEquivalentType();
*error_message =
"assigned WasmCapiFunction has to be a subtype of the "
"expected type";
diff --git a/chromium/v8/src/wasm/wasm-objects.h b/chromium/v8/src/wasm/wasm-objects.h
index 47bef60ac71..473c4725cc4 100644
--- a/chromium/v8/src/wasm/wasm-objects.h
+++ b/chromium/v8/src/wasm/wasm-objects.h
@@ -166,17 +166,11 @@ class WasmModuleObject : public JSObject {
Handle<WasmModuleObject>,
uint32_t func_index);
- // Get the function name of the function identified by the given index.
- // Returns "func[func_index]" if the function is unnamed or the
- // name is not a valid UTF-8 string.
- static Handle<String> GetFunctionName(Isolate*, Handle<WasmModuleObject>,
- uint32_t func_index);
-
// Get the raw bytes of the function name of the function identified by the
// given index.
// Meant to be used for debugging or frame printing.
// Does not allocate, hence gc-safe.
- Vector<const uint8_t> GetRawFunctionName(uint32_t func_index);
+ Vector<const uint8_t> GetRawFunctionName(int func_index);
// Extract a portion of the wire bytes as UTF-8 string, optionally
// internalized. (Prefer to internalize early if the string will be used for a
@@ -747,19 +741,31 @@ class WasmIndirectFunctionTable : public Struct {
OBJECT_CONSTRUCTORS(WasmIndirectFunctionTable, Struct);
};
+class WasmFunctionData
+ : public TorqueGeneratedWasmFunctionData<WasmFunctionData, Foreign> {
+ public:
+ DECL_ACCESSORS(ref, Object)
+
+ DECL_CAST(WasmFunctionData)
+ DECL_PRINTER(WasmFunctionData)
+
+ TQ_OBJECT_CONSTRUCTORS(WasmFunctionData)
+};
+
// Information for a WasmExportedFunction which is referenced as the function
// data of the SharedFunctionInfo underlying the function. For details please
// see the {SharedFunctionInfo::HasWasmExportedFunctionData} predicate.
-class WasmExportedFunctionData : public Struct {
+class WasmExportedFunctionData : public WasmFunctionData {
public:
DECL_ACCESSORS(wrapper_code, Code)
+ // This is the instance that exported the function (which, in the case of
+ // imported and re-exported functions, is different from the instance where
+ // the function is defined -- for the latter, see WasmFunctionData::ref).
DECL_ACCESSORS(instance, WasmInstanceObject)
- DECL_INT_ACCESSORS(jump_table_offset)
DECL_INT_ACCESSORS(function_index)
DECL_ACCESSORS(signature, Foreign)
DECL_INT_ACCESSORS(wrapper_budget)
DECL_ACCESSORS(c_wrapper_code, Object)
- DECL_ACCESSORS(wasm_call_target, Object)
DECL_INT_ACCESSORS(packed_args_size)
inline wasm::FunctionSig* sig() const;
@@ -772,21 +778,22 @@ class WasmExportedFunctionData : public Struct {
// Layout description.
DEFINE_FIELD_OFFSET_CONSTANTS(
- HeapObject::kHeaderSize,
+ WasmFunctionData::kSize,
TORQUE_GENERATED_WASM_EXPORTED_FUNCTION_DATA_FIELDS)
- OBJECT_CONSTRUCTORS(WasmExportedFunctionData, Struct);
+ class BodyDescriptor;
+
+ OBJECT_CONSTRUCTORS(WasmExportedFunctionData, WasmFunctionData);
};
// Information for a WasmJSFunction which is referenced as the function data of
// the SharedFunctionInfo underlying the function. For details please see the
// {SharedFunctionInfo::HasWasmJSFunctionData} predicate.
-class WasmJSFunctionData : public Struct {
+class WasmJSFunctionData : public WasmFunctionData {
public:
DECL_INT_ACCESSORS(serialized_return_count)
DECL_INT_ACCESSORS(serialized_parameter_count)
DECL_ACCESSORS(serialized_signature, PodArray<wasm::ValueType>)
- DECL_ACCESSORS(callable, JSReceiver)
DECL_ACCESSORS(wrapper_code, Code)
DECL_ACCESSORS(wasm_to_js_wrapper_code, Code)
@@ -797,10 +804,12 @@ class WasmJSFunctionData : public Struct {
DECL_VERIFIER(WasmJSFunctionData)
// Layout description.
- DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
+ DEFINE_FIELD_OFFSET_CONSTANTS(WasmFunctionData::kSize,
TORQUE_GENERATED_WASM_JS_FUNCTION_DATA_FIELDS)
- OBJECT_CONSTRUCTORS(WasmJSFunctionData, Struct);
+ class BodyDescriptor;
+
+ OBJECT_CONSTRUCTORS(WasmJSFunctionData, WasmFunctionData);
};
class WasmScript : public AllStatic {
@@ -915,6 +924,8 @@ class WasmStruct : public TorqueGeneratedWasmStruct<WasmStruct, HeapObject> {
static inline wasm::StructType* type(Map map);
inline wasm::StructType* type() const;
static inline wasm::StructType* GcSafeType(Map map);
+ static inline int Size(const wasm::StructType* type);
+ static inline int GcSafeSize(Map map);
inline ObjectSlot RawField(int raw_offset);
diff --git a/chromium/v8/src/wasm/wasm-objects.tq b/chromium/v8/src/wasm/wasm-objects.tq
index adcf63ba879..13911e590df 100644
--- a/chromium/v8/src/wasm/wasm-objects.tq
+++ b/chromium/v8/src/wasm/wasm-objects.tq
@@ -11,22 +11,31 @@ type ManagedWasmNativeModule extends Foreign
extern class WasmInstanceObject extends JSObject;
-extern class WasmExportedFunctionData extends Struct {
+@generateCppClass
+extern class WasmFunctionData extends Foreign {
+ // This is the "reference" value that must be passed along in the "instance"
+ // register when calling the given function. It is either the target instance,
+ // or a pair holding the target instance and the callable; currently the
+ // latter is the case when the function being called is defined in JavaScript
+ // or via the C-API.
+ // For imported functions, this value equals the respective entry in
+ // the module's imported_function_refs array.
+ ref: WasmInstanceObject|Tuple2;
+}
+
+extern class WasmExportedFunctionData extends WasmFunctionData {
wrapper_code: Code;
instance: WasmInstanceObject;
- jump_table_offset: Smi;
function_index: Smi;
signature: Foreign;
wrapper_budget: Smi;
// The remaining fields are for fast calling from C++. The contract is
// that they are lazily populated, and either all will be present or none.
c_wrapper_code: Object;
- wasm_call_target: Smi|Foreign;
packed_args_size: Smi;
}
-extern class WasmJSFunctionData extends Struct {
- callable: JSReceiver;
+extern class WasmJSFunctionData extends WasmFunctionData {
wrapper_code: Code;
wasm_to_js_wrapper_code: Code;
serialized_return_count: Smi;
@@ -34,6 +43,7 @@ extern class WasmJSFunctionData extends Struct {
serialized_signature: PodArrayOfWasmValueType;
}
+// TODO(jkummerow): Derive from WasmFunctionData.
@export
class WasmCapiFunctionData extends HeapObject {
call_target: RawPtr;
@@ -107,6 +117,8 @@ extern class AsmWasmData extends Struct {
extern class WasmTypeInfo extends Foreign {
supertypes: FixedArray;
subtypes: ArrayList;
+ // In bytes, used for struct allocation.
+ instance_size: Smi;
}
@generateCppClass
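As described in the comment on {WasmFunctionData.ref}, the field is either the target instance itself or a {Tuple2} pairing the instance with a JS callable; callers that need the callable therefore unwrap the tuple, as the updated {WasmJSFunction::GetCallable} does. A hypothetical helper sketching both access paths; only "value2 holds the callable" is visible in this patch, the value1 part is an inference from the comment.

// Hypothetical helper, mirroring the GetCallable() change in wasm-objects.cc.
Object UnpackFunctionDataRef(WasmFunctionData data, bool want_callable) {
  Object ref = data.ref();
  if (ref.IsTuple2()) {
    Tuple2 pair = Tuple2::cast(ref);
    // value2 holds the JS callable (see GetCallable()); value1 presumably
    // holds the target instance.
    return want_callable ? pair.value2() : pair.value1();
  }
  // Plain case: ref is the target WasmInstanceObject itself.
  return ref;
}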
diff --git a/chromium/v8/src/wasm/wasm-opcodes-inl.h b/chromium/v8/src/wasm/wasm-opcodes-inl.h
index 6b124b2dbc3..a63b46ee0e3 100644
--- a/chromium/v8/src/wasm/wasm-opcodes-inl.h
+++ b/chromium/v8/src/wasm/wasm-opcodes-inl.h
@@ -162,6 +162,7 @@ constexpr const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_OP(CallRef, "call_ref")
CASE_OP(ReturnCallRef, "return_call_ref")
CASE_OP(BrOnNull, "br_on_null")
+ CASE_OP(BrOnNonNull, "br_on_non_null")
CASE_OP(Drop, "drop")
CASE_OP(Select, "select")
CASE_OP(SelectWithType, "select")
@@ -629,16 +630,12 @@ constexpr const FunctionSig* WasmOpcodes::Signature(WasmOpcode opcode) {
case kNumericPrefix:
return impl::kCachedSigs[impl::kNumericExprSigTable[opcode & 0xFF]];
default:
-#if V8_HAS_CXX14_CONSTEXPR
UNREACHABLE(); // invalid prefix.
-#else
- return nullptr;
-#endif
}
}
constexpr const FunctionSig* WasmOpcodes::AsmjsSignature(WasmOpcode opcode) {
- CONSTEXPR_DCHECK(opcode < impl::kSimpleAsmjsExprSigTable.size());
+ DCHECK_GT(impl::kSimpleAsmjsExprSigTable.size(), opcode);
return impl::kCachedSigs[impl::kSimpleAsmjsExprSigTable[opcode]];
}
diff --git a/chromium/v8/src/wasm/wasm-opcodes.cc b/chromium/v8/src/wasm/wasm-opcodes.cc
index 94618077519..e4c0d19a058 100644
--- a/chromium/v8/src/wasm/wasm-opcodes.cc
+++ b/chromium/v8/src/wasm/wasm-opcodes.cc
@@ -35,9 +35,6 @@ std::ostream& operator<<(std::ostream& os, const FunctionSig& sig) {
// https://chromium-review.googlesource.com/c/v8/v8/+/2413251).
bool IsJSCompatibleSignature(const FunctionSig* sig, const WasmModule* module,
const WasmFeatures& enabled_features) {
- if (!enabled_features.has_mv() && sig->return_count() > 1) {
- return false;
- }
for (auto type : sig->all()) {
// TODO(7748): Allow structs, arrays, and rtts when their JS-interaction is
// decided on.
diff --git a/chromium/v8/src/wasm/wasm-opcodes.h b/chromium/v8/src/wasm/wasm-opcodes.h
index 909cacadd25..8bd8d1d4634 100644
--- a/chromium/v8/src/wasm/wasm-opcodes.h
+++ b/chromium/v8/src/wasm/wasm-opcodes.h
@@ -52,6 +52,7 @@ bool V8_EXPORT_PRIVATE IsJSCompatibleSignature(const FunctionSig* sig,
V(Delegate, 0x18, _ /* eh_prototype */) \
V(CatchAll, 0x19, _ /* eh_prototype */) \
V(BrOnNull, 0xd4, _ /* gc prototype */) \
+ V(BrOnNonNull, 0xd6, _ /* gc prototype */) \
V(NopForTestingUnsupportedInLiftoff, 0x16, _)
// Constants, locals, globals, and calls.
@@ -252,37 +253,37 @@ bool V8_EXPORT_PRIVATE IsJSCompatibleSignature(const FunctionSig* sig,
// These opcodes are not spec'ed (or visible) externally; the idea is
// to use unused ranges for internal purposes.
#define FOREACH_ASMJS_COMPAT_OPCODE(V) \
- V(F64Acos, 0xc5, d_d) \
- V(F64Asin, 0xc6, d_d) \
- V(F64Atan, 0xc7, d_d) \
- V(F64Cos, 0xc8, d_d) \
- V(F64Sin, 0xc9, d_d) \
- V(F64Tan, 0xca, d_d) \
- V(F64Exp, 0xcb, d_d) \
- V(F64Log, 0xcc, d_d) \
- V(F64Atan2, 0xcd, d_dd) \
- V(F64Pow, 0xce, d_dd) \
- V(F64Mod, 0xcf, d_dd) \
+ V(F64Acos, 0xdc, d_d) \
+ V(F64Asin, 0xdd, d_d) \
+ V(F64Atan, 0xde, d_d) \
+ V(F64Cos, 0xdf, d_d) \
+ V(F64Sin, 0xe0, d_d) \
+ V(F64Tan, 0xe1, d_d) \
+ V(F64Exp, 0xe2, d_d) \
+ V(F64Log, 0xe3, d_d) \
+ V(F64Atan2, 0xe4, d_dd) \
+ V(F64Pow, 0xe5, d_dd) \
+ V(F64Mod, 0xe6, d_dd) \
V(I32AsmjsDivS, 0xe7, i_ii) \
V(I32AsmjsDivU, 0xe8, i_ii) \
V(I32AsmjsRemS, 0xe9, i_ii) \
- V(I32AsmjsRemU, 0xd6, i_ii) \
- V(I32AsmjsLoadMem8S, 0xd7, i_i) \
- V(I32AsmjsLoadMem8U, 0xd8, i_i) \
- V(I32AsmjsLoadMem16S, 0xd9, i_i) \
- V(I32AsmjsLoadMem16U, 0xda, i_i) \
- V(I32AsmjsLoadMem, 0xdb, i_i) \
- V(F32AsmjsLoadMem, 0xdc, f_i) \
- V(F64AsmjsLoadMem, 0xdd, d_i) \
- V(I32AsmjsStoreMem8, 0xde, i_ii) \
- V(I32AsmjsStoreMem16, 0xdf, i_ii) \
- V(I32AsmjsStoreMem, 0xe0, i_ii) \
- V(F32AsmjsStoreMem, 0xe1, f_if) \
- V(F64AsmjsStoreMem, 0xe2, d_id) \
- V(I32AsmjsSConvertF32, 0xe3, i_f) \
- V(I32AsmjsUConvertF32, 0xe4, i_f) \
- V(I32AsmjsSConvertF64, 0xe5, i_d) \
- V(I32AsmjsUConvertF64, 0xe6, i_d)
+ V(I32AsmjsRemU, 0xea, i_ii) \
+ V(I32AsmjsLoadMem8S, 0xeb, i_i) \
+ V(I32AsmjsLoadMem8U, 0xec, i_i) \
+ V(I32AsmjsLoadMem16S, 0xed, i_i) \
+ V(I32AsmjsLoadMem16U, 0xee, i_i) \
+ V(I32AsmjsLoadMem, 0xef, i_i) \
+ V(F32AsmjsLoadMem, 0xf0, f_i) \
+ V(F64AsmjsLoadMem, 0xf1, d_i) \
+ V(I32AsmjsStoreMem8, 0xf2, i_ii) \
+ V(I32AsmjsStoreMem16, 0xf3, i_ii) \
+ V(I32AsmjsStoreMem, 0xf4, i_ii) \
+ V(F32AsmjsStoreMem, 0xf5, f_if) \
+ V(F64AsmjsStoreMem, 0xf6, d_id) \
+ V(I32AsmjsSConvertF32, 0xf7, i_f) \
+ V(I32AsmjsUConvertF32, 0xf8, i_f) \
+ V(I32AsmjsSConvertF64, 0xf9, i_d) \
+ V(I32AsmjsUConvertF64, 0xfa, i_d)
#define FOREACH_SIMD_MEM_OPCODE(V) \
V(S128LoadMem, 0xfd00, s_i) \
@@ -759,10 +760,10 @@ bool V8_EXPORT_PRIVATE IsJSCompatibleSignature(const FunctionSig* sig,
V(s_is, kWasmS128, kWasmI32, kWasmS128)
#define FOREACH_PREFIX(V) \
+ V(GC, 0xfb) \
V(Numeric, 0xfc) \
V(Simd, 0xfd) \
- V(Atomic, 0xfe) \
- V(GC, 0xfb)
+ V(Atomic, 0xfe)
enum WasmOpcode {
// Declare expression opcodes.
@@ -801,129 +802,6 @@ class V8_EXPORT_PRIVATE WasmOpcodes {
static inline const char* TrapReasonMessage(TrapReason);
};
-// Representation of an initializer expression.
-class WasmInitExpr {
- public:
- enum Operator {
- kNone,
- kGlobalGet,
- kI32Const,
- kI64Const,
- kF32Const,
- kF64Const,
- kS128Const,
- kRefNullConst,
- kRefFuncConst,
- kRttCanon,
- kRttSub
- };
-
- union Immediate {
- int32_t i32_const;
- int64_t i64_const;
- float f32_const;
- double f64_const;
- std::array<uint8_t, kSimd128Size> s128_const;
- uint32_t index;
- HeapType::Representation heap_type;
- };
-
- WasmInitExpr() : kind_(kNone) { immediate_.i32_const = 0; }
- explicit WasmInitExpr(int32_t v) : kind_(kI32Const) {
- immediate_.i32_const = v;
- }
- explicit WasmInitExpr(int64_t v) : kind_(kI64Const) {
- immediate_.i64_const = v;
- }
- explicit WasmInitExpr(float v) : kind_(kF32Const) {
- immediate_.f32_const = v;
- }
- explicit WasmInitExpr(double v) : kind_(kF64Const) {
- immediate_.f64_const = v;
- }
- explicit WasmInitExpr(uint8_t v[kSimd128Size]) : kind_(kS128Const) {
- base::Memcpy(immediate_.s128_const.data(), v, kSimd128Size);
- }
-
- MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(WasmInitExpr);
-
- static WasmInitExpr GlobalGet(uint32_t index) {
- WasmInitExpr expr;
- expr.kind_ = kGlobalGet;
- expr.immediate_.index = index;
- return expr;
- }
-
- static WasmInitExpr RefFuncConst(uint32_t index) {
- WasmInitExpr expr;
- expr.kind_ = kRefFuncConst;
- expr.immediate_.index = index;
- return expr;
- }
-
- static WasmInitExpr RefNullConst(HeapType::Representation heap_type) {
- WasmInitExpr expr;
- expr.kind_ = kRefNullConst;
- expr.immediate_.heap_type = heap_type;
- return expr;
- }
-
- static WasmInitExpr RttCanon(uint32_t index) {
- WasmInitExpr expr;
- expr.kind_ = kRttCanon;
- expr.immediate_.index = index;
- return expr;
- }
-
- static WasmInitExpr RttSub(uint32_t index, WasmInitExpr supertype) {
- WasmInitExpr expr;
- expr.kind_ = kRttSub;
- expr.immediate_.index = index;
- expr.operand_ = std::make_unique<WasmInitExpr>(std::move(supertype));
- return expr;
- }
-
- Immediate immediate() const { return immediate_; }
- Operator kind() const { return kind_; }
- WasmInitExpr* operand() const { return operand_.get(); }
-
- bool operator==(const WasmInitExpr& other) const {
- if (kind() != other.kind()) return false;
- switch (kind()) {
- case kNone:
- return true;
- case kGlobalGet:
- case kRefFuncConst:
- case kRttCanon:
- return immediate().index == other.immediate().index;
- case kI32Const:
- return immediate().i32_const == other.immediate().i32_const;
- case kI64Const:
- return immediate().i64_const == other.immediate().i64_const;
- case kF32Const:
- return immediate().f32_const == other.immediate().f32_const;
- case kF64Const:
- return immediate().f64_const == other.immediate().f64_const;
- case kS128Const:
- return immediate().s128_const == other.immediate().s128_const;
- case kRefNullConst:
- return immediate().heap_type == other.immediate().heap_type;
- case kRttSub:
- return immediate().index == other.immediate().index &&
- *operand() == *other.operand();
- }
- }
-
- V8_INLINE bool operator!=(const WasmInitExpr& other) {
- return !(*this == other);
- }
-
- private:
- Immediate immediate_;
- Operator kind_;
- std::unique_ptr<WasmInitExpr> operand_ = nullptr;
-};
-
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/chromium/v8/src/wasm/wasm-serialization.cc b/chromium/v8/src/wasm/wasm-serialization.cc
index b2e6f0c4d8a..a47e420cb11 100644
--- a/chromium/v8/src/wasm/wasm-serialization.cc
+++ b/chromium/v8/src/wasm/wasm-serialization.cc
@@ -567,6 +567,8 @@ class CopyAndRelocTask : public JobTask {
void Run(JobDelegate* delegate) override {
CODE_SPACE_WRITE_SCOPE
+ NativeModuleModificationScope native_module_modification_scope(
+ deserializer_->native_module_);
do {
auto batch = from_queue_->Pop();
if (batch.empty()) break;
@@ -712,11 +714,9 @@ DeserializationUnit NativeModuleDeserializer::ReadCode(int fn_index,
constexpr size_t kMaxReservation =
RoundUp<kCodeAlignment>(WasmCodeAllocator::kMaxCodeSpaceSize * 9 / 10);
size_t code_space_size = std::min(kMaxReservation, remaining_code_size_);
- current_code_space_ =
+ std::tie(current_code_space_, current_jump_tables_) =
native_module_->AllocateForDeserializedCode(code_space_size);
DCHECK_EQ(current_code_space_.size(), code_space_size);
- current_jump_tables_ = native_module_->FindJumpTablesForRegion(
- base::AddressRegionOf(current_code_space_));
DCHECK(current_jump_tables_.is_valid());
}
diff --git a/chromium/v8/src/wasm/wasm-subtyping.cc b/chromium/v8/src/wasm/wasm-subtyping.cc
index b0e8105a605..d2b7e9fe31d 100644
--- a/chromium/v8/src/wasm/wasm-subtyping.cc
+++ b/chromium/v8/src/wasm/wasm-subtyping.cc
@@ -91,6 +91,26 @@ class TypeJudgementCache {
type_equivalence_cache_.erase(
std::make_tuple(type1, type2, module1, module2));
}
+ void delete_module(const WasmModule* module) {
+ for (auto iterator = type_equivalence_cache_.begin();
+ iterator != type_equivalence_cache_.end();) {
+ if (std::get<2>(*iterator) == module ||
+ std::get<3>(*iterator) == module) {
+ iterator = type_equivalence_cache_.erase(iterator);
+ } else {
+ iterator++;
+ }
+ }
+ for (auto iterator = subtyping_cache_.begin();
+ iterator != subtyping_cache_.end();) {
+ if (std::get<2>(*iterator) == module ||
+ std::get<3>(*iterator) == module) {
+ iterator = subtyping_cache_.erase(iterator);
+ } else {
+ iterator++;
+ }
+ }
+ }
private:
Zone zone_;
@@ -258,14 +278,46 @@ bool ArrayIsSubtypeOf(uint32_t subtype_index, uint32_t supertype_index,
}
}
-// TODO(7748): Expand this with function subtyping when it is introduced.
bool FunctionIsSubtypeOf(uint32_t subtype_index, uint32_t supertype_index,
const WasmModule* sub_module,
const WasmModule* super_module) {
- return FunctionEquivalentIndices(subtype_index, supertype_index, sub_module,
- super_module);
-}
+ if (!FLAG_experimental_wasm_gc) {
+ return FunctionEquivalentIndices(subtype_index, supertype_index, sub_module,
+ super_module);
+ }
+ const FunctionSig* sub_func = sub_module->types[subtype_index].function_sig;
+ const FunctionSig* super_func =
+ super_module->types[supertype_index].function_sig;
+
+ if (sub_func->parameter_count() != super_func->parameter_count() ||
+ sub_func->return_count() != super_func->return_count()) {
+ return false;
+ }
+
+ TypeJudgementCache::instance()->cache_subtype(subtype_index, supertype_index,
+ sub_module, super_module);
+
+ for (uint32_t i = 0; i < sub_func->parameter_count(); i++) {
+ // Contravariance for params.
+ if (!IsSubtypeOf(super_func->parameters()[i], sub_func->parameters()[i],
+ super_module, sub_module)) {
+ TypeJudgementCache::instance()->uncache_subtype(
+ subtype_index, supertype_index, sub_module, super_module);
+ return false;
+ }
+ }
+ for (uint32_t i = 0; i < sub_func->return_count(); i++) {
+ // Covariance for returns.
+ if (!IsSubtypeOf(sub_func->returns()[i], super_func->returns()[i],
+ sub_module, super_module)) {
+ TypeJudgementCache::instance()->uncache_subtype(
+ subtype_index, supertype_index, sub_module, super_module);
+ return false;
+ }
+ }
+ return true;
+}
} // namespace
V8_NOINLINE V8_EXPORT_PRIVATE bool IsSubtypeOfImpl(
@@ -403,11 +455,12 @@ V8_NOINLINE bool EquivalentTypes(ValueType type1, ValueType type2,
module2);
}
-ValueType CommonSubtype(ValueType a, ValueType b, const WasmModule* module) {
- if (a == b) return a;
- if (IsSubtypeOf(a, b, module)) return a;
- if (IsSubtypeOf(b, a, module)) return b;
- return kWasmBottom;
+void DeleteCachedTypeJudgementsForModule(const WasmModule* module) {
+ // Accessing the caches for subtyping and equivalence from multiple background
+ // threads is protected by a lock.
+ base::RecursiveMutexGuard type_cache_access(
+ TypeJudgementCache::instance()->type_cache_mutex());
+ TypeJudgementCache::instance()->delete_module(module);
}
} // namespace wasm
diff --git a/chromium/v8/src/wasm/wasm-subtyping.h b/chromium/v8/src/wasm/wasm-subtyping.h
index 0c35f7c4708..59e7935d1f1 100644
--- a/chromium/v8/src/wasm/wasm-subtyping.h
+++ b/chromium/v8/src/wasm/wasm-subtyping.h
@@ -60,8 +60,10 @@ V8_NOINLINE bool EquivalentTypes(ValueType type1, ValueType type2,
// - Struct subtyping: Subtype must have at least as many fields as supertype,
// covariance for immutable fields, equivalence for mutable fields.
// - Array subtyping (mutable only) is the equivalence relation.
-// - Function subtyping is the equivalence relation (note: this rule might
-// change in the future to include type variance).
+// - Function subtyping depends on the enabled wasm features: if
+// --experimental-wasm-gc is enabled, then subtyping is computed
+// contravariantly for parameter types and covariantly for return types.
+// Otherwise, the subtyping relation is the equivalence relation.
V8_INLINE bool IsSubtypeOf(ValueType subtype, ValueType supertype,
const WasmModule* sub_module,
const WasmModule* super_module) {
@@ -91,11 +93,9 @@ V8_INLINE bool IsHeapSubtypeOf(uint32_t subtype_index, uint32_t supertype_index,
ValueType::Ref(supertype_index, kNonNullable), module);
}
-// Returns the weakest type that is a subtype of both a and b
-// (which is currently always one of a, b, or kWasmBottom).
-// TODO(manoskouk): Update this once we have settled on a type system for
-// reference types.
-ValueType CommonSubtype(ValueType a, ValueType b, const WasmModule* module);
+// Call this function in {module}'s destructor to avoid spurious cache hits in
+// case another WasmModule gets allocated in the same address later.
+void DeleteCachedTypeJudgementsForModule(const WasmModule* module);
} // namespace wasm
} // namespace internal
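The new FunctionIsSubtypeOf above applies structural variance only when --experimental-wasm-gc is enabled; otherwise function subtyping remains the equivalence relation. Below is a minimal standalone sketch of that variance rule on a toy type lattice; the names are illustrative and are not V8's ValueType or WasmModule machinery.

// Sketch only: contravariant parameters, covariant returns, as described in
// the wasm-subtyping comment above. "Type" is a toy three-element lattice.
#include <cstddef>
#include <vector>

namespace sketch {

enum class Type { kAnyRef, kEqRef, kI31Ref };  // toy hierarchy: i31ref <: eqref <: anyref

bool IsSubtype(Type sub, Type super) {
  if (sub == super) return true;
  if (super == Type::kAnyRef) return true;
  return sub == Type::kI31Ref && super == Type::kEqRef;
}

struct FuncSig {
  std::vector<Type> params;
  std::vector<Type> returns;
};

// sub <: super iff arities match, every supertype parameter is a subtype of
// the corresponding subtype parameter (contravariance), and every subtype
// return is a subtype of the corresponding supertype return (covariance).
bool FunctionIsSubtype(const FuncSig& sub, const FuncSig& super) {
  if (sub.params.size() != super.params.size() ||
      sub.returns.size() != super.returns.size()) {
    return false;
  }
  for (std::size_t i = 0; i < sub.params.size(); ++i) {
    if (!IsSubtype(super.params[i], sub.params[i])) return false;  // contravariant
  }
  for (std::size_t i = 0; i < sub.returns.size(); ++i) {
    if (!IsSubtype(sub.returns[i], super.returns[i])) return false;  // covariant
  }
  return true;
}

}  // namespace sketch

Under this rule, a function of type (eqref) -> i31ref is a subtype of (i31ref) -> eqref: it accepts at least the supertype's parameters and returns no more than the supertype promises.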
diff --git a/chromium/v8/src/wasm/wasm-value.h b/chromium/v8/src/wasm/wasm-value.h
index 0a1d2b69e25..faaad180760 100644
--- a/chromium/v8/src/wasm/wasm-value.h
+++ b/chromium/v8/src/wasm/wasm-value.h
@@ -12,7 +12,7 @@
#include "src/base/memory.h"
#include "src/handles/handles.h"
#include "src/utils/boxed-float.h"
-#include "src/wasm/wasm-opcodes.h"
+#include "src/wasm/value-type.h"
#include "src/zone/zone-containers.h"
namespace v8 {
diff --git a/chromium/v8/src/web-snapshot/web-snapshot.cc b/chromium/v8/src/web-snapshot/web-snapshot.cc
index 16bbc9af3e4..f36698a351e 100644
--- a/chromium/v8/src/web-snapshot/web-snapshot.cc
+++ b/chromium/v8/src/web-snapshot/web-snapshot.cc
@@ -11,6 +11,7 @@
#include "src/base/platform/wrappers.h"
#include "src/handles/handles.h"
#include "src/objects/contexts.h"
+#include "src/objects/js-regexp-inl.h"
#include "src/objects/script.h"
namespace v8 {
@@ -34,8 +35,8 @@ void WebSnapshotSerializerDeserializer::Throw(const char* message) {
error_message_ = message;
if (!isolate_->has_pending_exception()) {
v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate_);
- v8_isolate->ThrowException(v8::Exception::Error(
- v8::String::NewFromUtf8(v8_isolate, message).ToLocalChecked()));
+ v8_isolate->ThrowError(
+ v8::String::NewFromUtf8(v8_isolate, message).ToLocalChecked());
}
}
@@ -238,7 +239,10 @@ void WebSnapshotSerializer::SerializeMap(Handle<Map> map, uint32_t& id) {
// Format (serialized function):
// - 0 if there's no context, 1 + context id otherwise
-// - String id (source string)
+// - String id (source snippet)
+// - Start position in the source snippet
+// - Length in the source snippet
+// TODO(v8:11525): Investigate whether the length is really needed.
void WebSnapshotSerializer::SerializeFunction(Handle<JSFunction> function,
uint32_t& id) {
if (InsertIntoIndexMap(function_ids_, function, id)) {
@@ -251,7 +255,7 @@ void WebSnapshotSerializer::SerializeFunction(Handle<JSFunction> function,
}
Handle<Context> context(function->context(), isolate_);
- if (context->IsNativeContext()) {
+ if (context->IsNativeContext() || context->IsScriptContext()) {
function_serializer_.WriteUint32(0);
} else {
DCHECK(context->IsFunctionContext());
@@ -260,20 +264,19 @@ void WebSnapshotSerializer::SerializeFunction(Handle<JSFunction> function,
function_serializer_.WriteUint32(context_id + 1);
}
- // TODO(v8:11525): For inner functions which occur inside a serialized
- // function, create a "substring" type, so that we don't need to serialize the
- // same content twice.
+ // TODO(v8:11525): Don't write the full source but instead a set of minimal
+ // snippets which cover the serialized functions.
Handle<String> full_source(
String::cast(Script::cast(function->shared().script()).source()),
isolate_);
- int start = function->shared().StartPosition();
- int end = function->shared().EndPosition();
- Handle<String> source =
- isolate_->factory()->NewSubString(full_source, start, end);
uint32_t source_id = 0;
- SerializeString(source, source_id);
+ SerializeString(full_source, source_id);
function_serializer_.WriteUint32(source_id);
+ int start = function->shared().StartPosition();
+ function_serializer_.WriteUint32(start);
+ int end = function->shared().EndPosition();
+ function_serializer_.WriteUint32(end - start);
// TODO(v8:11525): Serialize .prototype.
// TODO(v8:11525): Support properties in functions.
}
@@ -297,7 +300,8 @@ void WebSnapshotSerializer::SerializeContext(Handle<Context> context,
}
uint32_t parent_context_id = 0;
- if (!context->previous().IsNativeContext()) {
+ if (!context->previous().IsNativeContext() &&
+ !context->previous().IsScriptContext()) {
SerializeContext(handle(context->previous(), isolate_), parent_context_id);
++parent_context_id;
}
@@ -387,18 +391,35 @@ void WebSnapshotSerializer::WriteValue(Handle<Object> object,
ValueSerializer& serializer) {
uint32_t id = 0;
if (object->IsSmi()) {
- // TODO(v8:11525): Implement.
- UNREACHABLE();
+ serializer.WriteUint32(ValueType::INTEGER);
+ serializer.WriteZigZag<int32_t>(Smi::cast(*object).value());
+ return;
}
DCHECK(object->IsHeapObject());
switch (HeapObject::cast(*object).map().instance_type()) {
case ODDBALL_TYPE:
- // TODO(v8:11525): Implement.
- UNREACHABLE();
+ switch (Oddball::cast(*object).kind()) {
+ case Oddball::kFalse:
+ serializer.WriteUint32(ValueType::FALSE_CONSTANT);
+ return;
+ case Oddball::kTrue:
+ serializer.WriteUint32(ValueType::TRUE_CONSTANT);
+ return;
+ case Oddball::kNull:
+ serializer.WriteUint32(ValueType::NULL_CONSTANT);
+ return;
+ case Oddball::kUndefined:
+ serializer.WriteUint32(ValueType::UNDEFINED_CONSTANT);
+ return;
+ default:
+ UNREACHABLE();
+ }
case HEAP_NUMBER_TYPE:
- // TODO(v8:11525): Implement.
- UNREACHABLE();
+ // TODO(v8:11525): Handle possible endianness mismatch.
+ serializer.WriteUint32(ValueType::DOUBLE);
+ serializer.WriteDouble(HeapNumber::cast(*object).value());
+ break;
case JS_FUNCTION_TYPE:
SerializeFunction(Handle<JSFunction>::cast(object), id);
serializer.WriteUint32(ValueType::FUNCTION_ID);
@@ -409,6 +430,23 @@ void WebSnapshotSerializer::WriteValue(Handle<Object> object,
serializer.WriteUint32(ValueType::OBJECT_ID);
serializer.WriteUint32(id);
break;
+ case JS_REG_EXP_TYPE: {
+ Handle<JSRegExp> regexp = Handle<JSRegExp>::cast(object);
+ if (regexp->map() != isolate_->regexp_function()->initial_map()) {
+ Throw("Web snapshot: Unsupported RegExp map");
+ return;
+ }
+ uint32_t pattern_id, flags_id;
+ Handle<String> pattern = handle(regexp->Pattern(), isolate_);
+ Handle<String> flags_string =
+ JSRegExp::StringFromFlags(isolate_, regexp->GetFlags());
+ SerializeString(pattern, pattern_id);
+ SerializeString(flags_string, flags_id);
+ serializer.WriteUint32(ValueType::REGEXP);
+ serializer.WriteUint32(pattern_id);
+ serializer.WriteUint32(flags_id);
+ break;
+ }
default:
if (object->IsString()) {
SerializeString(Handle<String>::cast(object), id);
@@ -669,8 +707,21 @@ void WebSnapshotDeserializer::DeserializeFunctions() {
Throw("Web snapshot: Malformed function table");
return;
}
- STATIC_ASSERT(kMaxItemCount <= FixedArray::kMaxLength);
+ STATIC_ASSERT(kMaxItemCount + 1 <= FixedArray::kMaxLength);
functions_ = isolate_->factory()->NewFixedArray(function_count_);
+
+ Handle<Script> script =
+ isolate_->factory()->NewScript(isolate_->factory()->empty_string());
+ script->set_type(Script::TYPE_WEB_SNAPSHOT);
+ // Overallocate the array for SharedFunctionInfos; functions which we
+ // deserialize soon will create more SharedFunctionInfos when called.
+ Handle<WeakFixedArray> infos(isolate_->factory()->NewWeakFixedArray(
+ WeakArrayList::CapacityForLength(function_count_ + 1),
+ AllocationType::kOld));
+ script->set_shared_function_infos(*infos);
+ Handle<ObjectHashTable> shared_function_info_table =
+ ObjectHashTable::New(isolate_, function_count_);
+
for (uint32_t i = 0; i < function_count_; ++i) {
uint32_t context_id;
// Note: > (not >= on purpose, we will subtract 1).
@@ -681,10 +732,24 @@ void WebSnapshotDeserializer::DeserializeFunctions() {
}
Handle<String> source = ReadString(false);
+ if (i == 0) {
+ script->set_source(*source);
+ } else {
+ // TODO(v8:11525): Support multiple source snippets.
+ DCHECK_EQ(script->source(), *source);
+ }
+
+ uint32_t start_position;
+ uint32_t length;
+ if (!deserializer_->ReadUint32(&start_position) ||
+ !deserializer_->ReadUint32(&length)) {
+ Throw("Web snapshot: Malformed function");
+ return;
+ }
// TODO(v8:11525): Support other function kinds.
// TODO(v8:11525): Support (exported) top level functions.
- Handle<Script> script = isolate_->factory()->NewScript(source);
+
// TODO(v8:11525): Deduplicate the SFIs for inner functions the user creates
// post-deserialization (by calling the outer function, if it's also in the
// snapshot) against the ones we create here.
@@ -692,18 +757,24 @@ void WebSnapshotDeserializer::DeserializeFunctions() {
isolate_->factory()->NewSharedFunctionInfo(
isolate_->factory()->empty_string(), MaybeHandle<Code>(),
Builtins::kCompileLazy, FunctionKind::kNormalFunction);
- shared->set_function_literal_id(1);
+ shared->set_script(*script);
+ // Index 0 is reserved for top-level shared function info (which web
+ // snapshot scripts don't have).
+ const int shared_function_info_index = i + 1;
+ shared->set_function_literal_id(shared_function_info_index);
// TODO(v8:11525): Decide how to handle language modes.
shared->set_language_mode(LanguageMode::kStrict);
shared->set_uncompiled_data(
*isolate_->factory()->NewUncompiledDataWithoutPreparseData(
- ReadOnlyRoots(isolate_).empty_string_handle(), 0,
- source->length()));
- shared->set_script(*script);
- Handle<WeakFixedArray> infos(
- isolate_->factory()->NewWeakFixedArray(3, AllocationType::kOld));
- infos->Set(1, HeapObjectReference::Weak(*shared));
- script->set_shared_function_infos(*infos);
+ ReadOnlyRoots(isolate_).empty_string_handle(), start_position,
+ start_position + length));
+ shared->set_allows_lazy_compilation(true);
+ infos->Set(shared_function_info_index, HeapObjectReference::Weak(*shared));
+
+ shared_function_info_table = ObjectHashTable::Put(
+ shared_function_info_table,
+ handle(Smi::FromInt(start_position), isolate_),
+ handle(Smi::FromInt(shared_function_info_index), isolate_));
Handle<JSFunction> function =
Factory::JSFunctionBuilder(isolate_, shared, isolate_->native_context())
@@ -718,6 +789,7 @@ void WebSnapshotDeserializer::DeserializeFunctions() {
}
functions_->set(i, *function);
}
+ script->set_shared_function_info_table(*shared_function_info_table);
}
void WebSnapshotDeserializer::DeserializeObjects() {
@@ -806,6 +878,46 @@ void WebSnapshotDeserializer::ReadValue(Handle<Object>& value,
return;
}
switch (value_type) {
+ case ValueType::FALSE_CONSTANT: {
+ value = handle(ReadOnlyRoots(isolate_).false_value(), isolate_);
+ representation = Representation::Tagged();
+ break;
+ }
+ case ValueType::TRUE_CONSTANT: {
+ value = handle(ReadOnlyRoots(isolate_).true_value(), isolate_);
+ representation = Representation::Tagged();
+ break;
+ }
+ case ValueType::NULL_CONSTANT: {
+ value = handle(ReadOnlyRoots(isolate_).null_value(), isolate_);
+ representation = Representation::Tagged();
+ break;
+ }
+ case ValueType::UNDEFINED_CONSTANT: {
+ value = handle(ReadOnlyRoots(isolate_).undefined_value(), isolate_);
+ representation = Representation::Tagged();
+ break;
+ }
+ case ValueType::INTEGER: {
+ Maybe<int32_t> number = deserializer_->ReadZigZag<int32_t>();
+ if (number.IsNothing()) {
+ Throw("Web snapshot: Malformed integer");
+ return;
+ }
+ value = isolate_->factory()->NewNumberFromInt(number.FromJust());
+ representation = Representation::Tagged();
+ break;
+ }
+ case ValueType::DOUBLE: {
+ double number;
+ if (!deserializer_->ReadDouble(&number)) {
+ Throw("Web snapshot: Malformed double");
+ return;
+ }
+ value = isolate_->factory()->NewNumber(number);
+ representation = Representation::Tagged();
+ break;
+ }
case ValueType::STRING_ID: {
value = ReadString(false);
representation = Representation::Tagged();
@@ -834,6 +946,25 @@ void WebSnapshotDeserializer::ReadValue(Handle<Object>& value,
value = handle(functions_->get(function_id), isolate_);
representation = Representation::Tagged();
break;
+ case ValueType::REGEXP: {
+ Handle<String> pattern = ReadString(false);
+ Handle<String> flags_string = ReadString(false);
+ bool success = false;
+ JSRegExp::Flags flags =
+ JSRegExp::FlagsFromString(isolate_, flags_string, &success);
+ if (!success) {
+ Throw("Web snapshot: Malformed flags in regular expression");
+ return;
+ }
+ MaybeHandle<JSRegExp> maybe_regexp =
+ JSRegExp::New(isolate_, pattern, flags);
+ if (!maybe_regexp.ToHandle(&value)) {
+ Throw("Web snapshot: Malformed RegExp");
+ return;
+ }
+ representation = Representation::Tagged();
+ break;
+ }
default:
// TODO(v8:11525): Handle other value types.
Throw("Web snapshot: Unsupported value type");
diff --git a/chromium/v8/src/web-snapshot/web-snapshot.h b/chromium/v8/src/web-snapshot/web-snapshot.h
index 31461c8dbc5..8127fa7aa8d 100644
--- a/chromium/v8/src/web-snapshot/web-snapshot.h
+++ b/chromium/v8/src/web-snapshot/web-snapshot.h
@@ -38,11 +38,22 @@ class WebSnapshotSerializerDeserializer {
inline bool has_error() const { return error_message_ != nullptr; }
const char* error_message() const { return error_message_; }
- enum ValueType : uint8_t { STRING_ID, OBJECT_ID, FUNCTION_ID };
+ enum ValueType : uint8_t {
+ FALSE_CONSTANT,
+ TRUE_CONSTANT,
+ NULL_CONSTANT,
+ UNDEFINED_CONSTANT,
+ INTEGER,
+ DOUBLE,
+ STRING_ID,
+ OBJECT_ID,
+ FUNCTION_ID,
+ REGEXP
+ };
// The maximum count of items for each value type (strings, objects etc.)
static constexpr uint32_t kMaxItemCount =
- static_cast<uint32_t>(FixedArray::kMaxLength);
+ static_cast<uint32_t>(FixedArray::kMaxLength - 1);
// This ensures indices and lengths can be converted between uint32_t and int
// without problems:
STATIC_ASSERT(kMaxItemCount < std::numeric_limits<int32_t>::max());
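The web-snapshot changes above introduce a tagged value encoding: WriteValue emits a ValueType tag followed by a payload (a zig-zag-encoded int32 for INTEGER, a raw double for DOUBLE, string/function/object ids, or a pattern-id/flags-id pair for REGEXP), and ReadValue dispatches on the same tag. The following standalone sketch shows the zig-zag step and the tag-then-payload shape for the integer case; the names and the fixed-width payload are illustrative assumptions, not the serializer's actual wire format.

// Sketch only: zig-zag keeps small-magnitude negatives small
// (0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, ...), which matches the
// WriteZigZag<int32_t>/ReadZigZag<int32_t> pair used above.
#include <cstdint>
#include <vector>

namespace sketch {

inline uint32_t ZigZagEncode(int32_t value) {
  return (static_cast<uint32_t>(value) << 1) ^ static_cast<uint32_t>(value >> 31);
}

inline int32_t ZigZagDecode(uint32_t encoded) {
  return static_cast<int32_t>((encoded >> 1) ^ (0u - (encoded & 1)));
}

// Hypothetical tag set mirroring the ValueType enum above; the real
// serializer writes the tag itself as a uint32.
enum Tag : uint8_t { kFalse, kTrue, kNull, kUndefined, kInteger, kDouble };

// Writes an INTEGER as <tag><little-endian zig-zag payload>. The payload
// width here is fixed purely for brevity.
inline void WriteInteger(std::vector<uint8_t>* out, int32_t value) {
  out->push_back(kInteger);
  uint32_t encoded = ZigZagEncode(value);
  for (int shift = 0; shift < 32; shift += 8) {
    out->push_back(static_cast<uint8_t>(encoded >> shift));
  }
}

}  // namespace sketch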
diff --git a/chromium/v8/src/zone/zone-segment.cc b/chromium/v8/src/zone/zone-segment.cc
index 49cedb851b8..6b682624dce 100644
--- a/chromium/v8/src/zone/zone-segment.cc
+++ b/chromium/v8/src/zone/zone-segment.cc
@@ -4,7 +4,7 @@
#include "src/zone/zone-segment.h"
-#include "src/sanitizer/msan.h"
+#include "src/base/sanitizer/msan.h"
namespace v8 {
namespace internal {
diff --git a/chromium/v8/src/zone/zone.cc b/chromium/v8/src/zone/zone.cc
index e40b92be68a..40cf1ab2df8 100644
--- a/chromium/v8/src/zone/zone.cc
+++ b/chromium/v8/src/zone/zone.cc
@@ -7,8 +7,8 @@
#include <cstring>
#include <memory>
+#include "src/base/sanitizer/asan.h"
#include "src/init/v8.h"
-#include "src/sanitizer/asan.h"
#include "src/utils/utils.h"
#include "src/zone/type-stats.h"
diff --git a/chromium/v8/test/benchmarks/cpp/BUILD.gn b/chromium/v8/test/benchmarks/cpp/BUILD.gn
index 6c579528414..07eeb94f159 100644
--- a/chromium/v8/test/benchmarks/cpp/BUILD.gn
+++ b/chromium/v8/test/benchmarks/cpp/BUILD.gn
@@ -25,6 +25,9 @@ if (v8_enable_google_benchmark) {
sources = [ "empty.cc" ]
- deps = [ "//third_party/google_benchmark:benchmark_main" ]
+ deps = [
+ "//:v8_libbase",
+ "//third_party/google_benchmark:benchmark_main",
+ ]
}
}
diff --git a/chromium/v8/test/cctest/BUILD.gn b/chromium/v8/test/cctest/BUILD.gn
index ffa4e3a136a..8f6e5610b84 100644
--- a/chromium/v8/test/cctest/BUILD.gn
+++ b/chromium/v8/test/cctest/BUILD.gn
@@ -127,6 +127,7 @@ v8_source_set("cctest_sources") {
"compiler/test-run-unwinding-info.cc",
"compiler/test-run-variables.cc",
"compiler/test-sloppy-equality.cc",
+ "compiler/test-verify-type.cc",
"compiler/value-helper.cc",
"compiler/value-helper.h",
"disasm-regex-helper.cc",
@@ -156,6 +157,7 @@ v8_source_set("cctest_sources") {
"heap/test-mark-compact.cc",
"heap/test-memory-measurement.cc",
"heap/test-page-promotion.cc",
+ "heap/test-shared-heap.cc",
"heap/test-spaces.cc",
"heap/test-unmapper.cc",
"heap/test-weak-references.cc",
@@ -260,6 +262,7 @@ v8_source_set("cctest_sources") {
"test-platform.cc",
"test-profile-generator.cc",
"test-property-details.cc",
+ "test-ptr-compr-cage.cc",
"test-random-number-generator.cc",
"test-regexp.cc",
"test-representation.cc",
@@ -438,7 +441,6 @@ v8_source_set("cctest_sources") {
"wasm/test-run-wasm-relaxed-simd.cc",
"wasm/test-run-wasm-sign-extension.cc",
"wasm/test-run-wasm-simd-liftoff.cc",
- "wasm/test-run-wasm-simd-scalar-lowering.cc",
"wasm/test-run-wasm-simd.cc",
"wasm/test-run-wasm-wrappers.cc",
"wasm/test-run-wasm.cc",
diff --git a/chromium/v8/test/mjsunit/BUILD.gn b/chromium/v8/test/mjsunit/BUILD.gn
index 1acef4ef642..2bdfd797d5b 100644
--- a/chromium/v8/test/mjsunit/BUILD.gn
+++ b/chromium/v8/test/mjsunit/BUILD.gn
@@ -24,14 +24,10 @@ group("v8_mjsunit") {
"../../tools/profile.mjs",
"../../tools/profile_view.mjs",
"../../tools/splaytree.mjs",
- "../../tools/system-analyzer/helper.mjs",
- "../../tools/system-analyzer/log/api.mjs",
- "../../tools/system-analyzer/log/code.mjs",
- "../../tools/system-analyzer/log/ic.mjs",
- "../../tools/system-analyzer/log/log.mjs",
- "../../tools/system-analyzer/log/map.mjs",
+ "../../tools/system-analyzer/log/",
"../../tools/system-analyzer/processor.mjs",
"../../tools/system-analyzer/timeline.mjs",
+ "../../tools/system-analyzer/helper.mjs",
"../../tools/tickprocessor.mjs",
]
}
diff --git a/chromium/v8/test/unittests/BUILD.gn b/chromium/v8/test/unittests/BUILD.gn
index 43858603dcb..f47b4994133 100644
--- a/chromium/v8/test/unittests/BUILD.gn
+++ b/chromium/v8/test/unittests/BUILD.gn
@@ -86,6 +86,7 @@ v8_source_set("cppgc_unittests_sources") {
testonly = true
sources = [
+ "heap/cppgc/allocation-unittest.cc",
"heap/cppgc/compactor-unittest.cc",
"heap/cppgc/concurrent-marking-unittest.cc",
"heap/cppgc/concurrent-sweeper-unittest.cc",
@@ -101,6 +102,7 @@ v8_source_set("cppgc_unittests_sources") {
"heap/cppgc/heap-growing-unittest.cc",
"heap/cppgc/heap-object-header-unittest.cc",
"heap/cppgc/heap-page-unittest.cc",
+ "heap/cppgc/heap-registry-unittest.cc",
"heap/cppgc/heap-statistics-collector-unittest.cc",
"heap/cppgc/heap-unittest.cc",
"heap/cppgc/incremental-marking-schedule-unittest.cc",
@@ -117,6 +119,7 @@ v8_source_set("cppgc_unittests_sources") {
"heap/cppgc/page-memory-unittest.cc",
"heap/cppgc/persistent-family-unittest.cc",
"heap/cppgc/prefinalizer-unittest.cc",
+ "heap/cppgc/sanitizer-unittest.cc",
"heap/cppgc/source-location-unittest.cc",
"heap/cppgc/stack-unittest.cc",
"heap/cppgc/stats-collector-scopes-unittest.cc",
@@ -129,6 +132,7 @@ v8_source_set("cppgc_unittests_sources") {
"heap/cppgc/tests.h",
"heap/cppgc/visitor-unittest.cc",
"heap/cppgc/weak-container-unittest.cc",
+ "heap/cppgc/workloads-unittest.cc",
"heap/cppgc/write-barrier-unittest.cc",
]
@@ -359,6 +363,7 @@ v8_source_set("unittests_sources") {
"parser/ast-value-unittest.cc",
"parser/preparser-unittest.cc",
"profiler/strings-storage-unittest.cc",
+ "regexp/regexp-unittest.cc",
"regress/regress-crbug-1041240-unittest.cc",
"regress/regress-crbug-1056054-unittest.cc",
"regress/regress-crbug-938251-unittest.cc",
@@ -387,6 +392,10 @@ v8_source_set("unittests_sources") {
"zone/zone-unittest.cc",
]
+ if (v8_enable_runtime_call_stats) {
+ sources += [ "logging/runtime-call-stats-unittest.cc" ]
+ }
+
if (v8_enable_webassembly) {
sources += [
"../../test/common/wasm/wasm-macro-gen.h",
diff --git a/chromium/v8/third_party/google_benchmark/BUILD.gn b/chromium/v8/third_party/google_benchmark/BUILD.gn
index 565a39d51c1..e746cc421a0 100644
--- a/chromium/v8/third_party/google_benchmark/BUILD.gn
+++ b/chromium/v8/third_party/google_benchmark/BUILD.gn
@@ -3,8 +3,9 @@
# found in the LICENSE file.
import("//build/config/gclient_args.gni")
+import("../../gni/v8.gni")
-if (checkout_google_benchmark) {
+if (v8_enable_google_benchmark) {
config("benchmark_config") {
include_dirs = [ "src/include" ]
}
@@ -35,11 +36,12 @@ if (checkout_google_benchmark) {
"src/src/counter.cc",
"src/src/counter.h",
"src/src/csv_reporter.cc",
- "src/src/cycle_clock.h",
"src/src/internal_macros.h",
"src/src/json_reporter.cc",
"src/src/log.h",
"src/src/mutex.h",
+ "src/src/perf_counters.cc",
+ "src/src/perf_counters.h",
"src/src/re.h",
"src/src/reporter.cc",
"src/src/sleep.cc",
@@ -68,6 +70,6 @@ if (checkout_google_benchmark) {
source_set("benchmark_main") {
testonly = true
sources = [ "src/src/benchmark_main.cc" ]
- deps = [ ":google_benchmark" ]
+ public_deps = [ ":google_benchmark" ]
}
}
diff --git a/chromium/v8/third_party/googletest/BUILD.gn b/chromium/v8/third_party/googletest/BUILD.gn
index cfa9205547d..4d393efd95f 100644
--- a/chromium/v8/third_party/googletest/BUILD.gn
+++ b/chromium/v8/third_party/googletest/BUILD.gn
@@ -2,6 +2,8 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
+import("../../gni/v8.gni")
+
config("gtest_config") {
visibility = [ ":*" ] # gmock also shares this config.
@@ -49,8 +51,6 @@ config("gmock_config") {
source_set("gtest") {
testonly = true
sources = [
- # TODO(crbug.com/829773): Remove this after transitioning off <tr1/tuple>.
- "custom/gmock/internal/custom/gmock-port.h",
"src/googletest/include/gtest/gtest-death-test.h",
"src/googletest/include/gtest/gtest-matchers.h",
"src/googletest/include/gtest/gtest-message.h",
@@ -64,12 +64,9 @@ source_set("gtest") {
"src/googletest/include/gtest/internal/gtest-death-test-internal.h",
"src/googletest/include/gtest/internal/gtest-filepath.h",
"src/googletest/include/gtest/internal/gtest-internal.h",
- "src/googletest/include/gtest/internal/gtest-linked_ptr.h",
- "src/googletest/include/gtest/internal/gtest-param-util-generated.h",
"src/googletest/include/gtest/internal/gtest-param-util.h",
"src/googletest/include/gtest/internal/gtest-port.h",
"src/googletest/include/gtest/internal/gtest-string.h",
- "src/googletest/include/gtest/internal/gtest-tuple.h",
"src/googletest/include/gtest/internal/gtest-type-util.h",
#"src/googletest/src/gtest-all.cc", # Not needed by our build.
@@ -92,6 +89,15 @@ source_set("gtest") {
configs -= [ "//build/config/compiler:chromium_code" ]
configs += [ "//build/config/compiler:no_chromium_code" ]
+ # V8-only workaround for http://crbug.com/chromium/1191946. Ensures that
+ # googletest is compiled with the same visibility as the rest of V8, see
+ # https://source.chromium.org/chromium/chromium/src/+/master:v8/gni/v8.gni
+ if ((is_posix || is_fuchsia) &&
+ (v8_enable_backtrace || v8_monolithic || v8_expose_symbols)) {
+ configs -= [ "//build/config/gcc:symbol_visibility_hidden" ]
+ configs += [ "//build/config/gcc:symbol_visibility_default" ]
+ }
+
deps = []
if (is_fuchsia) {
@@ -118,22 +124,16 @@ source_set("gmock") {
"src/googlemock/include/gmock/gmock-actions.h",
"src/googlemock/include/gmock/gmock-cardinalities.h",
"src/googlemock/include/gmock/gmock-function-mocker.h",
- "src/googlemock/include/gmock/gmock-generated-actions.h",
- "src/googlemock/include/gmock/gmock-generated-nice-strict.h",
"src/googlemock/include/gmock/gmock-matchers.h",
"src/googlemock/include/gmock/gmock-more-actions.h",
"src/googlemock/include/gmock/gmock-more-matchers.h",
"src/googlemock/include/gmock/gmock-nice-strict.h",
"src/googlemock/include/gmock/gmock-spec-builders.h",
"src/googlemock/include/gmock/gmock.h",
- "src/googlemock/include/gmock/internal/gmock-generated-internal-utils.h",
"src/googlemock/include/gmock/internal/gmock-internal-utils.h",
"src/googlemock/include/gmock/internal/gmock-port.h",
"src/googlemock/include/gmock/internal/gmock-pp.h",
- # gmock helpers.
- "custom/gmock/internal/custom/gmock-port.h",
-
#"src/googlemock/src/gmock-all.cc", # Not needed by our build.
"src/googlemock/src/gmock-cardinalities.cc",
"src/googlemock/src/gmock-internal-utils.cc",
@@ -142,10 +142,21 @@ source_set("gmock") {
"src/googlemock/src/gmock.cc",
]
+ # V8-only workaround for http://crbug.com/chromium/1191946. Ensures that
+ # googletest is compiled with the same visibility as the rest of V8, see
+ # https://source.chromium.org/chromium/chromium/src/+/master:v8/gni/v8.gni
+ if ((is_posix || is_fuchsia) &&
+ (v8_enable_backtrace || v8_monolithic || v8_expose_symbols)) {
+ configs -= [ "//build/config/gcc:symbol_visibility_hidden" ]
+ configs += [ "//build/config/gcc:symbol_visibility_default" ]
+ }
+
public_configs = [
":gmock_config",
":gtest_config",
]
+
+ deps = [ ":gtest" ]
}
# Do NOT depend on this directly. Use //testing/gmock:gmock_main instead.
@@ -153,5 +164,8 @@ source_set("gmock") {
static_library("gmock_main") {
testonly = true
sources = [ "src/googlemock/src/gmock_main.cc" ]
- deps = [ ":gmock" ]
+ deps = [
+ ":gmock",
+ ":gtest",
+ ]
}
diff --git a/chromium/v8/third_party/inspector_protocol/BUILD.gn b/chromium/v8/third_party/inspector_protocol/BUILD.gn
index 880b651c095..94b7fe2677a 100644
--- a/chromium/v8/third_party/inspector_protocol/BUILD.gn
+++ b/chromium/v8/third_party/inspector_protocol/BUILD.gn
@@ -5,7 +5,10 @@
import("../../gni/v8.gni")
config("crdtp_config") {
- visibility = [ "../../src/inspector:*", ":*" ]
+ visibility = [
+ ":*",
+ "../../src/inspector:*",
+ ]
configs = [ "../../:internal_config" ]
include_dirs = [ "../../include" ]
}
@@ -46,6 +49,7 @@ v8_source_set("crdtp_platform") {
"crdtp/json_platform_v8.cc",
]
public_deps = [ "../..:v8_libbase" ]
+ deps = [ "../../:v8_internal_headers" ]
configs = [ ":crdtp_config" ]
}
diff --git a/chromium/v8/third_party/inspector_protocol/README.v8 b/chromium/v8/third_party/inspector_protocol/README.v8
index 97bc1c34147..cc5e083c6a2 100644
--- a/chromium/v8/third_party/inspector_protocol/README.v8
+++ b/chromium/v8/third_party/inspector_protocol/README.v8
@@ -2,7 +2,7 @@ Name: inspector protocol
Short Name: inspector_protocol
URL: https://chromium.googlesource.com/deps/inspector_protocol/
Version: 0
-Revision: 94298cef795ec994106bdaff002c41182911b767
+Revision: 35e8d2d89cb017d72cf905362672de77c978e1e6
License: BSD
License File: LICENSE
Security Critical: no
diff --git a/chromium/v8/third_party/inspector_protocol/crdtp/serializable.cc b/chromium/v8/third_party/inspector_protocol/crdtp/serializable.cc
index 20de53ecc08..7a21ffd35fe 100644
--- a/chromium/v8/third_party/inspector_protocol/crdtp/serializable.cc
+++ b/chromium/v8/third_party/inspector_protocol/crdtp/serializable.cc
@@ -4,6 +4,8 @@
#include "serializable.h"
+#include <utility>
+
namespace v8_crdtp {
// =============================================================================
// Serializable - An object to be emitted as a sequence of bytes.
@@ -18,7 +20,8 @@ std::vector<uint8_t> Serializable::Serialize() const {
namespace {
class PreSerialized : public Serializable {
public:
- explicit PreSerialized(std::vector<uint8_t> bytes) : bytes_(bytes) {}
+ explicit PreSerialized(std::vector<uint8_t> bytes)
+ : bytes_(std::move(bytes)) {}
void AppendSerialized(std::vector<uint8_t>* out) const override {
out->insert(out->end(), bytes_.begin(), bytes_.end());
diff --git a/chromium/v8/third_party/inspector_protocol/crdtp/status.cc b/chromium/v8/third_party/inspector_protocol/crdtp/status.cc
index 4a8e03d3898..7181df97d16 100644
--- a/chromium/v8/third_party/inspector_protocol/crdtp/status.cc
+++ b/chromium/v8/third_party/inspector_protocol/crdtp/status.cc
@@ -113,6 +113,8 @@ std::string Status::Message() const {
return "BINDINGS: string8 value expected";
case Error::BINDINGS_BINARY_VALUE_EXPECTED:
return "BINDINGS: binary value expected";
+ case Error::BINDINGS_DICTIONARY_VALUE_EXPECTED:
+ return "BINDINGS: dictionary value expected";
}
// Some compilers can't figure out that we can't get here.
return "INVALID ERROR CODE";
diff --git a/chromium/v8/third_party/inspector_protocol/crdtp/status.h b/chromium/v8/third_party/inspector_protocol/crdtp/status.h
index 45e0a57acfb..1039156942c 100644
--- a/chromium/v8/third_party/inspector_protocol/crdtp/status.h
+++ b/chromium/v8/third_party/inspector_protocol/crdtp/status.h
@@ -77,6 +77,7 @@ enum class Error {
BINDINGS_STRING_VALUE_EXPECTED = 0x34,
BINDINGS_STRING8_VALUE_EXPECTED = 0x35,
BINDINGS_BINARY_VALUE_EXPECTED = 0x36,
+ BINDINGS_DICTIONARY_VALUE_EXPECTED = 0x37,
};
// A status value with position that can be copied. The default status
diff --git a/chromium/v8/third_party/inspector_protocol/lib/ValueConversions_cpp.template b/chromium/v8/third_party/inspector_protocol/lib/ValueConversions_cpp.template
index 36c8dcc3563..a16b522c38a 100644
--- a/chromium/v8/third_party/inspector_protocol/lib/ValueConversions_cpp.template
+++ b/chromium/v8/third_party/inspector_protocol/lib/ValueConversions_cpp.template
@@ -96,6 +96,10 @@ bool ProtocolTypeTraits<std::unique_ptr<DictionaryValue>>::Deserialize(
std::unique_ptr<Value> res;
if (!ProtocolTypeTraits<std::unique_ptr<Value>>::Deserialize(state, &res))
return false;
+ if (res->type() != Value::TypeObject) {
+ state->RegisterError(Error::BINDINGS_DICTIONARY_VALUE_EXPECTED);
+ return false;
+ }
*value = DictionaryValue::cast(std::move(res));
return true;
}
diff --git a/chromium/v8/third_party/inspector_protocol/lib/base_string_adapter_cc.template b/chromium/v8/third_party/inspector_protocol/lib/base_string_adapter_cc.template
index e503f5c23e2..10488f22432 100644
--- a/chromium/v8/third_party/inspector_protocol/lib/base_string_adapter_cc.template
+++ b/chromium/v8/third_party/inspector_protocol/lib/base_string_adapter_cc.template
@@ -11,7 +11,6 @@
#include "base/base64.h"
#include "base/json/json_reader.h"
#include "base/memory/ptr_util.h"
-#include "base/strings/string16.h"
#include "base/strings/stringprintf.h"
#include "base/strings/utf_string_conversions.h"
#include "base/values.h"
@@ -141,7 +140,7 @@ std::unique_ptr<base::Value> toBaseValue(Value* value, int depth) {
// static
String StringUtil::fromUTF16LE(const uint16_t* data, size_t length) {
std::string utf8;
- base::UTF16ToUTF8(reinterpret_cast<const base::char16*>(data), length, &utf8);
+ base::UTF16ToUTF8(reinterpret_cast<const char16_t*>(data), length, &utf8);
return utf8;
}
@@ -246,4 +245,4 @@ void ProtocolTypeTraits<Binary>::Serialize(const Binary& value, std::vector<uint
value.AppendSerialized(bytes);
}
-} // namespace {{config.crdtp.namespace}}
\ No newline at end of file
+} // namespace {{config.crdtp.namespace}}
diff --git a/chromium/v8/third_party/inspector_protocol/pdl.py b/chromium/v8/third_party/inspector_protocol/pdl.py
index d7733634e58..9a9fec98988 100644
--- a/chromium/v8/third_party/inspector_protocol/pdl.py
+++ b/chromium/v8/third_party/inspector_protocol/pdl.py
@@ -27,6 +27,9 @@ def assignType(item, type, is_array=False, map_binary_to_string=False):
type = 'string'
if map_binary_to_string and type == 'binary':
type = 'string'
+ if 'description' in item:
+ item['description'] = (item['description'] +
+ ' (Encoded as a base64 string when passed over JSON)')
if type in primitiveTypes:
item['type'] = type
else:
diff --git a/chromium/v8/third_party/jsoncpp/BUILD.gn b/chromium/v8/third_party/jsoncpp/BUILD.gn
index 411d2d62e73..6324be019c4 100644
--- a/chromium/v8/third_party/jsoncpp/BUILD.gn
+++ b/chromium/v8/third_party/jsoncpp/BUILD.gn
@@ -21,9 +21,7 @@ source_set("jsoncpp") {
sources = [
"generated/version.h",
"source/include/json/assertions.h",
- "source/include/json/autolink.h",
"source/include/json/config.h",
- "source/include/json/features.h",
"source/include/json/forwards.h",
"source/include/json/json.h",
"source/include/json/reader.h",
diff --git a/chromium/v8/tools/clusterfuzz/v8_foozzie.py b/chromium/v8/tools/clusterfuzz/v8_foozzie.py
index 9f3810c9f5b..52b79540939 100755
--- a/chromium/v8/tools/clusterfuzz/v8_foozzie.py
+++ b/chromium/v8/tools/clusterfuzz/v8_foozzie.py
@@ -211,14 +211,14 @@ class ExecutionArgumentsConfig(object):
'default: bundled in the directory of this script',
default=DEFAULT_D8)
- def make_options(self, options, default_config=None):
+ def make_options(self, options, default_config=None, default_d8=None):
def get(name):
return getattr(options, '%s_%s' % (self.label, name))
config = default_config or get('config')
assert config in CONFIGS
- d8 = get('d8')
+ d8 = default_d8 or get('d8')
if not os.path.isabs(d8):
d8 = os.path.join(BASE_PATH, d8)
assert os.path.exists(d8)
@@ -239,6 +239,13 @@ class ExecutionConfig(object):
flags = getattr(options, label).flags
self.command = Command(options, label, d8, flags)
+ # Options for a fallback configuration only exist when comparing
+ # different architectures.
+ fallback_label = label + '_fallback'
+ self.fallback = None
+ if getattr(options, fallback_label, None):
+ self.fallback = ExecutionConfig(options, fallback_label)
+
@property
def flags(self):
return self.command.flags
@@ -278,7 +285,15 @@ def parse_args():
options.first = first_config_arguments.make_options(options)
options.second = second_config_arguments.make_options(options)
options.default = second_config_arguments.make_options(
- options, DEFAULT_CONFIG)
+ options, default_config=DEFAULT_CONFIG)
+
+ # Use fallback configurations only on different architectures. In this
+ # case we are going to re-test against the first architecture.
+ if options.first.arch != options.second.arch:
+ options.second_fallback = second_config_arguments.make_options(
+ options, default_d8=options.first.d8)
+ options.default_fallback = second_config_arguments.make_options(
+ options, default_config=DEFAULT_CONFIG, default_d8=options.first.d8)
# Ensure we make a valid comparison.
if (options.first.d8 == options.second.d8 and
@@ -315,10 +330,12 @@ def fail_bailout(output, ignore_by_output_fun):
def format_difference(
- source_key, first_config, second_config,
- first_config_output, second_config_output, difference, source=None):
+ first_config, second_config,
+ first_config_output, second_config_output,
+ difference, source_key=None, source=None):
# The first three entries will be parsed by clusterfuzz. Format changes
# will require changes on the clusterfuzz side.
+ source_key = source_key or cluster_failures(source)
first_config_label = '%s,%s' % (first_config.arch, first_config.config)
second_config_label = '%s,%s' % (second_config.arch, second_config.config)
source_file_text = SOURCE_FILE_TEMPLATE % source if source else ''
@@ -376,6 +393,29 @@ def cluster_failures(source, known_failures=None):
return long_key[:ORIGINAL_SOURCE_HASH_LENGTH]
+class RepeatedRuns(object):
+ """Helper class for storing statistical data from repeated runs."""
+ def __init__(self, test_case, timeout, verbose):
+ self.test_case = test_case
+ self.timeout = timeout
+ self.verbose = verbose
+
+ # Stores whether any run has crashed or was simulated.
+ self.has_crashed = False
+ self.simulated = False
+
+ def run(self, config):
+ comparison_output = config.command.run(
+ self.test_case, timeout=self.timeout, verbose=self.verbose)
+ self.has_crashed = self.has_crashed or comparison_output.HasCrashed()
+ self.simulated = self.simulated or config.is_error_simulation
+ return comparison_output
+
+ @property
+ def crash_state(self):
+ return '_simulated_crash_' if self.simulated else '_unexpected_crash_'
+
+
def run_comparisons(suppress, execution_configs, test_case, timeout,
verbose=True, ignore_crashes=True, source_key=None):
"""Runs different configurations and bails out on output difference.
@@ -393,20 +433,15 @@ def run_comparisons(suppress, execution_configs, test_case, timeout,
source_key: A fixed source key. If not given, it will be inferred from the
output.
"""
- run_test_case = lambda config: config.command.run(
- test_case, timeout=timeout, verbose=verbose)
+ runner = RepeatedRuns(test_case, timeout, verbose)
# Run the baseline configuration.
baseline_config = execution_configs[0]
- baseline_output = run_test_case(baseline_config)
- has_crashed = baseline_output.HasCrashed()
- simulated = baseline_config.is_error_simulation
+ baseline_output = runner.run(baseline_config)
# Iterate over the remaining configurations, run and compare.
for comparison_config in execution_configs[1:]:
- comparison_output = run_test_case(comparison_config)
- has_crashed = has_crashed or comparison_output.HasCrashed()
- simulated = simulated or comparison_config.is_error_simulation
+ comparison_output = runner.run(comparison_config)
difference, source = suppress.diff(baseline_output, comparison_output)
if difference:
@@ -416,12 +451,25 @@ def run_comparisons(suppress, execution_configs, test_case, timeout,
fail_bailout(baseline_output, suppress.ignore_by_output)
fail_bailout(comparison_output, suppress.ignore_by_output)
- source_key = source_key or cluster_failures(source)
+ # Check if a difference also occurs with the fallback configuration and
+ # give it precedence. E.g. we always prefer x64 differences.
+ if comparison_config.fallback:
+ fallback_output = runner.run(comparison_config.fallback)
+ fallback_difference, fallback_source = suppress.diff(
+ baseline_output, fallback_output)
+ if fallback_difference:
+ fail_bailout(fallback_output, suppress.ignore_by_output)
+ source = fallback_source
+ comparison_config = comparison_config.fallback
+ comparison_output = fallback_output
+ difference = fallback_difference
+
raise FailException(format_difference(
- source_key, baseline_config, comparison_config,
- baseline_output, comparison_output, difference, source))
+ baseline_config, comparison_config,
+ baseline_output, comparison_output,
+ difference, source_key, source))
- if has_crashed:
+ if runner.has_crashed:
if ignore_crashes:
# Show if a crash has happened in one of the runs and no difference was
# detected. This is only for the statistics during experiments.
@@ -429,9 +477,8 @@ def run_comparisons(suppress, execution_configs, test_case, timeout,
else:
# Subsume simulated and unexpected crashes (e.g. during smoke tests)
# with one failure state.
- crash_state = '_simulated_crash_' if simulated else '_unexpected_crash_'
raise FailException(FAILURE_HEADER_TEMPLATE % dict(
- configs='', source_key='', suppression=crash_state))
+ configs='', source_key='', suppression=runner.crash_state))
def main():
@@ -448,7 +495,7 @@ def main():
content_bailout(content, suppress.ignore_by_content)
# Prepare the baseline, default and a secondary configuration to compare to.
- # The baseline (turbofan) takes precedence as many of the secondary configs
+ # The default (turbofan) takes precedence as many of the secondary configs
# are based on the turbofan config with additional parameters.
execution_configs = [
ExecutionConfig(options, 'first'),
diff --git a/chromium/v8/tools/clusterfuzz/v8_foozzie_test.py b/chromium/v8/tools/clusterfuzz/v8_foozzie_test.py
index eb8322ce622..a8ba74364bc 100755
--- a/chromium/v8/tools/clusterfuzz/v8_foozzie_test.py
+++ b/chromium/v8/tools/clusterfuzz/v8_foozzie_test.py
@@ -263,7 +263,7 @@ class SystemTest(unittest.TestCase):
Overview of fakes:
baseline: Example foozzie output including a syntax error.
- build1: Difference to baseline is a stack trace differece expected to
+ build1: Difference to baseline is a stack trace difference expected to
be suppressed.
build2: Difference to baseline is a non-suppressed output difference
causing the script to fail.
@@ -312,6 +312,36 @@ class SystemTest(unittest.TestCase):
self.assertIn('v8_mock_archs.js', lines[1])
self.assertIn('v8_mock_archs.js', lines[3])
+ def testDifferentArchFailFirst(self):
+ """Test that we re-test against x64. This tests the path that also fails
+ on x64 and then reports the error as x64.
+ """
+ with open(os.path.join(TEST_DATA, 'failure_output_arch.txt')) as f:
+ expected_output = f.read()
+ # Build 3 simulates x86 and produces a difference on --bad-flag, but
+ # the baseline build shows the same difference when --bad-flag is passed.
+ with self.assertRaises(subprocess.CalledProcessError) as ctx:
+ run_foozzie('build3', '--skip-smoke-tests',
+ '--second-config-extra-flags=--bad-flag')
+ e = ctx.exception
+ self.assertEqual(v8_foozzie.RETURN_FAIL, e.returncode)
+ self.assertEqual(expected_output, cut_verbose_output(e.output, 3))
+
+ def testDifferentArchFailSecond(self):
+ """As above, but we test the path that only fails in the second (ia32)
+ run and not with x64 and then reports the error as ia32.
+ """
+ with open(os.path.join(TEST_DATA, 'failure_output_second.txt')) as f:
+ expected_output = f.read()
+ # Build 3 simulates x86 and produces a difference on --very-bad-flag,
+ # which the baseline build doesn't.
+ with self.assertRaises(subprocess.CalledProcessError) as ctx:
+ run_foozzie('build3', '--skip-smoke-tests',
+ '--second-config-extra-flags=--very-bad-flag')
+ e = ctx.exception
+ self.assertEqual(v8_foozzie.RETURN_FAIL, e.returncode)
+ self.assertEqual(expected_output, cut_verbose_output(e.output, 3))
+
def testJitless(self):
"""Test that webassembly is mocked out when comparing with jitless."""
stdout = run_foozzie(
diff --git a/chromium/v8/tools/clusterfuzz/v8_mock.js b/chromium/v8/tools/clusterfuzz/v8_mock.js
index be7a40b70f4..41f1901d5d6 100644
--- a/chromium/v8/tools/clusterfuzz/v8_mock.js
+++ b/chromium/v8/tools/clusterfuzz/v8_mock.js
@@ -99,7 +99,11 @@ Object.defineProperty(
// Mock buffer access in float typed arrays because of varying NaN patterns.
(function() {
+ const origArrayFrom = Array.from;
+ const origArrayIsArray = Array.isArray;
+ const origFunctionPrototype = Function.prototype;
const origIsNaN = isNaN;
+ const origIterator = Symbol.iterator;
const deNaNify = function(value) { return origIsNaN(value) ? 1 : value; };
const mock = function(type) {
@@ -117,17 +121,17 @@ Object.defineProperty(
construct: function(target, args) {
for (let i = 0; i < args.length; i++) {
if (args[i] != null &&
- typeof args[i][Symbol.iterator] === 'function') {
+ typeof args[i][origIterator] === 'function') {
// Consume iterators.
- args[i] = Array.from(args[i]);
+ args[i] = origArrayFrom(args[i]);
}
- if (Array.isArray(args[i])) {
+ if (origArrayIsArray(args[i])) {
args[i] = args[i].map(deNaNify);
}
}
const obj = new (
- Function.prototype.bind.call(type, null, ...args));
+ origFunctionPrototype.bind.call(type, null, ...args));
return new Proxy(obj, {
get: function(x, prop) {
if (typeof x[prop] == "function")
diff --git a/chromium/v8/tools/codemap.mjs b/chromium/v8/tools/codemap.mjs
index 4986fbd3b05..5beeb3b1b6f 100644
--- a/chromium/v8/tools/codemap.mjs
+++ b/chromium/v8/tools/codemap.mjs
@@ -252,11 +252,18 @@ export class CodeMap {
}
/**
- * Returns an array of all libraries entries.
+ * Returns an array of all library entries.
*/
- getAllLibrariesEntries() {
+ getAllLibraryEntries() {
return this.libraries_.exportValues();
}
+
+ /**
+ * Returns an array of pairs of all library entries and their addresses.
+ */
+ getAllLibraryEntriesWithAddresses() {
+ return this.libraries_.exportKeysAndValues();
+ }
}
diff --git a/chromium/v8/tools/cppgc/gen_cmake.py b/chromium/v8/tools/cppgc/gen_cmake.py
index 6fc1bc0b030..90eda1f3bb1 100755
--- a/chromium/v8/tools/cppgc/gen_cmake.py
+++ b/chromium/v8/tools/cppgc/gen_cmake.py
@@ -244,6 +244,7 @@ set(CMAKE_CXX_STANDARD_REQUIRED ON)
option(CPPGC_ENABLE_OBJECT_NAMES "Enable object names in cppgc for debug purposes" OFF)
option(CPPGC_ENABLE_CAGED_HEAP "Enable heap reservation of size 4GB, only possible for 64bit archs" OFF)
+option(CPPGC_ENABLE_VERIFY_LIVE_BYTES "Enable verification of live bytes in the marking verifier" OFF)
option(CPPGC_ENABLE_YOUNG_GENERATION "Enable young generation in cppgc" OFF)
set(CPPGC_TARGET_ARCH "x64" CACHE STRING "Target architecture, possible options: x64, x86, arm, arm64, ppc64, s390x, mipsel, mips64el")
@@ -433,6 +434,9 @@ endif()
if(CPPGC_ENABLE_CAGED_HEAP)
target_compile_definitions({target.name} PRIVATE "-DCPPGC_CAGED_HEAP")
endif()
+if(CPPGC_ENABLE_VERIFY_LIVE_BYTES)
+ target_compile_definitions({target.name} PRIVATE "-DCPPGC_VERIFY_LIVE_BYTES")
+endif()
if(CPPGC_ENABLE_YOUNG_GENERATION)
target_compile_definitions({target.name} PRIVATE "-DCPPGC_YOUNG_GENERATION")
endif()"""
diff --git a/chromium/v8/tools/debug_helper/get-object-properties.cc b/chromium/v8/tools/debug_helper/get-object-properties.cc
index 7199bc51d22..b85bbc05906 100644
--- a/chromium/v8/tools/debug_helper/get-object-properties.cc
+++ b/chromium/v8/tools/debug_helper/get-object-properties.cc
@@ -316,7 +316,10 @@ class ReadStringVisitor : public TqObjectVisitor {
bool IsExternalStringCached(const TqExternalString* object) {
// The safest way to get the instance type is to use known map pointers, in
// case the map data is not available.
- uintptr_t map = GetOrFinish(object->GetMapValue(accessor_));
+ Value<uintptr_t> map_ptr = object->GetMapValue(accessor_);
+ DCHECK_IMPLIES(map_ptr.validity == d::MemoryAccessResult::kOk,
+ !v8::internal::MapWord::IsPacked(map_ptr.value));
+ uintptr_t map = GetOrFinish(map_ptr);
if (done_) return false;
auto instance_types = FindKnownMapInstanceTypes(map, heap_addresses_);
// Exactly one of the matched instance types should be a string type,
@@ -347,10 +350,10 @@ class ReadStringVisitor : public TqObjectVisitor {
ExternalPointer_t resource_data =
GetOrFinish(object->GetResourceDataValue(accessor_));
#ifdef V8_COMPRESS_POINTERS
- uintptr_t data_address = static_cast<uintptr_t>(
- DecodeExternalPointer(GetPtrComprCageBaseFromOnHeapAddress(
- heap_addresses_.any_heap_pointer),
- resource_data, kExternalStringResourceDataTag));
+ Isolate* isolate = GetIsolateForHeapSandbox(
+ HeapObject::unchecked_cast(Object(heap_addresses_.any_heap_pointer)));
+ uintptr_t data_address = static_cast<uintptr_t>(DecodeExternalPointer(
+ isolate, resource_data, kExternalStringResourceDataTag));
#else
uintptr_t data_address = static_cast<uintptr_t>(resource_data);
#endif // V8_COMPRESS_POINTERS
@@ -500,6 +503,7 @@ class AddInfoVisitor : public TqObjectVisitor {
if (map_ptr.validity != d::MemoryAccessResult::kOk) {
return; // Can't read the JSObject. Nothing useful to do.
}
+ DCHECK(!v8::internal::MapWord::IsPacked(map_ptr.value));
TqMap map(map_ptr.value);
// On JSObject instances, this value is the start of in-object properties.
diff --git a/chromium/v8/tools/dev/gm.py b/chromium/v8/tools/dev/gm.py
index 4e318f2f32e..8a05da3cc96 100755
--- a/chromium/v8/tools/dev/gm.py
+++ b/chromium/v8/tools/dev/gm.py
@@ -262,10 +262,10 @@ class Config(object):
cpu = "arm"
elif self.arch == "android_arm64":
cpu = "arm64"
- elif self.arch == "arm64" and _GetMachine() == "aarch64":
+ elif self.arch == "arm64" and _GetMachine() in ("aarch64", "arm64"):
# arm64 build host:
cpu = "arm64"
- elif self.arch == "arm" and _GetMachine() == "aarch64":
+ elif self.arch == "arm" and _GetMachine() in ("aarch64", "arm64"):
cpu = "arm"
elif "64" in self.arch or self.arch == "s390x":
# Native x64 or simulator build.
@@ -291,7 +291,8 @@ class Config(object):
def GetSpecialCompiler(self):
if _GetMachine() == "aarch64":
- # We have no prebuilt Clang for arm64. Use the system Clang instead.
+ # We have no prebuilt Clang for arm64 on Linux, so use the system Clang
+ # instead.
return ["clang_base_path = \"/usr\"", "clang_use_chrome_plugins = false"]
return []
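
Editor's note: the gm.py change above makes the host-architecture check accept both the Linux and macOS spellings of the same CPU. A rough standalone equivalent, assuming platform.machine() as the probe, could look like this:

    import platform

    def host_cpu():
        # Linux reports "aarch64", macOS reports "arm64" for the same hardware.
        machine = platform.machine().lower()
        if machine in ("aarch64", "arm64"):
            return "arm64"
        if machine in ("x86_64", "amd64"):
            return "x64"
        return machine
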
diff --git a/chromium/v8/tools/dev/v8gen.py b/chromium/v8/tools/dev/v8gen.py
index 18abf8aa25f..c6ba1d2174a 100755
--- a/chromium/v8/tools/dev/v8gen.py
+++ b/chromium/v8/tools/dev/v8gen.py
@@ -121,7 +121,7 @@ class GenerateGnArgs(object):
add_common_options(list_cmd)
# Default to "gen" unless global help is requested.
- if not args or args[0] not in subps.choices.keys() + ['-h', '--help']:
+ if not args or args[0] not in list(subps.choices) + ['-h', '--help']:
args = ['gen'] + args
return self.parser.parse_args(args)
@@ -193,14 +193,16 @@ class GenerateGnArgs(object):
return 0
def verbose_print_1(self, text):
- if self._options.verbosity >= 1:
+ if self._options.verbosity and self._options.verbosity >= 1:
print('#' * 80)
print(text)
def verbose_print_2(self, text):
- if self._options.verbosity >= 2:
+ if self._options.verbosity and self._options.verbosity >= 2:
indent = ' ' * 2
for l in text.splitlines():
+ if type(l) == bytes:
+ l = l.decode()
print(indent + l)
def _call_cmd(self, args):
@@ -306,7 +308,7 @@ if __name__ == "__main__":
try:
sys.exit(gen.main())
except Exception:
- if gen._options.verbosity < 2:
+ if not gen._options.verbosity or gen._options.verbosity < 2:
print ('\nHint: You can raise verbosity (-vv) to see the output of '
'failed commands.\n')
raise
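
Editor's note: the v8gen.py hunks above are py2/py3 hardening: optparse may leave verbosity as None, and subprocess output arrives as bytes under Python 3. A small helper with the same intent, written as a hedged sketch:

    def print_indented(text, indent="  "):
        # Accept both str and bytes, since subprocess output is bytes on Python 3.
        if isinstance(text, bytes):
            text = text.decode()
        for line in text.splitlines():
            print(indent + line)
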
diff --git a/chromium/v8/tools/generate-header-include-checks.py b/chromium/v8/tools/generate-header-include-checks.py
index 4e58a492dec..2171ee8a0da 100755
--- a/chromium/v8/tools/generate-header-include-checks.py
+++ b/chromium/v8/tools/generate-header-include-checks.py
@@ -31,6 +31,8 @@ OUT_DIR = os.path.join(V8_DIR, 'check-header-includes')
AUTO_EXCLUDE = [
# flag-definitions.h needs a mode set for being included.
'src/flags/flag-definitions.h',
+ # recorder.h should only be included conditionally.
+ 'src/libplatform/tracing/recorder.h',
]
AUTO_EXCLUDE_PATTERNS = [
'src/base/atomicops_internals_.*',
diff --git a/chromium/v8/tools/mb/mb.py b/chromium/v8/tools/mb/mb.py
index 8ca90899440..7031ba50dbd 100755
--- a/chromium/v8/tools/mb/mb.py
+++ b/chromium/v8/tools/mb/mb.py
@@ -27,7 +27,16 @@ import sys
import subprocess
import tempfile
import traceback
-import urllib2
+
+# for py2/py3 compatibility
+try:
+ from urllib.parse import quote
+except ImportError:
+ from urllib2 import quote
+try:
+ from urllib.request import urlopen
+except ImportError:
+ from urllib2 import urlopen
from collections import OrderedDict
@@ -869,7 +878,7 @@ class MetaBuildWrapper(object):
return err, labels
def GNCmd(self, subcommand, path, *args):
- if self.platform == 'linux2':
+ if self.platform.startswith('linux'):
subdir, exe = 'linux64', 'gn'
elif self.platform == 'darwin':
subdir, exe = 'mac', 'gn'
@@ -1109,7 +1118,7 @@ class MetaBuildWrapper(object):
def CheckCompile(self, builder_group, builder):
url_template = self.args.url_template + '/{builder}/builds/_all?as_text=1'
- url = urllib2.quote(
+ url = quote(
url_template.format(builder_group=builder_group, builder=builder),
safe=':/()?=')
try:
@@ -1201,7 +1210,7 @@ class MetaBuildWrapper(object):
def Fetch(self, url):
# This function largely exists so it can be overridden for testing.
- f = urllib2.urlopen(url)
+ f = urlopen(url)
contents = f.read()
f.close()
return contents
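
Editor's note: the import block added to mb.py above is the usual py2/py3 shim: on Python 3 the symbols live in urllib.parse and urllib.request, on Python 2 both come from urllib2. A compact sketch of the same pattern, with a stand-in URL:

    try:                                   # Python 3
        from urllib.parse import quote
        from urllib.request import urlopen
    except ImportError:                    # Python 2
        from urllib2 import quote, urlopen

    url = quote("https://example.com/builders/_all?as_text=1", safe=":/()?=")
    contents = urlopen(url).read()
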
diff --git a/chromium/v8/tools/profile.mjs b/chromium/v8/tools/profile.mjs
index f4be41e2da3..8001f4b5aeb 100644
--- a/chromium/v8/tools/profile.mjs
+++ b/chromium/v8/tools/profile.mjs
@@ -151,6 +151,14 @@ export class Profile {
scripts_ = [];
urlToScript_ = new Map();
+ serializeVMSymbols() {
+ let result = this.codeMap_.getAllStaticEntriesWithAddresses();
+    result = result.concat(this.codeMap_.getAllLibraryEntriesWithAddresses());
+ return result.map(([startAddress, codeEntry]) => {
+ return [codeEntry.getName(), startAddress, startAddress + codeEntry.size]
+ });
+ }
+
/**
* Returns whether a function with the specified name must be skipped.
   * Should be overridden by subclasses.
@@ -182,7 +190,6 @@ export class Profile {
COMPILED: 0,
IGNITION: 1,
BASELINE: 2,
- NATIVE_CONTEXT_INDEPENDENT: 3,
TURBOPROP: 4,
TURBOFAN: 5,
}
@@ -198,8 +205,6 @@ export class Profile {
return this.CodeState.IGNITION;
case '^':
return this.CodeState.BASELINE;
- case '-':
- return this.CodeState.NATIVE_CONTEXT_INDEPENDENT;
case '+':
return this.CodeState.TURBOPROP;
case '*':
@@ -215,8 +220,6 @@ export class Profile {
return "Unopt";
} else if (state === this.CodeState.BASELINE) {
return "Baseline";
- } else if (state === this.CodeState.NATIVE_CONTEXT_INDEPENDENT) {
- return "NCI";
} else if (state === this.CodeState.TURBOPROP) {
return "Turboprop";
} else if (state === this.CodeState.TURBOFAN) {
diff --git a/chromium/v8/tools/profview/profile-utils.js b/chromium/v8/tools/profview/profile-utils.js
index 35fe3d7cb25..bd9f248b54d 100644
--- a/chromium/v8/tools/profview/profile-utils.js
+++ b/chromium/v8/tools/profview/profile-utils.js
@@ -6,11 +6,12 @@
let codeKinds = [
"UNKNOWN",
- "CPPPARSE",
- "CPPCOMPBC",
- "CPPCOMP",
- "CPPGC",
- "CPPEXT",
+ "CPP_PARSE",
+ "CPP_COMP_BC",
+ "CPP_COMP_BASELINE",
+ "CPP_COMP",
+ "CPP_GC",
+ "CPP_EXT",
"CPP",
"LIB",
"IC",
@@ -18,11 +19,10 @@ let codeKinds = [
"STUB",
"BUILTIN",
"REGEXP",
- "JSOPT",
- "JSUNOPT",
- "JSNCI",
- "JSTURBOPROP",
- "JSBASELINE",
+ "JS_OPT",
+ "JS_UNOPT",
+ "JS_TURBOPROP",
+ "JS_BASELINE",
];
function resolveCodeKind(code) {
@@ -53,17 +53,15 @@ function resolveCodeKind(code) {
return "CODE";
} else if (code.type === "JS") {
if (code.kind === "Builtin") {
- return "JSUNOPT";
+ return "JS_UNOPT";
} else if (code.kind === "Opt") {
- return "JSOPT";
+ return "JS_OPT";
} else if (code.kind === "Unopt") {
- return "JSUNOPT";
- } else if (code.kind === "NCI") {
- return "JSNCI";
+ return "JS_UNOPT";
} else if (code.kind === "Baseline") {
- return "JSBASELINE";
+ return "JS_BASELINE";
} else if (code.kind === "Turboprop") {
- return "JSTURBOPROP";
+ return "JS_TURBOPROP";
}
}
console.log("Unknown code type '" + type + "'.");
@@ -73,16 +71,17 @@ function resolveCodeKindAndVmState(code, vmState) {
let kind = resolveCodeKind(code);
if (kind === "CPP") {
if (vmState === 1) {
- kind = "CPPGC";
+ kind = "CPP_GC";
} else if (vmState === 2) {
- kind = "CPPPARSE";
+ kind = "CPP_PARSE";
} else if (vmState === 3) {
- kind = "CPPCOMPBC";
+ kind = "CPP_COMP_BC";
} else if (vmState === 4) {
- kind = "CPPCOMP";
+ kind = "CPP_COMP";
} else if (vmState === 6) {
- kind = "CPPEXT";
+ kind = "CPP_EXT";
}
+ // TODO(cbruni): add CPP_COMP_BASELINE
}
return kind;
}
@@ -272,20 +271,20 @@ function buildCategoryTreeAndLookup() {
}
root.children.push(n);
}
- addCategory("JS Optimized", [ "JSOPT" ]);
- addCategory("JS NCI", [ "JSNCI" ]);
- addCategory("JS Turboprop", [ "JSTURBOPROP" ]);
- addCategory("JS Baseline", [ "JSBASELINE" ]);
- addCategory("JS Unoptimized", [ "JSUNOPT", "BC" ]);
+ addCategory("JS Optimized", [ "JS_OPT" ]);
+ addCategory("JS Turboprop", [ "JS_TURBOPROP" ]);
+ addCategory("JS Baseline", [ "JS_BASELINE" ]);
+ addCategory("JS Unoptimized", [ "JS_UNOPT", "BC" ]);
addCategory("IC", [ "IC" ]);
addCategory("RegExp", [ "REGEXP" ]);
addCategory("Other generated", [ "STUB", "BUILTIN" ]);
addCategory("C++", [ "CPP", "LIB" ]);
- addCategory("C++/GC", [ "CPPGC" ]);
- addCategory("C++/Parser", [ "CPPPARSE" ]);
- addCategory("C++/Bytecode compiler", [ "CPPCOMPBC" ]);
- addCategory("C++/Compiler", [ "CPPCOMP" ]);
- addCategory("C++/External", [ "CPPEXT" ]);
+ addCategory("C++/GC", [ "CPP_GC" ]);
+ addCategory("C++/Parser", [ "CPP_PARSE" ]);
+ addCategory("C++/Bytecode Compiler", [ "CPP_COMP_BC" ]);
+ addCategory("C++/Baseline Compiler", [ "CPP_COMP_BASELINE" ]);
+ addCategory("C++/Compiler", [ "CPP_COMP" ]);
+ addCategory("C++/External", [ "CPP_EXT" ]);
addCategory("Unknown", [ "UNKNOWN" ]);
return { categories, root };
diff --git a/chromium/v8/tools/profview/profview.js b/chromium/v8/tools/profview/profview.js
index 15a74f72405..60fc05e7274 100644
--- a/chromium/v8/tools/profview/profview.js
+++ b/chromium/v8/tools/profview/profview.js
@@ -212,63 +212,91 @@ let main = {
const CATEGORY_COLOR = "#f5f5f5";
const bucketDescriptors =
- [ { kinds : [ "JSOPT" ],
- color : "#64dd17",
- backgroundColor : "#80e27e",
- text : "JS Optimized" },
- { kinds : [ "JSNCI" ],
- color : "#3289a8",
- backgroundColor : "#3289a8",
- text : "JS NCI" },
- { kinds : [ "JSTURBOPROP" ],
- color : "#693eb8",
- backgroundColor : "#a6c452",
- text : "JS Turboprop" },
- { kinds : [ "JSBASELINE" ],
- color : "#b3005b",
- backgroundColor : "#ff9e80",
- text : "JS Baseline" },
- { kinds : [ "JSUNOPT", "BC" ],
- color : "#dd2c00",
- backgroundColor : "#ff9e80",
- text : "JS Unoptimized" },
- { kinds : [ "IC" ],
- color : "#ff6d00",
- backgroundColor : "#ffab40",
- text : "IC" },
- { kinds : [ "STUB", "BUILTIN", "REGEXP" ],
- color : "#ffd600",
- backgroundColor : "#ffea00",
- text : "Other generated" },
- { kinds : [ "CPP", "LIB" ],
- color : "#304ffe",
- backgroundColor : "#6ab7ff",
- text : "C++" },
- { kinds : [ "CPPEXT" ],
- color : "#003c8f",
- backgroundColor : "#c0cfff",
- text : "C++/external" },
- { kinds : [ "CPPPARSE" ],
- color : "#aa00ff",
- backgroundColor : "#ffb2ff",
- text : "C++/Parser" },
- { kinds : [ "CPPCOMPBC" ],
- color : "#43a047",
- backgroundColor : "#88c399",
- text : "C++/Bytecode compiler" },
- { kinds : [ "CPPCOMP" ],
- color : "#00e5ff",
- backgroundColor : "#6effff",
- text : "C++/Compiler" },
- { kinds : [ "CPPGC" ],
- color : "#6200ea",
- backgroundColor : "#e1bee7",
- text : "C++/GC" },
- { kinds : [ "UNKNOWN" ],
- color : "#bdbdbd",
- backgroundColor : "#efefef",
- text : "Unknown" }
- ];
+ [{
+ kinds: ["JS_OPT"],
+ color: "#64dd17",
+ backgroundColor: "#80e27e",
+ text: "JS Optimized"
+ },
+ {
+ kinds: ["JS_TURBOPROP"],
+ color: "#693eb8",
+ backgroundColor: "#a6c452",
+ text: "JS Turboprop"
+ },
+ {
+ kinds: ["JS_BASELINE"],
+ color: "#b3005b",
+ backgroundColor: "#ff9e80",
+ text: "JS Baseline"
+ },
+ {
+ kinds: ["JS_UNOPT", "BC"],
+ color: "#dd2c00",
+ backgroundColor: "#ff9e80",
+ text: "JS Unoptimized"
+ },
+ {
+ kinds: ["IC"],
+ color: "#ff6d00",
+ backgroundColor: "#ffab40",
+ text: "IC"
+ },
+ {
+ kinds: ["STUB", "BUILTIN", "REGEXP"],
+ color: "#ffd600",
+ backgroundColor: "#ffea00",
+ text: "Other generated"
+ },
+ {
+ kinds: ["CPP", "LIB"],
+ color: "#304ffe",
+ backgroundColor: "#6ab7ff",
+ text: "C++"
+ },
+ {
+ kinds: ["CPP_EXT"],
+ color: "#003c8f",
+ backgroundColor: "#c0cfff",
+ text: "C++/external"
+ },
+ {
+ kinds: ["CPP_PARSE"],
+ color: "#aa00ff",
+ backgroundColor: "#ffb2ff",
+ text: "C++/Parser"
+ },
+ {
+ kinds: ["CPP_COMP_BC"],
+ color: "#43a047",
+ backgroundColor: "#88c399",
+ text: "C++/Bytecode compiler"
+ },
+ {
+ kinds: ["CPP_COMP_BASELINE"],
+ color: "#43a047",
+ backgroundColor: "#5a8000",
+ text: "C++/Baseline compiler"
+ },
+ {
+ kinds: ["CPP_COMP"],
+ color: "#00e5ff",
+ backgroundColor: "#6effff",
+ text: "C++/Compiler"
+ },
+ {
+ kinds: ["CPP_GC"],
+ color: "#6200ea",
+ backgroundColor: "#e1bee7",
+ text: "C++/GC"
+ },
+ {
+ kinds: ["UNKNOWN"],
+ color: "#bdbdbd",
+ backgroundColor: "#efefef",
+ text: "Unknown"
+ }
+ ];
let kindToBucketDescriptor = {};
for (let i = 0; i < bucketDescriptors.length; i++) {
@@ -294,15 +322,17 @@ function codeTypeToText(type) {
switch (type) {
case "UNKNOWN":
return "Unknown";
- case "CPPPARSE":
+ case "CPP_PARSE":
return "C++ Parser";
- case "CPPCOMPBC":
- return "C++ Bytecode Compiler)";
- case "CPPCOMP":
+ case "CPP_COMP_BASELINE":
+ return "C++ Baseline Compiler";
+ case "CPP_COMP_BC":
+ return "C++ Bytecode Compiler";
+ case "CPP_COMP":
return "C++ Compiler";
- case "CPPGC":
+ case "CPP_GC":
return "C++ GC";
- case "CPPEXT":
+ case "CPP_EXT":
return "C++ External";
case "CPP":
return "C++";
@@ -318,13 +348,13 @@ function codeTypeToText(type) {
return "Builtin";
case "REGEXP":
return "RegExp";
- case "JSOPT":
+ case "JS_OPT":
return "JS opt";
- case "JSNCI":
- return "JS NCI";
- case "JSTURBOPROP":
+ case "JS_TURBOPROP":
return "JS Turboprop";
- case "JSUNOPT":
+ case "JS_BASELINE":
+ return "JS Baseline";
+ case "JS_UNOPT":
return "JS unopt";
}
console.error("Unknown type: " + type);
diff --git a/chromium/v8/tools/release/auto_roll.py b/chromium/v8/tools/release/auto_roll.py
index ffba545c8f2..76247b1fb3e 100755
--- a/chromium/v8/tools/release/auto_roll.py
+++ b/chromium/v8/tools/release/auto_roll.py
@@ -126,7 +126,7 @@ class UpdateChromiumCheckout(Step):
def RunStep(self):
self['json_output']['monitoring_state'] = 'update_chromium'
cwd = self._options.chromium
- self.GitCheckout("master", cwd=cwd)
+ self.GitCheckout("main", cwd=cwd)
self.DeleteBranch("work-branch", cwd=cwd)
self.GitPull(cwd=cwd)
@@ -168,7 +168,7 @@ class UploadCL(Step):
else:
print("Dry run - don't upload.")
- self.GitCheckout("master", cwd=cwd)
+ self.GitCheckout("main", cwd=cwd)
self.GitDeleteBranch("work-branch", cwd=cwd)
class CleanUp(Step):
diff --git a/chromium/v8/tools/release/test_scripts.py b/chromium/v8/tools/release/test_scripts.py
index bfac9a4a34a..7cf5d141301 100755
--- a/chromium/v8/tools/release/test_scripts.py
+++ b/chromium/v8/tools/release/test_scripts.py
@@ -612,7 +612,7 @@ deps = {
Cmd("git describe --tags roll_hsh", "3.22.4"),
Cmd("git describe --tags last_roll_hsh", "3.22.2.1"),
Cmd("git status -s -uno", "", cwd=chrome_dir),
- Cmd("git checkout -f master", "", cwd=chrome_dir),
+ Cmd("git checkout -f main", "", cwd=chrome_dir),
Cmd("git branch", "", cwd=chrome_dir),
Cmd("git pull", "", cwd=chrome_dir),
Cmd("git fetch origin", ""),
@@ -626,7 +626,7 @@ deps = {
Cmd("git cl upload --send-mail -f "
"--cq-dry-run --set-bot-commit --bypass-hooks", "",
cwd=chrome_dir),
- Cmd("git checkout -f master", "", cwd=chrome_dir),
+ Cmd("git checkout -f main", "", cwd=chrome_dir),
Cmd("git branch -D work-branch", "", cwd=chrome_dir),
]
self.Expect(expectations)
diff --git a/chromium/v8/tools/system-analyzer/index.css b/chromium/v8/tools/system-analyzer/index.css
index ad3f24d27ba..53110079b2c 100644
--- a/chromium/v8/tools/system-analyzer/index.css
+++ b/chromium/v8/tools/system-analyzer/index.css
@@ -68,7 +68,7 @@ section {
border-radius: 8px;
cursor: pointer;
}
-::-webkit-scrollbar-thumb:hover {
+::-webkit-scrollbar-thumb:hover {
background-color: rgba(128, 128, 128, 0.8);
}
diff --git a/chromium/v8/tools/system-analyzer/index.mjs b/chromium/v8/tools/system-analyzer/index.mjs
index 4b0ed9f9c5f..550838118b3 100644
--- a/chromium/v8/tools/system-analyzer/index.mjs
+++ b/chromium/v8/tools/system-analyzer/index.mjs
@@ -110,7 +110,7 @@ class App {
entries = entry.entries.concat(entry.sourcePositions);
break;
default:
- throw new Error('Unknown selection type!');
+ throw new Error(`Unknown selection type: ${entry.constructor?.name}`);
}
if (entry.sourcePosition) {
entries.push(entry.sourcePosition);
@@ -134,7 +134,11 @@ class App {
}
selectEntriesOfSingleType(entries, type) {
- switch (entries[0]?.constructor ?? type) {
+ const entryType = entries[0]?.constructor ?? type;
+ switch (entryType) {
+ case Script:
+ entries = entries.flatMap(script => script.sourcePositions);
+ return this.showSourcePositions(entries);
case SourcePosition:
return this.showSourcePositions(entries);
case MapLogEntry:
@@ -148,7 +152,7 @@ class App {
case DeoptLogEntry:
return this.showDeoptEntries(entries);
default:
- throw new Error('Unknown selection type!');
+ throw new Error(`Unknown selection type: ${entryType?.name}`);
}
}
@@ -205,6 +209,8 @@ class App {
focusLogEntry(entry) {
switch (entry.constructor) {
+ case Script:
+ return this.focusSourcePosition(entry.sourcePositions[0]);
case SourcePosition:
return this.focusSourcePosition(entry);
case MapLogEntry:
@@ -218,7 +224,7 @@ class App {
case DeoptLogEntry:
return this.focusDeoptLogEntry(entry);
default:
- throw new Error('Unknown selection type!');
+ throw new Error(`Unknown selection type: ${entry.constructor?.name}`);
}
}
diff --git a/chromium/v8/tools/system-analyzer/processor.mjs b/chromium/v8/tools/system-analyzer/processor.mjs
index 9685e09ad6b..806cba132c8 100644
--- a/chromium/v8/tools/system-analyzer/processor.mjs
+++ b/chromium/v8/tools/system-analyzer/processor.mjs
@@ -230,17 +230,21 @@ export class Processor extends LogReader {
this.addSourcePosition(codeEntry, logEntry);
logEntry.functionSourcePosition = logEntry.sourcePosition;
// custom parse deopt location
- if (deoptLocation !== '<unknown>') {
- const colSeparator = deoptLocation.lastIndexOf(':');
- const rowSeparator = deoptLocation.lastIndexOf(':', colSeparator - 1);
- const script = this.getScript(deoptLocation.substring(1, rowSeparator));
- const line =
- parseInt(deoptLocation.substring(rowSeparator + 1, colSeparator));
- const column = parseInt(
- deoptLocation.substring(colSeparator + 1, deoptLocation.length - 1));
- logEntry.sourcePosition =
- script.addSourcePosition(line, column, logEntry);
+ if (deoptLocation === '<unknown>') return;
+ // Handle deopt location for inlined code: <location> inlined at <location>
+ const inlinedPos = deoptLocation.indexOf(' inlined at ');
+ if (inlinedPos > 0) {
+      deoptLocation = deoptLocation.substring(0, inlinedPos);
}
+ const colSeparator = deoptLocation.lastIndexOf(':');
+ const rowSeparator = deoptLocation.lastIndexOf(':', colSeparator - 1);
+ const script = this.getScript(deoptLocation.substring(1, rowSeparator));
+ if (!script) return;
+ const line =
+ parseInt(deoptLocation.substring(rowSeparator + 1, colSeparator));
+ const column = parseInt(
+ deoptLocation.substring(colSeparator + 1, deoptLocation.length - 1));
+ logEntry.sourcePosition = script.addSourcePosition(line, column, logEntry);
}
processScriptSource(scriptId, url, source) {
diff --git a/chromium/v8/tools/system-analyzer/view/code-panel-template.html b/chromium/v8/tools/system-analyzer/view/code-panel-template.html
index e04c6be8c18..3622c25062c 100644
--- a/chromium/v8/tools/system-analyzer/view/code-panel-template.html
+++ b/chromium/v8/tools/system-analyzer/view/code-panel-template.html
@@ -11,7 +11,9 @@ found in the LICENSE file. -->
}
</style>
<div class="panel">
- <h2>Code Panel</h2>
+ <input type="checkbox" id="closer" class="panelCloserInput" checked>
+ <label class="panelCloserLabel" for="closer">â–¼</label>
+ <h2 class="title">Code Panel</h2>
<div class="selection">
<select id="codeSelect"></select>
<button id="selectedRelatedButton">Select Related Events</button>
diff --git a/chromium/v8/tools/system-analyzer/view/code-panel.mjs b/chromium/v8/tools/system-analyzer/view/code-panel.mjs
index 3b5261e03c3..34e4ad300e4 100644
--- a/chromium/v8/tools/system-analyzer/view/code-panel.mjs
+++ b/chromium/v8/tools/system-analyzer/view/code-panel.mjs
@@ -5,11 +5,11 @@ import {IcLogEntry} from '../log/ic.mjs';
import {MapLogEntry} from '../log/map.mjs';
import {FocusEvent, SelectionEvent, ToolTipEvent} from './events.mjs';
-import {delay, DOM, formatBytes, formatMicroSeconds, V8CustomElement} from './helper.mjs';
+import {CollapsableElement, delay, DOM, formatBytes, formatMicroSeconds} from './helper.mjs';
DOM.defineCustomElement('view/code-panel',
(templateText) =>
- class CodePanel extends V8CustomElement {
+ class CodePanel extends CollapsableElement {
_timeline;
_selectedEntries;
_entry;
@@ -24,19 +24,17 @@ DOM.defineCustomElement('view/code-panel',
set timeline(timeline) {
this._timeline = timeline;
this.$('.panel').style.display = timeline.isEmpty() ? 'none' : 'inherit';
- this.update();
+ this.requestUpdate();
}
set selectedEntries(entries) {
this._selectedEntries = entries;
- // TODO: add code selection dropdown
- this._updateSelect();
this.entry = entries.first();
}
set entry(entry) {
this._entry = entry;
- this.update();
+ this.requestUpdate();
}
get _disassemblyNode() {
@@ -52,12 +50,15 @@ DOM.defineCustomElement('view/code-panel',
}
_update() {
+ this._updateSelect();
this._disassemblyNode.innerText = this._entry?.disassemble ?? '';
this._sourceNode.innerText = this._entry?.source ?? '';
}
_updateSelect() {
const select = this._codeSelectNode;
+ if (select.data === this._selectedEntries) return;
+ select.data = this._selectedEntries;
select.options.length = 0;
const sorted =
this._selectedEntries.slice().sort((a, b) => a.time - b.time);
diff --git a/chromium/v8/tools/system-analyzer/view/helper.mjs b/chromium/v8/tools/system-analyzer/view/helper.mjs
index 780864ef5a0..44da06ced4d 100644
--- a/chromium/v8/tools/system-analyzer/view/helper.mjs
+++ b/chromium/v8/tools/system-analyzer/view/helper.mjs
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-class CSSColor {
+export class CSSColor {
static _cache = new Map();
static get(name) {
@@ -121,7 +121,7 @@ class CSSColor {
}
}
-class DOM {
+export class DOM {
static element(type, classes) {
const node = document.createElement(type);
if (classes === undefined) return node;
@@ -185,19 +185,18 @@ class DOM {
}
}
-function $(id) {
+export function $(id) {
return document.querySelector(id)
}
-class V8CustomElement extends HTMLElement {
+export class V8CustomElement extends HTMLElement {
_updateTimeoutId;
- _updateCallback = this._update.bind(this);
+ _updateCallback = this.forceUpdate.bind(this);
constructor(templateText) {
super();
const shadowRoot = this.attachShadow({mode: 'open'});
shadowRoot.innerHTML = templateText;
- this._updateCallback = this._update.bind(this);
}
$(id) {
@@ -208,7 +207,7 @@ class V8CustomElement extends HTMLElement {
return this.shadowRoot.querySelectorAll(query);
}
- update(useAnimation = false) {
+ requestUpdate(useAnimation = false) {
if (useAnimation) {
window.cancelAnimationFrame(this._updateTimeoutId);
this._updateTimeoutId =
@@ -221,12 +220,54 @@ class V8CustomElement extends HTMLElement {
}
}
+ forceUpdate() {
+ this._update();
+ }
+
_update() {
throw Error('Subclass responsibility');
}
}
-class Chunked {
+export class CollapsableElement extends V8CustomElement {
+ constructor(templateText) {
+ super(templateText);
+ this._hasPendingUpdate = false;
+ this._closer.onclick = _ => this.tryUpdateOnVisibilityChange();
+ }
+
+ get _closer() {
+ return this.$('#closer');
+ }
+
+ _contentIsVisible() {
+ return !this._closer.checked;
+ }
+
+ requestUpdate(useAnimation = false) {
+ // A pending update will be resolved later, no need to try again.
+ if (this._hasPendingUpdate) return;
+ this._hasPendingUpdate = true;
+ this.requestUpdateIfVisible(useAnimation);
+ }
+
+ tryUpdateOnVisibilityChange() {
+ if (!this._hasPendingUpdate) return;
+ this.requestUpdateIfVisible(true);
+ }
+
+ requestUpdateIfVisible(useAnimation) {
+ if (!this._contentIsVisible()) return;
+ return super.requestUpdate(useAnimation);
+ }
+
+ forceUpdate() {
+ this._hasPendingUpdate = false;
+ super.forceUpdate();
+ }
+}
+
+export class Chunked {
constructor(iterable, limit) {
this._iterator = iterable[Symbol.iterator]();
this._limit = limit;
@@ -248,7 +289,7 @@ class Chunked {
}
}
-class LazyTable {
+export class LazyTable {
constructor(table, rowData, rowElementCreator, limit = 100) {
this._table = table;
this._chunkedRowData = new Chunked(rowData, limit);
@@ -258,22 +299,25 @@ class LazyTable {
} else {
table.replaceChild(DOM.tbody(), table.tBodies[0]);
}
- if (!table.tFoot) {
- const td = table.appendChild(DOM.element('tfoot'))
- .appendChild(DOM.tr())
- .appendChild(DOM.td());
- for (let count of [10, 100]) {
- const button = DOM.element('button');
- button.innerText = `+${count}`;
- button.onclick = (e) => this._addMoreRows(count);
- td.appendChild(button);
- }
- td.setAttribute('colspan', 100);
- }
+ if (!table.tFoot) this._addFooter();
table.tFoot.addEventListener('click', this._clickHandler);
this._addMoreRows();
}
+ _addFooter() {
+ const td = DOM.td();
+ td.setAttribute('colspan', 100);
+ for (let addCount of [10, 100, 250, 500]) {
+ const button = DOM.element('button');
+ button.innerText = `+${addCount}`;
+ button.onclick = (e) => this._addMoreRows(addCount);
+ td.appendChild(button);
+ }
+ this._table.appendChild(DOM.element('tfoot'))
+ .appendChild(DOM.tr())
+ .appendChild(td);
+ }
+
_addMoreRows(count = undefined) {
const fragment = new DocumentFragment();
for (let row of this._chunkedRowData.next(count)) {
@@ -304,11 +348,4 @@ export function gradientStopsFromGroups(
return stops;
}
-export * from '../helper.mjs';
-export {
- DOM,
- $,
- V8CustomElement,
- CSSColor,
- LazyTable,
-};
+export * from '../helper.mjs'; \ No newline at end of file
diff --git a/chromium/v8/tools/system-analyzer/view/list-panel-template.html b/chromium/v8/tools/system-analyzer/view/list-panel-template.html
index 4714f97c02f..fb38f5b4598 100644
--- a/chromium/v8/tools/system-analyzer/view/list-panel-template.html
+++ b/chromium/v8/tools/system-analyzer/view/list-panel-template.html
@@ -30,11 +30,13 @@ found in the LICENSE file. -->
text-align: left;
cursor: -webkit-zoom-in;
color: rgba(var(--border-color), 1);
+ user-select: none;
}
.toggle::before {
content: "â–¶";
}
+
.open .toggle::before {
content: "â–¼";
}
@@ -58,7 +60,7 @@ found in the LICENSE file. -->
</style>
<div class="panel">
- <input type="checkbox" id="closer" class="panelCloserInput">
+ <input type="checkbox" id="closer" class="panelCloserInput" checked>
<label class="panelCloserLabel" for="closer">â–¼</label>
<h2 id="title"></h2>
<div class="selection">
diff --git a/chromium/v8/tools/system-analyzer/view/list-panel.mjs b/chromium/v8/tools/system-analyzer/view/list-panel.mjs
index 85e3cd47e28..bc3b2f89d58 100644
--- a/chromium/v8/tools/system-analyzer/view/list-panel.mjs
+++ b/chromium/v8/tools/system-analyzer/view/list-panel.mjs
@@ -5,23 +5,24 @@
import {Script, SourcePosition} from '../../profile.mjs';
import {LogEntry} from '../log/log.mjs';
-import {FocusEvent} from './events.mjs';
+import {FocusEvent, ToolTipEvent} from './events.mjs';
import {groupBy, LazyTable} from './helper.mjs';
-import {DOM, V8CustomElement} from './helper.mjs';
+import {CollapsableElement, DOM} from './helper.mjs';
DOM.defineCustomElement('view/list-panel',
(templateText) =>
- class ListPanel extends V8CustomElement {
+ class ListPanel extends CollapsableElement {
_selectedLogEntries = [];
_displayedLogEntries = [];
_timeline;
_detailsClickHandler = this._handleDetailsClick.bind(this);
_logEntryClickHandler = this._handleLogEntryClick.bind(this);
+ _logEntryMouseOverHandler = this._logEntryMouseOverHandler.bind(this);
constructor() {
super(templateText);
- this.groupKey.addEventListener('change', e => this.update());
+ this.groupKey.addEventListener('change', e => this.requestUpdate());
this.showAllRadio.onclick = _ => this._showEntries(this._timeline);
this.showTimerangeRadio.onclick = _ =>
this._showEntries(this._timeline.selectionOrSelf);
@@ -72,9 +73,11 @@ DOM.defineCustomElement('view/list-panel',
get showAllRadio() {
return this.$('#show-all');
}
+
get showTimerangeRadio() {
return this.$('#show-timerange');
}
+
get showSelectionRadio() {
return this.$('#show-selection');
}
@@ -95,7 +98,7 @@ DOM.defineCustomElement('view/list-panel',
_showEntries(entries) {
this._displayedLogEntries = entries;
- this.update();
+ this.requestUpdate();
}
_update() {
@@ -123,6 +126,12 @@ DOM.defineCustomElement('view/list-panel',
this.dispatchEvent(new FocusEvent(group.key));
}
+ _logEntryMouseOverHandler(e) {
+ const group = e.currentTarget.group;
+ this.dispatchEvent(
+ new ToolTipEvent(group.key.toStringLong(), e.currentTarget));
+ }
+
_handleDetailsClick(event) {
event.stopPropagation();
const tr = event.target.parentNode;
@@ -182,6 +191,7 @@ DOM.defineCustomElement('view/list-panel',
const valueTd = tr.appendChild(DOM.td(`${group.key}`, 'key'));
if (this._isClickable(group.key)) {
tr.onclick = this._logEntryClickHandler;
+ tr.onmouseover = this._logEntryMouseOverHandler;
valueTd.classList.add('clickable');
}
return tr;
diff --git a/chromium/v8/tools/system-analyzer/view/map-panel-template.html b/chromium/v8/tools/system-analyzer/view/map-panel-template.html
index 8a2b23ee3d4..4790e376f99 100644
--- a/chromium/v8/tools/system-analyzer/view/map-panel-template.html
+++ b/chromium/v8/tools/system-analyzer/view/map-panel-template.html
@@ -11,7 +11,7 @@ found in the LICENSE file. -->
}
</style>
<div class="panel">
- <input type="checkbox" id="closer" class="panelCloserInput">
+ <input type="checkbox" id="closer" class="panelCloserInput" checked>
<label class="panelCloserLabel" for="closer">â–¼</label>
<h2>Map Panel</h2>
<div class="selection">
diff --git a/chromium/v8/tools/system-analyzer/view/map-panel.mjs b/chromium/v8/tools/system-analyzer/view/map-panel.mjs
index 7ee2325f34c..be00ef01246 100644
--- a/chromium/v8/tools/system-analyzer/view/map-panel.mjs
+++ b/chromium/v8/tools/system-analyzer/view/map-panel.mjs
@@ -7,100 +7,103 @@ import './map-panel/map-transitions.mjs';
import {MapLogEntry} from '../log/map.mjs';
import {FocusEvent} from './events.mjs';
-import {DOM, V8CustomElement} from './helper.mjs';
-
-DOM.defineCustomElement(
- 'view/map-panel', (templateText) => class MapPanel extends V8CustomElement {
- _map;
- _timeline;
- _selectedLogEntries = [];
- _displayedLogEntries = [];
-
- constructor() {
- super(templateText);
- this.searchBarBtn.addEventListener('click', e => this._handleSearch(e));
- this.showAllRadio.onclick = _ => this._showEntries(this._timeline);
- this.showTimerangeRadio.onclick = _ =>
- this._showEntries(this._timeline.selectionOrSelf);
- this.showSelectionRadio.onclick = _ =>
- this._showEntries(this._selectedLogEntries);
- }
-
- get showAllRadio() {
- return this.$('#show-all');
- }
- get showTimerangeRadio() {
- return this.$('#show-timerange');
- }
- get showSelectionRadio() {
- return this.$('#show-selection');
- }
-
- get mapTransitionsPanel() {
- return this.$('#map-transitions');
- }
-
- get mapDetailsTransitionsPanel() {
- return this.$('#map-details-transitions');
- }
-
- get mapDetailsPanel() {
- return this.$('#map-details');
- }
-
- get searchBarBtn() {
- return this.$('#searchBarBtn');
- }
-
- get searchBar() {
- return this.$('#searchBar');
- }
-
- set timeline(timeline) {
- console.assert(timeline !== undefined, 'timeline undefined!');
- this._timeline = timeline;
- this.$('.panel').style.display =
- timeline.isEmpty() ? 'none' : 'inherit';
- this.mapTransitionsPanel.timeline = timeline;
- this.mapDetailsTransitionsPanel.timeline = timeline;
- }
-
- set selectedLogEntries(entries) {
- if (entries === this._timeline.selection) {
- this.showTimerangeRadio.click();
- } else if (entries == this._timeline) {
- this.showAllRadio.click();
- } else {
- this._selectedLogEntries = entries;
- this.showSelectionRadio.click();
- }
- }
-
- set map(map) {
- this._map = map;
- this.mapDetailsTransitionsPanel.selectedLogEntries = [map];
- this.mapDetailsPanel.map = map;
- }
-
- _showEntries(entries) {
- this._displayedLogEntries = entries;
- this.mapTransitionsPanel.selectedLogEntries = entries;
- }
-
- update() {
- // nothing to do
- }
-
- _handleSearch(e) {
- let searchBar = this.$('#searchBarInput');
- let searchBarInput = searchBar.value;
- // access the map from model cache
- let selectedMap = MapLogEntry.get(searchBarInput);
- if (selectedMap) {
- searchBar.className = 'success';
- this.dispatchEvent(new FocusEvent(selectedMap));
- } else {
- searchBar.className = 'failure';
- }
- }
- });
+import {CollapsableElement, DOM} from './helper.mjs';
+
+DOM.defineCustomElement('view/map-panel',
+ (templateText) =>
+ class MapPanel extends CollapsableElement {
+ _map;
+ _timeline;
+ _selectedLogEntries = [];
+ _displayedLogEntries = [];
+
+ constructor() {
+ super(templateText);
+ this.searchBarBtn.addEventListener('click', e => this._handleSearch(e));
+ this.showAllRadio.onclick = _ => this._showEntries(this._timeline);
+ this.showTimerangeRadio.onclick = _ =>
+ this._showEntries(this._timeline.selectionOrSelf);
+ this.showSelectionRadio.onclick = _ =>
+ this._showEntries(this._selectedLogEntries);
+ }
+
+ get showAllRadio() {
+ return this.$('#show-all');
+ }
+
+ get showTimerangeRadio() {
+ return this.$('#show-timerange');
+ }
+
+ get showSelectionRadio() {
+ return this.$('#show-selection');
+ }
+
+ get mapTransitionsPanel() {
+ return this.$('#map-transitions');
+ }
+
+ get mapDetailsTransitionsPanel() {
+ return this.$('#map-details-transitions');
+ }
+
+ get mapDetailsPanel() {
+ return this.$('#map-details');
+ }
+
+ get searchBarBtn() {
+ return this.$('#searchBarBtn');
+ }
+
+ get searchBar() {
+ return this.$('#searchBar');
+ }
+
+ set timeline(timeline) {
+ console.assert(timeline !== undefined, 'timeline undefined!');
+ this._timeline = timeline;
+ this.$('.panel').style.display = timeline.isEmpty() ? 'none' : 'inherit';
+ this.mapTransitionsPanel.timeline = timeline;
+ this.mapDetailsTransitionsPanel.timeline = timeline;
+ }
+
+ set selectedLogEntries(entries) {
+ if (entries === this._timeline.selection) {
+ this.showTimerangeRadio.click();
+ } else if (entries == this._timeline) {
+ this.showAllRadio.click();
+ } else {
+ this._selectedLogEntries = entries;
+ this.showSelectionRadio.click();
+ }
+ }
+
+ set map(map) {
+ this._map = map;
+ this.requestUpdate();
+ }
+
+ _showEntries(entries) {
+ this._displayedLogEntries = entries;
+ this.requestUpdate();
+ }
+
+ _update() {
+ this.mapDetailsTransitionsPanel.selectedLogEntries = [this._map];
+ this.mapDetailsPanel.map = this._map;
+ this.mapTransitionsPanel.selectedLogEntries = this._displayedLogEntries;
+ }
+
+ _handleSearch(e) {
+ const searchBar = this.$('#searchBarInput');
+ const searchBarInput = searchBar.value;
+ // access the map from model cache
+ const selectedMap = MapLogEntry.get(searchBarInput);
+ if (selectedMap) {
+ searchBar.className = 'success';
+ this.dispatchEvent(new FocusEvent(selectedMap));
+ } else {
+ searchBar.className = 'failure';
+ }
+ }
+});
diff --git a/chromium/v8/tools/system-analyzer/view/map-panel/map-details.mjs b/chromium/v8/tools/system-analyzer/view/map-panel/map-details.mjs
index 446475a5b08..a8ea27e2e4e 100644
--- a/chromium/v8/tools/system-analyzer/view/map-panel/map-details.mjs
+++ b/chromium/v8/tools/system-analyzer/view/map-panel/map-details.mjs
@@ -25,7 +25,7 @@ DOM.defineCustomElement(
set map(map) {
if (this._map === map) return;
this._map = map;
- this.update();
+ this.requestUpdate();
}
_update() {
diff --git a/chromium/v8/tools/system-analyzer/view/map-panel/map-transitions.mjs b/chromium/v8/tools/system-analyzer/view/map-panel/map-transitions.mjs
index f60bd37d392..fbe78f91563 100644
--- a/chromium/v8/tools/system-analyzer/view/map-panel/map-transitions.mjs
+++ b/chromium/v8/tools/system-analyzer/view/map-panel/map-transitions.mjs
@@ -37,7 +37,7 @@ DOM.defineCustomElement(
set selectedLogEntries(list) {
this._selectedLogEntries = list;
- this.update();
+ this.requestUpdate();
}
_update() {
diff --git a/chromium/v8/tools/system-analyzer/view/script-panel-template.html b/chromium/v8/tools/system-analyzer/view/script-panel-template.html
index 27fd3d83eb6..ab5c3f95f35 100644
--- a/chromium/v8/tools/system-analyzer/view/script-panel-template.html
+++ b/chromium/v8/tools/system-analyzer/view/script-panel-template.html
@@ -40,11 +40,15 @@ found in the LICENSE file. -->
.marked {
background-color: var(--secondary-color);
+ box-shadow: 0px 0px 2px 3px var(--secondary-color);
+ animation-name: pulse;
+ animation-duration: 3s;
+ animation-delay: 500ms;
}
@keyframes pulse {
0% {
- box-shadow: 0px 0px 0px 0px var(--secondary-color);
+ box-shadow: 0px 0px 0px 3px var(--secondary-color);
}
5% {
box-shadow: 0px 0px 0px 10px var(--secondary-color);
@@ -56,11 +60,13 @@ found in the LICENSE file. -->
box-shadow: 0px 0px 0px 10px var(--secondary-color);
}
20% {
- box-shadow: 0px 0px 0px 0px var(--secondary-color);
+ box-shadow: 0px 0px 2px 3px var(--secondary-color);
}
}
</style>
<div class="panel">
+ <input type="checkbox" id="closer" class="panelCloserInput" checked>
+ <label class="panelCloserLabel" for="closer">â–¼</label>
<h2>Source Panel</h2>
<div class="selection">
<select id="script-dropdown"></select>
diff --git a/chromium/v8/tools/system-analyzer/view/script-panel.mjs b/chromium/v8/tools/system-analyzer/view/script-panel.mjs
index b0dac6960c3..6f5befcc160 100644
--- a/chromium/v8/tools/system-analyzer/view/script-panel.mjs
+++ b/chromium/v8/tools/system-analyzer/view/script-panel.mjs
@@ -5,11 +5,11 @@ import {groupBy} from '../helper.mjs';
import {App} from '../index.mjs'
import {SelectRelatedEvent, ToolTipEvent} from './events.mjs';
-import {CSSColor, delay, DOM, formatBytes, gradientStopsFromGroups, V8CustomElement} from './helper.mjs';
+import {CollapsableElement, CSSColor, delay, DOM, formatBytes, gradientStopsFromGroups} from './helper.mjs';
DOM.defineCustomElement('view/script-panel',
(templateText) =>
- class SourcePanel extends V8CustomElement {
+ class SourcePanel extends CollapsableElement {
_selectedSourcePositions = [];
_sourcePositionsToMarkNodes = [];
_scripts = [];
@@ -110,7 +110,7 @@ DOM.defineCustomElement('view/script-panel',
if (!sourcePosition) return;
const markNode = this._sourcePositionsToMarkNodes.get(sourcePosition);
markNode.scrollIntoView(
- {behavior: 'smooth', block: 'nearest', inline: 'center'});
+ {behavior: 'auto', block: 'center', inline: 'center'});
}
_handleSelectScript(e) {
diff --git a/chromium/v8/tools/system-analyzer/view/timeline/timeline-track.mjs b/chromium/v8/tools/system-analyzer/view/timeline/timeline-track.mjs
index 60216af2eef..6f05b772d81 100644
--- a/chromium/v8/tools/system-analyzer/view/timeline/timeline-track.mjs
+++ b/chromium/v8/tools/system-analyzer/view/timeline/timeline-track.mjs
@@ -112,7 +112,7 @@ DOM.defineCustomElement('view/timeline/timeline-track',
_updateChunks() {
this._chunks =
this._timeline.chunks(this.nofChunks, this._legend.filterPredicate);
- this.update();
+ this.requestUpdate();
}
get chunks() {
@@ -180,7 +180,7 @@ DOM.defineCustomElement('view/timeline/timeline-track',
node.ondblclick = this._chunkDoubleClickHandler;
}
const style = node.style;
- style.left = `${((chunk.start - start) * this._timeToPixel) | 0}px`;
+ style.left = `${i * kChunkWidth}px`;
style.height = `${height | 0}px`;
style.backgroundImage = this._createBackgroundImage(chunk);
node.chunk = chunk;
diff --git a/chromium/v8/tools/system-analyzer/view/tool-tip-template.html b/chromium/v8/tools/system-analyzer/view/tool-tip-template.html
index b0e9c72c45c..dc56997825f 100644
--- a/chromium/v8/tools/system-analyzer/view/tool-tip-template.html
+++ b/chromium/v8/tools/system-analyzer/view/tool-tip-template.html
@@ -24,6 +24,7 @@ found in the LICENSE file. -->
.textContent {
font-family: monospace;
white-space: pre;
+ overflow-wrap: anywhere;
overflow-x: hidden;
max-width: 500px;
}
diff --git a/chromium/v8/tools/system-analyzer/view/tool-tip.mjs b/chromium/v8/tools/system-analyzer/view/tool-tip.mjs
index 896c04dd298..a01ee1bdbf6 100644
--- a/chromium/v8/tools/system-analyzer/view/tool-tip.mjs
+++ b/chromium/v8/tools/system-analyzer/view/tool-tip.mjs
@@ -16,9 +16,10 @@ DOM.defineCustomElement(
this.hide();
} else {
this.show();
- this.update(true);
+ this.requestUpdate(true);
}
});
+ document.addEventListener('click', (e) => this.hide());
}
_update() {
@@ -31,7 +32,7 @@ DOM.defineCustomElement(
rect.y += rect.height;
}
this._setPosition(rect, atRight, atBottom);
- this.update(true);
+ this.requestUpdate(true);
}
set positionOrTargetNode(positionOrTargetNode) {
@@ -47,7 +48,7 @@ DOM.defineCustomElement(
this._targetNode = targetNode;
if (targetNode) {
this._intersectionObserver.observe(targetNode);
- this.update(true);
+ this.requestUpdate(true);
}
}
diff --git a/chromium/v8/tools/testrunner/base_runner.py b/chromium/v8/tools/testrunner/base_runner.py
index fdaf0370a17..b6889a3773a 100644
--- a/chromium/v8/tools/testrunner/base_runner.py
+++ b/chromium/v8/tools/testrunner/base_runner.py
@@ -172,6 +172,7 @@ class BuildConfig(object):
self.cfi_vptr = build_config['is_cfi']
self.control_flow_integrity = build_config['v8_control_flow_integrity']
self.concurrent_marking = build_config['v8_enable_concurrent_marking']
+ self.single_generation = build_config['v8_enable_single_generation']
self.dcheck_always_on = build_config['dcheck_always_on']
self.gcov_coverage = build_config['is_gcov_coverage']
self.is_android = build_config['is_android']
@@ -189,6 +190,8 @@ class BuildConfig(object):
self.verify_csa = build_config['v8_enable_verify_csa']
self.lite_mode = build_config['v8_enable_lite_mode']
self.pointer_compression = build_config['v8_enable_pointer_compression']
+ self.pointer_compression_shared_cage = build_config['v8_enable_pointer_compression_shared_cage']
+ self.third_party_heap = build_config['v8_enable_third_party_heap']
self.webassembly = build_config['v8_enable_webassembly']
# Export only for MIPS target
if self.arch in ['mips', 'mipsel', 'mips64', 'mips64el']:
@@ -229,6 +232,10 @@ class BuildConfig(object):
detected_options.append('lite_mode')
if self.pointer_compression:
detected_options.append('pointer_compression')
+ if self.pointer_compression_shared_cage:
+ detected_options.append('pointer_compression_shared_cage')
+ if self.third_party_heap:
+ detected_options.append('third_party_heap')
if self.webassembly:
detected_options.append('webassembly')
@@ -351,7 +358,7 @@ class BaseTestRunner(object):
# Progress
parser.add_option("-p", "--progress",
- choices=PROGRESS_INDICATORS.keys(), default="mono",
+ choices=list(PROGRESS_INDICATORS), default="mono",
help="The style of progress indicator (verbose, dots, "
"color, mono)")
parser.add_option("--json-test-results",
@@ -638,7 +645,7 @@ class BaseTestRunner(object):
self.build_config.arch in ['mipsel', 'mips', 'mips64', 'mips64el'] and
self.build_config.mips_arch_variant)
- no_simd_sse = any(
+ no_simd_hardware = any(
i in options.extra_flags for i in ['--noenable-sse3',
'--no-enable-sse3'
'--noenable-ssse3',
@@ -646,10 +653,16 @@ class BaseTestRunner(object):
'--noenable-sse4-1',
'--no-enable-sse4_1'])
- # Set no_simd_sse on architectures without Simd enabled.
+ # Set no_simd_hardware on architectures without Simd enabled.
if self.build_config.arch == 'mips64el' or \
self.build_config.arch == 'mipsel':
- no_simd_sse = not simd_mips
+ no_simd_hardware = not simd_mips
+
+ # Ppc64 processors earlier than POWER9 do not support Simd instructions
+ if self.build_config.arch == 'ppc64' and \
+ not self.build_config.simulator_run and \
+ utils.GuessPowerProcessorVersion() < 9:
+ no_simd_hardware = True
return {
"arch": self.build_config.arch,
@@ -658,6 +671,7 @@ class BaseTestRunner(object):
"cfi_vptr": self.build_config.cfi_vptr,
"control_flow_integrity": self.build_config.control_flow_integrity,
"concurrent_marking": self.build_config.concurrent_marking,
+ "single_generation": self.build_config.single_generation,
"dcheck_always_on": self.build_config.dcheck_always_on,
"deopt_fuzzer": False,
"endurance_fuzzer": False,
@@ -673,7 +687,7 @@ class BaseTestRunner(object):
"msan": self.build_config.msan,
"no_harness": options.no_harness,
"no_i18n": self.build_config.no_i18n,
- "no_simd_sse": no_simd_sse,
+ "no_simd_hardware": no_simd_hardware,
"novfp3": False,
"optimize_for_size": "--optimize-for-size" in options.extra_flags,
"predictable": self.build_config.predictable,
@@ -681,11 +695,13 @@ class BaseTestRunner(object):
"simulator_run": self.build_config.simulator_run and
not options.dont_skip_simulator_slow_tests,
"system": self.target_os,
+ "third_party_heap": self.build_config.third_party_heap,
"tsan": self.build_config.tsan,
"ubsan_vptr": self.build_config.ubsan_vptr,
"verify_csa": self.build_config.verify_csa,
"lite_mode": self.build_config.lite_mode,
"pointer_compression": self.build_config.pointer_compression,
+ "pointer_compression_shared_cage": self.build_config.pointer_compression_shared_cage,
}
def _runner_flags(self):
@@ -718,6 +734,8 @@ class BaseTestRunner(object):
factor *= 2
if self.build_config.predictable:
factor *= 4
+ if self.build_config.tsan:
+ factor *= 1.5
if self.build_config.use_sanitizer:
factor *= 1.5
if self.build_config.is_full_debug:
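
Editor's note: the new hunk in base_runner.py scales test timeouts for TSan builds the same way other slow configurations already do. As a worked illustration, a predictable TSan build gets at least a 4 * 1.5 = 6x factor. The sketch below restates only the multipliers visible in this hunk; the full-debug multiplier is cut off here and therefore omitted.

    def timeout_factor(predictable=False, tsan=False, use_sanitizer=False):
        # Assumed standalone restatement of the visible multipliers only.
        factor = 1.0
        if predictable:
            factor *= 4
        if tsan:                 # newly added in this change
            factor *= 1.5
        if use_sanitizer:
            factor *= 1.5
        return factor

    # e.g. timeout_factor(predictable=True, tsan=True) == 6.0
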
diff --git a/chromium/v8/tools/testrunner/local/statusfile.py b/chromium/v8/tools/testrunner/local/statusfile.py
index 6c2cc01fb87..b6f97cd5641 100644
--- a/chromium/v8/tools/testrunner/local/statusfile.py
+++ b/chromium/v8/tools/testrunner/local/statusfile.py
@@ -282,7 +282,7 @@ def ReadStatusFile(content, variables):
def _ReadSection(section, variables, rules, prefix_rules):
assert type(section) == dict
- for rule, outcome_list in section.iteritems():
+ for rule, outcome_list in section.items():
assert type(rule) == str
if rule[-1] == '*':
diff --git a/chromium/v8/tools/testrunner/local/utils.py b/chromium/v8/tools/testrunner/local/utils.py
index 8fdc16b4bb7..94429a9fde9 100644
--- a/chromium/v8/tools/testrunner/local/utils.py
+++ b/chromium/v8/tools/testrunner/local/utils.py
@@ -97,6 +97,58 @@ def GuessOS():
return None
+# Returns power processor version, taking compatibility mode into account.
+# (Power9 running in Power8 compatibility mode returns 8)
+# Only useful if arch is ppc64
+def GuessPowerProcessorVersion():
+ import ctypes, ctypes.util
+ os = GuessOS()
+ if os == 'linux':
+ AT_PLATFORM = 15 # from linux/auxvec.h
+ _LIBC = ctypes.CDLL(ctypes.util.find_library('c'))
+ _LIBC.getauxval.argtypes = [ctypes.c_ulong]
+ _LIBC.getauxval.restype = ctypes.c_char_p
+ at_platform = _LIBC.getauxval(AT_PLATFORM).decode('utf-8').lower()
+ if at_platform.startswith('power6'):
+ return 6
+ elif at_platform.startswith('power7'):
+ return 7
+ elif at_platform.startswith('power8'):
+ return 8
+ elif at_platform.startswith('power9'):
+ return 9
+ elif at_platform.startswith('power10'):
+ return 10
+ else:
+ raise Exception('Unable to guess power processor version')
+ elif os == 'aix':
+ # covers aix and os400
+ RTLD_MEMBER = 0x00040000
+ _LIBC = ctypes.CDLL(ctypes.util.find_library('c'),
+ ctypes.DEFAULT_MODE | RTLD_MEMBER)
+ class _system_configuration(ctypes.Structure):
+ _fields_ = [
+ ('architecture', ctypes.c_int),
+ ('implementation', ctypes.c_int),
+ ]
+ cfg = _system_configuration.in_dll(_LIBC, '_system_configuration')
+ # Values found in sys/systemcfg.h
+ if cfg.implementation == 0x4000:
+ return 6
+ elif cfg.implementation == 0x8000:
+ return 7
+ elif cfg.implementation == 0x10000:
+ return 8
+ elif cfg.implementation == 0x20000:
+ return 9
+ elif cfg.implementation == 0x40000:
+ return 10
+ else:
+ raise Exception('Unable to guess power processor version')
+ else:
+ raise Exception('Unable to guess power processor version')
+
+
def UseSimulator(arch):
machine = platform.machine()
return (machine and
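
Editor's note: GuessPowerProcessorVersion() above feeds the SIMD gating added earlier in base_runner.py: POWER processors older than POWER9 lack the SIMD instructions those tests rely on. A hedged sketch of that consumer side, reusing the function name from this diff:

    def needs_simd_skip(arch, simulator_run):
        # Mirrors the base_runner.py logic in this change: on native ppc64
        # hosts older than POWER9, SIMD hardware tests must be skipped.
        if arch != "ppc64" or simulator_run:
            return False
        return GuessPowerProcessorVersion() < 9
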
diff --git a/chromium/v8/tools/testrunner/local/variants.py b/chromium/v8/tools/testrunner/local/variants.py
index 0af6d3ab0e6..e214de05404 100644
--- a/chromium/v8/tools/testrunner/local/variants.py
+++ b/chromium/v8/tools/testrunner/local/variants.py
@@ -44,6 +44,7 @@ ALL_VARIANT_FLAGS = {
"instruction_scheduling": [["--turbo-instruction-scheduling"]],
"stress_instruction_scheduling": [["--turbo-stress-instruction-scheduling"]],
"top_level_await": [["--harmony-top-level-await"]],
+ "wasm_write_protect_code": [["--wasm-write-protect-code-memory"]],
}
# Flags that lead to a contradiction with the flags provided by the respective
@@ -59,9 +60,9 @@ INCOMPATIBLE_FLAGS_PER_VARIANT = {
"slow_path": ["--no-force-slow-path"],
"stress_concurrent_allocation": ["--single-threaded-gc", "--predictable"],
"stress_concurrent_inlining": ["--single-threaded", "--predictable",
- "--no-turbo-direct-heap-access"],
+ "--no-concurrent-inlining"],
+ "concurrent_inlining": ["--no-concurrent-inlining"],
"stress_incremental_marking": ["--no-stress-incremental-marking"],
- "future": ["--no-turbo-direct-heap-access"],
"stress_js_bg_compile_wasm_code_gc": ["--no-stress-background-compile"],
"stress": ["--no-stress-opt", "--always-opt", "--no-always-opt", "--liftoff",
"--max-inlined-bytecode-size=*",
@@ -69,16 +70,17 @@ INCOMPATIBLE_FLAGS_PER_VARIANT = {
"--wasm-generic-wrapper"],
"sparkplug": ["--jitless", "--no-sparkplug" ],
"always_sparkplug": ["--jitless", "--no-sparkplug", "--no-always-sparkplug"],
- "turboprop": ["--interrupt-budget=*", "--no-turbo-direct-heap-access",
- "--no-turboprop"],
- "turboprop_as_toptier": ["--interrupt-budget=*",
- "--no-turbo-direct-heap-access", "--no-turboprop",
+ "turboprop": ["--interrupt-budget=*", "--no-turboprop"],
+ "turboprop_as_toptier": ["--interrupt-budget=*", "--no-turboprop",
"--no-turboprop-as-toptier"],
"code_serializer": ["--cache=after-execute", "--cache=full-code-cache",
"--cache=none"],
"no_local_heaps": ["--concurrent-inlining", "--turboprop"],
"experimental_regexp": ["--no-enable-experimental-regexp-engine",
"--no-default-to-experimental-regexp-engine"],
+ # There is a negative implication: --perf-prof disables
+ # --wasm-write-protect-code-memory.
+ "wasm_write_protect_code": ["--perf-prof"],
}
# Flags that lead to a contradiction under certain build variables.
@@ -101,6 +103,8 @@ INCOMPATIBLE_FLAGS_PER_BUILD_VARIABLE = {
# The conflicts might be directly contradictory flags or be caused by the
# implications defined in flag-definitions.h.
INCOMPATIBLE_FLAGS_PER_EXTRA_FLAG = {
+ "--concurrent-inlining":
+ INCOMPATIBLE_FLAGS_PER_VARIANT["concurrent_inlining"],
"--concurrent-recompilation": ["--no-concurrent-recompilation", "--predictable"],
"--enable-armv8": ["--no-enable-armv8"],
"--gc-interval=*": ["--gc-interval=*"],
@@ -109,9 +113,9 @@ INCOMPATIBLE_FLAGS_PER_EXTRA_FLAG = {
"--no-enable-sse4-1": ["--enable-sse4-1"],
"--optimize-for-size": ["--max-semi-space-size=*"],
"--stress_concurrent_allocation": ["--single-threaded-gc", "--predictable"],
- "--stress_concurrent_inlining": ["--single-threaded", "--predictable"],
+ "--stress-concurrent-inlining":
+ INCOMPATIBLE_FLAGS_PER_VARIANT["stress_concurrent_inlining"],
"--stress-flush-bytecode": ["--no-stress-flush-bytecode"],
- "--future": ["--no-turbo-direct-heap-access"],
"--stress-incremental-marking": INCOMPATIBLE_FLAGS_PER_VARIANT["stress_incremental_marking"],
}
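
Editor's note: the tables in variants.py map each variant or extra flag to the flags that contradict it; entries ending in =* act as wildcards. A rough sketch of how such a table could be consulted, assuming a simple prefix match for the wildcard form:

    def find_conflicts(extra_flags, incompatible):
        # 'incompatible' maps a flag to flags it must not be combined with,
        # e.g. INCOMPATIBLE_FLAGS_PER_EXTRA_FLAG from this file.
        conflicts = []
        for flag, banned in incompatible.items():
            if flag not in extra_flags:
                continue
            for b in banned:
                if b.endswith("=*"):        # wildcard: match by prefix
                    hits = [f for f in extra_flags if f.startswith(b[:-1])]
                else:
                    hits = [f for f in extra_flags if f == b]
                conflicts.extend((flag, h) for h in hits)
        return conflicts
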
diff --git a/chromium/v8/tools/testrunner/outproc/message.py b/chromium/v8/tools/testrunner/outproc/message.py
index c253b6f8e06..c301529eb7d 100644
--- a/chromium/v8/tools/testrunner/outproc/message.py
+++ b/chromium/v8/tools/testrunner/outproc/message.py
@@ -9,9 +9,11 @@ import re
from . import base
-class OutProc(base.OutProc):
- def __init__(self, expected_outcomes, basepath, expected_fail):
- super(OutProc, self).__init__(expected_outcomes)
+class OutProc(base.ExpectedOutProc):
+ def __init__(self, expected_outcomes, basepath, expected_fail,
+ expected_filename, regenerate_expected_files):
+ super(OutProc, self).__init__(expected_outcomes, expected_filename,
+ regenerate_expected_files)
self._basepath = basepath
self._expected_fail = expected_fail
diff --git a/chromium/v8/tools/testrunner/standard_runner.py b/chromium/v8/tools/testrunner/standard_runner.py
index f3551d01b88..94c25766824 100755
--- a/chromium/v8/tools/testrunner/standard_runner.py
+++ b/chromium/v8/tools/testrunner/standard_runner.py
@@ -46,7 +46,7 @@ VARIANT_ALIASES = {
'exhaustive': MORE_VARIANTS + VARIANTS,
# Additional variants, run on a subset of bots.
'extra': ['nooptimization', 'future', 'no_wasm_traps', 'turboprop',
- 'instruction_scheduling', 'turboprop_as_toptier'],
+ 'instruction_scheduling', 'always_sparkplug'],
}
# Extra flags passed to all tests using the standard test runner.
diff --git a/chromium/v8/tools/testrunner/testproc/fuzzer.py b/chromium/v8/tools/testrunner/testproc/fuzzer.py
index 965ba23d04f..1237da56b2e 100644
--- a/chromium/v8/tools/testrunner/testproc/fuzzer.py
+++ b/chromium/v8/tools/testrunner/testproc/fuzzer.py
@@ -43,6 +43,7 @@ EXTRA_FLAGS = [
(0.1, '--regexp-tier-up-ticks=10'),
(0.1, '--regexp-tier-up-ticks=100'),
(0.1, '--stress-background-compile'),
+ (0.1, '--stress-concurrent-inlining'),
(0.1, '--stress-lazy-source-positions'),
(0.1, '--stress-wasm-code-gc'),
(0.1, '--turbo-instruction-scheduling'),
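
Editor's note: each entry in EXTRA_FLAGS above pairs a probability with a flag, and the fuzzer presumably samples them independently per test. A minimal illustration of that sampling scheme; the fixed RNG seed is an assumption for reproducibility only.

    import random

    def pick_extra_flags(extra_flags, rng=random.Random(0)):
        # Independently include each flag with its configured probability.
        return [flag for prob, flag in extra_flags if rng.random() < prob]

    # e.g. pick_extra_flags([(0.1, '--stress-concurrent-inlining')])
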
diff --git a/chromium/v8/tools/tickprocessor-driver.mjs b/chromium/v8/tools/tickprocessor-driver.mjs
index e7020e388d2..c0507bfffee 100644
--- a/chromium/v8/tools/tickprocessor-driver.mjs
+++ b/chromium/v8/tools/tickprocessor-driver.mjs
@@ -73,6 +73,7 @@ const tickProcessor = new TickProcessor(
params.separateBytecodes,
params.separateBuiltins,
params.separateStubs,
+ params.separateBaselineHandlers,
params.callGraphSize,
params.ignoreUnknown,
params.stateFilter,
@@ -85,4 +86,9 @@ const tickProcessor = new TickProcessor(
params.runtimeTimerFilter,
params.preprocessJson);
tickProcessor.processLogFile(params.logFileName);
-tickProcessor.printStatistics();
+
+if (params.serializeVMSymbols) {
+ tickProcessor.printVMSymbols();
+} else {
+ tickProcessor.printStatistics();
+}
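
Editor's note: with --serialize-vm-symbols the driver now prints the VM symbol table as JSON instead of the usual statistics. Based on serializeVMSymbols() earlier in this diff, the payload appears to be a list of [name, start, end] triples; a hedged Python consumer might look like this:

    import json, sys

    def load_vm_symbols(path):
        # Expects the JSON printed by the tick processor with
        # --serialize-vm-symbols, assumed to be [name, start, end] triples.
        with open(path) as f:
            return [(name, start, end) for name, start, end in json.load(f)]

    if __name__ == "__main__":
        for name, start, end in load_vm_symbols(sys.argv[1]):
            print(f"{start:#x}-{end:#x} {name}")
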
diff --git a/chromium/v8/tools/tickprocessor.mjs b/chromium/v8/tools/tickprocessor.mjs
index 1ad67e22c0d..8b59e573352 100644
--- a/chromium/v8/tools/tickprocessor.mjs
+++ b/chromium/v8/tools/tickprocessor.mjs
@@ -35,10 +35,12 @@ class V8Profile extends Profile {
static IC_RE =
/^(LoadGlobalIC: )|(Handler: )|(?:CallIC|LoadIC|StoreIC)|(?:Builtin: (?:Keyed)?(?:Load|Store)IC_)/;
static BYTECODES_RE = /^(BytecodeHandler: )/;
+ static BASELINE_HANDLERS_RE = /^(Builtin: .*Baseline.*)/;
static BUILTINS_RE = /^(Builtin: )/;
static STUBS_RE = /^(Stub: )/;
- constructor(separateIc, separateBytecodes, separateBuiltins, separateStubs) {
+ constructor(separateIc, separateBytecodes, separateBuiltins, separateStubs,
+ separateBaselineHandlers) {
super();
const regexps = [];
if (!separateIc) regexps.push(V8Profile.IC_RE);
@@ -46,7 +48,7 @@ class V8Profile extends Profile {
if (!separateBuiltins) regexps.push(V8Profile.BUILTINS_RE);
if (!separateStubs) regexps.push(V8Profile.STUBS_RE);
if (regexps.length > 0) {
- this.skipThisFunction = function (name) {
+ this.skipThisFunction = function(name) {
for (let i = 0; i < regexps.length; i++) {
if (regexps[i].test(name)) return true;
}
@@ -64,7 +66,7 @@ export function readFile(fileName) {
try {
return read(fileName);
} catch (e) {
- printErr(`${fileName}: ${e.message || e}`);
+ printErr(`file="${fileName}": ${e.message || e}`);
throw e;
}
}
@@ -77,6 +79,7 @@ export class TickProcessor extends LogReader {
separateBytecodes,
separateBuiltins,
separateStubs,
+ separateBaselineHandlers,
callGraphSize,
ignoreUnknown,
stateFilter,
@@ -211,7 +214,7 @@ export class TickProcessor extends LogReader {
this.profile_ = new JsonProfile();
} else {
this.profile_ = new V8Profile(separateIc, separateBytecodes,
- separateBuiltins, separateStubs);
+ separateBuiltins, separateStubs, separateBaselineHandlers);
}
this.codeTypes_ = {};
// Count each tick as a time unit.
@@ -228,6 +231,7 @@ export class TickProcessor extends LogReader {
GC: 1,
PARSER: 2,
BYTECODE_COMPILER: 3,
+ // TODO(cbruni): add BASELINE_COMPILER
COMPILER: 4,
OTHER: 5,
EXTERNAL: 6,
@@ -285,7 +289,7 @@ export class TickProcessor extends LogReader {
processSharedLibrary(name, startAddr, endAddr, aslrSlide) {
const entry = this.profile_.addLibrary(name, startAddr, endAddr, aslrSlide);
this.setCodeType(entry.getName(), 'SHARED_LIB');
- const libFuncs = this.cppEntriesProvider_.parseVmSymbols(
+ this.cppEntriesProvider_.parseVmSymbols(
name, startAddr, endAddr, aslrSlide, (fName, fStart, fEnd) => {
this.profile_.addStaticCode(fName, fStart, fEnd);
this.setCodeType(fName, 'CPP');
@@ -409,6 +413,11 @@ export class TickProcessor extends LogReader {
this.generation_++;
}
+ printVMSymbols() {
+ console.log(
+ JSON.stringify(this.profile_.serializeVMSymbols()));
+ }
+
printStatistics() {
if (this.preprocessJson) {
this.profile_.writeJson();
@@ -854,6 +863,8 @@ export class ArgumentsProcessor extends BaseArgumentsProcessor {
'Separate Builtin entries'],
'--separate-stubs': ['separateStubs', parseBool,
'Separate Stub entries'],
+ '--separate-baseline-handlers': ['separateBaselineHandlers', parseBool,
+ 'Separate Baseline Handler entries'],
'--unix': ['platform', 'unix',
'Specify that we are running on *nix platform'],
'--windows': ['platform', 'windows',
@@ -880,6 +891,8 @@ export class ArgumentsProcessor extends BaseArgumentsProcessor {
'Ignore ticks outside pairs of Date.now() calls'],
'--only-summary': ['onlySummary', true,
'Print only tick summary, exclude other information'],
+ '--serialize-vm-symbols': ['serializeVMSymbols', true,
+ 'Print all C++ symbols and library addresses as JSON data'],
'--preprocess': ['preprocessJson', true,
'Preprocess for consumption with web interface']
};
@@ -903,6 +916,7 @@ export class ArgumentsProcessor extends BaseArgumentsProcessor {
separateBytecodes: false,
separateBuiltins: true,
separateStubs: true,
+ separateBaselineHandlers: false,
preprocessJson: null,
targetRootFS: '',
nm: 'nm',
@@ -913,6 +927,7 @@ export class ArgumentsProcessor extends BaseArgumentsProcessor {
pairwiseTimedRange: false,
onlySummary: false,
runtimeTimerFilter: null,
+ serializeVMSymbols: false,
};
}
}
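
Editorial note (not part of the patch): the hunks above add a Baseline-handler category to the tick processor — BASELINE_HANDLERS_RE, a matching constructor parameter, the --separate-baseline-handlers option (default false), and a --serialize-vm-symbols mode that prints profile_.serializeVMSymbols() as JSON. A minimal sketch of how the name filter is expected to behave with the flag left at its default follows; the builtin name is purely illustrative.

    // Sketch only: mirrors V8Profile's regexp filter with the new flag at its default.
    const BASELINE_HANDLERS_RE = /^(Builtin: .*Baseline.*)/;
    const separateBaselineHandlers = false;  // ArgumentsProcessor default above
    const regexps = [];
    if (!separateBaselineHandlers) regexps.push(BASELINE_HANDLERS_RE);
    const skipThisFunction = (name) => regexps.some((re) => re.test(name));
    console.log(skipThisFunction('Builtin: LoadIC_Baseline'));  // true: not listed as a separate entry
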
diff --git a/chromium/v8/tools/ubsan/blacklist.txt b/chromium/v8/tools/ubsan/ignorelist.txt
index ea4e79bf522..4e476abe492 100644
--- a/chromium/v8/tools/ubsan/blacklist.txt
+++ b/chromium/v8/tools/ubsan/ignorelist.txt
@@ -1,5 +1,5 @@
#############################################################################
-# UBSan blacklist.
+# UBSan ignorelist.
# Bug 8735: PropertyCallbackInfo<void> vs PropertyCallbackInfo<T>.
fun:*v8*internal*PropertyCallbackArguments*CallAccessorSetter*
diff --git a/chromium/v8/tools/ubsan/vptr_blacklist.txt b/chromium/v8/tools/ubsan/vptr_ignorelist.txt
index ccad5b10615..26c30ed02d5 100644
--- a/chromium/v8/tools/ubsan/vptr_blacklist.txt
+++ b/chromium/v8/tools/ubsan/vptr_ignorelist.txt
@@ -1,9 +1,9 @@
#############################################################################
-# UBSan vptr blacklist.
-# Function and type based blacklisting use a mangled name, and it is especially
+# UBSan vptr ignorelist.
+# Function and type based ignorelisting use a mangled name, and it is especially
# tricky to represent C++ types. For now, any possible changes by name manglings
# are simply represented as wildcard expressions of regexp, and thus it might be
-# over-blacklisted.
+# over-ignorelisted.
#############################################################################
# UBsan goes into an infinite recursion when __dynamic_cast instrumented with
diff --git a/chromium/v8/tools/unittests/run_tests_test.py b/chromium/v8/tools/unittests/run_tests_test.py
index f174a239bf2..ae3833abc81 100755
--- a/chromium/v8/tools/unittests/run_tests_test.py
+++ b/chromium/v8/tools/unittests/run_tests_test.py
@@ -334,7 +334,8 @@ class SystemTest(unittest.TestCase):
is_msan=True, is_tsan=True, is_ubsan_vptr=True, target_cpu='x86',
v8_enable_i18n_support=False, v8_target_cpu='x86',
v8_enable_verify_csa=False, v8_enable_lite_mode=False,
- v8_enable_pointer_compression=False)
+ v8_enable_pointer_compression=False,
+ v8_enable_pointer_compression_shared_cage=False)
result = run_tests(
basedir,
'--progress=verbose',
diff --git a/chromium/v8/tools/v8_presubmit.py b/chromium/v8/tools/v8_presubmit.py
index db008aabf17..f4212794513 100755
--- a/chromium/v8/tools/v8_presubmit.py
+++ b/chromium/v8/tools/v8_presubmit.py
@@ -63,6 +63,8 @@ from testrunner.local import utils
# runtime/references: As of May 2020 the C++ style guide suggests using
# references for out parameters, see
# https://google.github.io/styleguide/cppguide.html#Inputs_and_Outputs.
+# whitespace/braces: Doesn't handle {}-initialization for custom types
+# well; also should be subsumed by clang-format.
LINT_RULES = """
-build/header_guard
@@ -70,6 +72,7 @@ LINT_RULES = """
-readability/fn_size
-readability/multiline_comment
-runtime/references
+-whitespace/braces
-whitespace/comments
""".split()
diff --git a/chromium/v8/tools/v8heapconst.py b/chromium/v8/tools/v8heapconst.py
index f0cc6481a88..c28123dcec9 100644
--- a/chromium/v8/tools/v8heapconst.py
+++ b/chromium/v8/tools/v8heapconst.py
@@ -32,126 +32,128 @@ INSTANCE_TYPES = {
68: "ABSTRACT_INTERNAL_CLASS_SUBCLASS1_TYPE",
69: "ABSTRACT_INTERNAL_CLASS_SUBCLASS2_TYPE",
70: "FOREIGN_TYPE",
- 71: "WASM_TYPE_INFO_TYPE",
- 72: "PROMISE_FULFILL_REACTION_JOB_TASK_TYPE",
- 73: "PROMISE_REJECT_REACTION_JOB_TASK_TYPE",
- 74: "CALLABLE_TASK_TYPE",
- 75: "CALLBACK_TASK_TYPE",
- 76: "PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE",
- 77: "LOAD_HANDLER_TYPE",
- 78: "STORE_HANDLER_TYPE",
- 79: "FUNCTION_TEMPLATE_INFO_TYPE",
- 80: "OBJECT_TEMPLATE_INFO_TYPE",
- 81: "ACCESS_CHECK_INFO_TYPE",
- 82: "ACCESSOR_INFO_TYPE",
- 83: "ACCESSOR_PAIR_TYPE",
- 84: "ALIASED_ARGUMENTS_ENTRY_TYPE",
- 85: "ALLOCATION_MEMENTO_TYPE",
- 86: "ALLOCATION_SITE_TYPE",
- 87: "ARRAY_BOILERPLATE_DESCRIPTION_TYPE",
- 88: "ASM_WASM_DATA_TYPE",
- 89: "ASYNC_GENERATOR_REQUEST_TYPE",
- 90: "BASELINE_DATA_TYPE",
- 91: "BREAK_POINT_TYPE",
- 92: "BREAK_POINT_INFO_TYPE",
- 93: "CACHED_TEMPLATE_OBJECT_TYPE",
- 94: "CALL_HANDLER_INFO_TYPE",
- 95: "CLASS_POSITIONS_TYPE",
- 96: "DEBUG_INFO_TYPE",
- 97: "ENUM_CACHE_TYPE",
- 98: "FEEDBACK_CELL_TYPE",
- 99: "FUNCTION_TEMPLATE_RARE_DATA_TYPE",
- 100: "INTERCEPTOR_INFO_TYPE",
- 101: "INTERPRETER_DATA_TYPE",
- 102: "MODULE_REQUEST_TYPE",
- 103: "PROMISE_CAPABILITY_TYPE",
- 104: "PROMISE_REACTION_TYPE",
- 105: "PROPERTY_DESCRIPTOR_OBJECT_TYPE",
- 106: "PROTOTYPE_INFO_TYPE",
- 107: "REG_EXP_BOILERPLATE_DESCRIPTION_TYPE",
- 108: "SCRIPT_TYPE",
- 109: "SOURCE_TEXT_MODULE_INFO_ENTRY_TYPE",
- 110: "STACK_FRAME_INFO_TYPE",
- 111: "TEMPLATE_OBJECT_DESCRIPTION_TYPE",
- 112: "TUPLE2_TYPE",
- 113: "WASM_EXCEPTION_TAG_TYPE",
- 114: "WASM_EXPORTED_FUNCTION_DATA_TYPE",
- 115: "WASM_INDIRECT_FUNCTION_TABLE_TYPE",
- 116: "WASM_JS_FUNCTION_DATA_TYPE",
- 117: "FIXED_ARRAY_TYPE",
- 118: "HASH_TABLE_TYPE",
- 119: "EPHEMERON_HASH_TABLE_TYPE",
- 120: "GLOBAL_DICTIONARY_TYPE",
- 121: "NAME_DICTIONARY_TYPE",
- 122: "NUMBER_DICTIONARY_TYPE",
- 123: "ORDERED_HASH_MAP_TYPE",
- 124: "ORDERED_HASH_SET_TYPE",
- 125: "ORDERED_NAME_DICTIONARY_TYPE",
- 126: "SIMPLE_NUMBER_DICTIONARY_TYPE",
- 127: "CLOSURE_FEEDBACK_CELL_ARRAY_TYPE",
- 128: "OBJECT_BOILERPLATE_DESCRIPTION_TYPE",
- 129: "SCRIPT_CONTEXT_TABLE_TYPE",
- 130: "BYTE_ARRAY_TYPE",
- 131: "BYTECODE_ARRAY_TYPE",
- 132: "FIXED_DOUBLE_ARRAY_TYPE",
- 133: "INTERNAL_CLASS_WITH_SMI_ELEMENTS_TYPE",
- 134: "SLOPPY_ARGUMENTS_ELEMENTS_TYPE",
- 135: "AWAIT_CONTEXT_TYPE",
- 136: "BLOCK_CONTEXT_TYPE",
- 137: "CATCH_CONTEXT_TYPE",
- 138: "DEBUG_EVALUATE_CONTEXT_TYPE",
- 139: "EVAL_CONTEXT_TYPE",
- 140: "FUNCTION_CONTEXT_TYPE",
- 141: "MODULE_CONTEXT_TYPE",
- 142: "NATIVE_CONTEXT_TYPE",
- 143: "SCRIPT_CONTEXT_TYPE",
- 144: "WITH_CONTEXT_TYPE",
- 145: "EXPORTED_SUB_CLASS_BASE_TYPE",
- 146: "EXPORTED_SUB_CLASS_TYPE",
- 147: "EXPORTED_SUB_CLASS2_TYPE",
- 148: "SMALL_ORDERED_HASH_MAP_TYPE",
- 149: "SMALL_ORDERED_HASH_SET_TYPE",
- 150: "SMALL_ORDERED_NAME_DICTIONARY_TYPE",
- 151: "DESCRIPTOR_ARRAY_TYPE",
- 152: "STRONG_DESCRIPTOR_ARRAY_TYPE",
- 153: "SOURCE_TEXT_MODULE_TYPE",
- 154: "SYNTHETIC_MODULE_TYPE",
- 155: "UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE",
- 156: "UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE",
- 157: "WEAK_FIXED_ARRAY_TYPE",
- 158: "TRANSITION_ARRAY_TYPE",
- 159: "CELL_TYPE",
- 160: "CODE_TYPE",
- 161: "CODE_DATA_CONTAINER_TYPE",
- 162: "COVERAGE_INFO_TYPE",
- 163: "EMBEDDER_DATA_ARRAY_TYPE",
- 164: "FEEDBACK_METADATA_TYPE",
- 165: "FEEDBACK_VECTOR_TYPE",
- 166: "FILLER_TYPE",
- 167: "FREE_SPACE_TYPE",
- 168: "INTERNAL_CLASS_TYPE",
- 169: "INTERNAL_CLASS_WITH_STRUCT_ELEMENTS_TYPE",
- 170: "MAP_TYPE",
- 171: "ON_HEAP_BASIC_BLOCK_PROFILER_DATA_TYPE",
- 172: "PREPARSE_DATA_TYPE",
- 173: "PROPERTY_ARRAY_TYPE",
- 174: "PROPERTY_CELL_TYPE",
- 175: "SCOPE_INFO_TYPE",
- 176: "SHARED_FUNCTION_INFO_TYPE",
- 177: "SMI_BOX_TYPE",
- 178: "SMI_PAIR_TYPE",
- 179: "SORT_STATE_TYPE",
- 180: "SWISS_NAME_DICTIONARY_TYPE",
- 181: "WASM_ARRAY_TYPE",
- 182: "WASM_CAPI_FUNCTION_DATA_TYPE",
- 183: "WASM_STRUCT_TYPE",
- 184: "WEAK_ARRAY_LIST_TYPE",
- 185: "WEAK_CELL_TYPE",
- 186: "JS_PROXY_TYPE",
+ 71: "WASM_FUNCTION_DATA_TYPE",
+ 72: "WASM_EXPORTED_FUNCTION_DATA_TYPE",
+ 73: "WASM_JS_FUNCTION_DATA_TYPE",
+ 74: "WASM_TYPE_INFO_TYPE",
+ 75: "PROMISE_FULFILL_REACTION_JOB_TASK_TYPE",
+ 76: "PROMISE_REJECT_REACTION_JOB_TASK_TYPE",
+ 77: "CALLABLE_TASK_TYPE",
+ 78: "CALLBACK_TASK_TYPE",
+ 79: "PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE",
+ 80: "LOAD_HANDLER_TYPE",
+ 81: "STORE_HANDLER_TYPE",
+ 82: "FUNCTION_TEMPLATE_INFO_TYPE",
+ 83: "OBJECT_TEMPLATE_INFO_TYPE",
+ 84: "ACCESS_CHECK_INFO_TYPE",
+ 85: "ACCESSOR_INFO_TYPE",
+ 86: "ACCESSOR_PAIR_TYPE",
+ 87: "ALIASED_ARGUMENTS_ENTRY_TYPE",
+ 88: "ALLOCATION_MEMENTO_TYPE",
+ 89: "ALLOCATION_SITE_TYPE",
+ 90: "ARRAY_BOILERPLATE_DESCRIPTION_TYPE",
+ 91: "ASM_WASM_DATA_TYPE",
+ 92: "ASYNC_GENERATOR_REQUEST_TYPE",
+ 93: "BASELINE_DATA_TYPE",
+ 94: "BREAK_POINT_TYPE",
+ 95: "BREAK_POINT_INFO_TYPE",
+ 96: "CACHED_TEMPLATE_OBJECT_TYPE",
+ 97: "CALL_HANDLER_INFO_TYPE",
+ 98: "CLASS_POSITIONS_TYPE",
+ 99: "DEBUG_INFO_TYPE",
+ 100: "ENUM_CACHE_TYPE",
+ 101: "FEEDBACK_CELL_TYPE",
+ 102: "FUNCTION_TEMPLATE_RARE_DATA_TYPE",
+ 103: "INTERCEPTOR_INFO_TYPE",
+ 104: "INTERPRETER_DATA_TYPE",
+ 105: "MODULE_REQUEST_TYPE",
+ 106: "PROMISE_CAPABILITY_TYPE",
+ 107: "PROMISE_REACTION_TYPE",
+ 108: "PROPERTY_DESCRIPTOR_OBJECT_TYPE",
+ 109: "PROTOTYPE_INFO_TYPE",
+ 110: "REG_EXP_BOILERPLATE_DESCRIPTION_TYPE",
+ 111: "SCRIPT_TYPE",
+ 112: "SOURCE_TEXT_MODULE_INFO_ENTRY_TYPE",
+ 113: "STACK_FRAME_INFO_TYPE",
+ 114: "TEMPLATE_OBJECT_DESCRIPTION_TYPE",
+ 115: "TUPLE2_TYPE",
+ 116: "WASM_EXCEPTION_TAG_TYPE",
+ 117: "WASM_INDIRECT_FUNCTION_TABLE_TYPE",
+ 118: "FIXED_ARRAY_TYPE",
+ 119: "HASH_TABLE_TYPE",
+ 120: "EPHEMERON_HASH_TABLE_TYPE",
+ 121: "GLOBAL_DICTIONARY_TYPE",
+ 122: "NAME_DICTIONARY_TYPE",
+ 123: "NUMBER_DICTIONARY_TYPE",
+ 124: "ORDERED_HASH_MAP_TYPE",
+ 125: "ORDERED_HASH_SET_TYPE",
+ 126: "ORDERED_NAME_DICTIONARY_TYPE",
+ 127: "SIMPLE_NUMBER_DICTIONARY_TYPE",
+ 128: "CLOSURE_FEEDBACK_CELL_ARRAY_TYPE",
+ 129: "OBJECT_BOILERPLATE_DESCRIPTION_TYPE",
+ 130: "SCRIPT_CONTEXT_TABLE_TYPE",
+ 131: "BYTE_ARRAY_TYPE",
+ 132: "BYTECODE_ARRAY_TYPE",
+ 133: "FIXED_DOUBLE_ARRAY_TYPE",
+ 134: "INTERNAL_CLASS_WITH_SMI_ELEMENTS_TYPE",
+ 135: "SLOPPY_ARGUMENTS_ELEMENTS_TYPE",
+ 136: "AWAIT_CONTEXT_TYPE",
+ 137: "BLOCK_CONTEXT_TYPE",
+ 138: "CATCH_CONTEXT_TYPE",
+ 139: "DEBUG_EVALUATE_CONTEXT_TYPE",
+ 140: "EVAL_CONTEXT_TYPE",
+ 141: "FUNCTION_CONTEXT_TYPE",
+ 142: "MODULE_CONTEXT_TYPE",
+ 143: "NATIVE_CONTEXT_TYPE",
+ 144: "SCRIPT_CONTEXT_TYPE",
+ 145: "WITH_CONTEXT_TYPE",
+ 146: "EXPORTED_SUB_CLASS_BASE_TYPE",
+ 147: "EXPORTED_SUB_CLASS_TYPE",
+ 148: "EXPORTED_SUB_CLASS2_TYPE",
+ 149: "SMALL_ORDERED_HASH_MAP_TYPE",
+ 150: "SMALL_ORDERED_HASH_SET_TYPE",
+ 151: "SMALL_ORDERED_NAME_DICTIONARY_TYPE",
+ 152: "DESCRIPTOR_ARRAY_TYPE",
+ 153: "STRONG_DESCRIPTOR_ARRAY_TYPE",
+ 154: "SOURCE_TEXT_MODULE_TYPE",
+ 155: "SYNTHETIC_MODULE_TYPE",
+ 156: "UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE",
+ 157: "UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE",
+ 158: "WEAK_FIXED_ARRAY_TYPE",
+ 159: "TRANSITION_ARRAY_TYPE",
+ 160: "CELL_TYPE",
+ 161: "CODE_TYPE",
+ 162: "CODE_DATA_CONTAINER_TYPE",
+ 163: "COVERAGE_INFO_TYPE",
+ 164: "EMBEDDER_DATA_ARRAY_TYPE",
+ 165: "FEEDBACK_METADATA_TYPE",
+ 166: "FEEDBACK_VECTOR_TYPE",
+ 167: "FILLER_TYPE",
+ 168: "FREE_SPACE_TYPE",
+ 169: "INTERNAL_CLASS_TYPE",
+ 170: "INTERNAL_CLASS_WITH_STRUCT_ELEMENTS_TYPE",
+ 171: "MAP_TYPE",
+ 172: "MEGA_DOM_HANDLER_TYPE",
+ 173: "ON_HEAP_BASIC_BLOCK_PROFILER_DATA_TYPE",
+ 174: "PREPARSE_DATA_TYPE",
+ 175: "PROPERTY_ARRAY_TYPE",
+ 176: "PROPERTY_CELL_TYPE",
+ 177: "SCOPE_INFO_TYPE",
+ 178: "SHARED_FUNCTION_INFO_TYPE",
+ 179: "SMI_BOX_TYPE",
+ 180: "SMI_PAIR_TYPE",
+ 181: "SORT_STATE_TYPE",
+ 182: "SWISS_NAME_DICTIONARY_TYPE",
+ 183: "WASM_ARRAY_TYPE",
+ 184: "WASM_CAPI_FUNCTION_DATA_TYPE",
+ 185: "WASM_STRUCT_TYPE",
+ 186: "WEAK_ARRAY_LIST_TYPE",
+ 187: "WEAK_CELL_TYPE",
+ 188: "JS_PROXY_TYPE",
1057: "JS_OBJECT_TYPE",
- 187: "JS_GLOBAL_OBJECT_TYPE",
- 188: "JS_GLOBAL_PROXY_TYPE",
- 189: "JS_MODULE_NAMESPACE_TYPE",
+ 189: "JS_GLOBAL_OBJECT_TYPE",
+ 190: "JS_GLOBAL_PROXY_TYPE",
+ 191: "JS_MODULE_NAMESPACE_TYPE",
1040: "JS_SPECIAL_API_OBJECT_TYPE",
1041: "JS_PRIMITIVE_WRAPPER_TYPE",
1042: "JS_ARRAY_ITERATOR_PROTOTYPE_TYPE",
@@ -233,159 +235,160 @@ INSTANCE_TYPES = {
# List of known V8 maps.
KNOWN_MAPS = {
- ("read_only_space", 0x02119): (170, "MetaMap"),
+ ("read_only_space", 0x02119): (171, "MetaMap"),
("read_only_space", 0x02141): (67, "NullMap"),
- ("read_only_space", 0x02169): (152, "StrongDescriptorArrayMap"),
- ("read_only_space", 0x02191): (157, "WeakFixedArrayMap"),
- ("read_only_space", 0x021d1): (97, "EnumCacheMap"),
- ("read_only_space", 0x02205): (117, "FixedArrayMap"),
+ ("read_only_space", 0x02169): (153, "StrongDescriptorArrayMap"),
+ ("read_only_space", 0x02191): (158, "WeakFixedArrayMap"),
+ ("read_only_space", 0x021d1): (100, "EnumCacheMap"),
+ ("read_only_space", 0x02205): (118, "FixedArrayMap"),
("read_only_space", 0x02251): (8, "OneByteInternalizedStringMap"),
- ("read_only_space", 0x0229d): (167, "FreeSpaceMap"),
- ("read_only_space", 0x022c5): (166, "OnePointerFillerMap"),
- ("read_only_space", 0x022ed): (166, "TwoPointerFillerMap"),
+ ("read_only_space", 0x0229d): (168, "FreeSpaceMap"),
+ ("read_only_space", 0x022c5): (167, "OnePointerFillerMap"),
+ ("read_only_space", 0x022ed): (167, "TwoPointerFillerMap"),
("read_only_space", 0x02315): (67, "UninitializedMap"),
("read_only_space", 0x0238d): (67, "UndefinedMap"),
("read_only_space", 0x023d1): (66, "HeapNumberMap"),
("read_only_space", 0x02405): (67, "TheHoleMap"),
("read_only_space", 0x02465): (67, "BooleanMap"),
- ("read_only_space", 0x02509): (130, "ByteArrayMap"),
- ("read_only_space", 0x02531): (117, "FixedCOWArrayMap"),
- ("read_only_space", 0x02559): (118, "HashTableMap"),
+ ("read_only_space", 0x02509): (131, "ByteArrayMap"),
+ ("read_only_space", 0x02531): (118, "FixedCOWArrayMap"),
+ ("read_only_space", 0x02559): (119, "HashTableMap"),
("read_only_space", 0x02581): (64, "SymbolMap"),
("read_only_space", 0x025a9): (40, "OneByteStringMap"),
- ("read_only_space", 0x025d1): (175, "ScopeInfoMap"),
- ("read_only_space", 0x025f9): (176, "SharedFunctionInfoMap"),
- ("read_only_space", 0x02621): (160, "CodeMap"),
- ("read_only_space", 0x02649): (159, "CellMap"),
- ("read_only_space", 0x02671): (174, "GlobalPropertyCellMap"),
+ ("read_only_space", 0x025d1): (177, "ScopeInfoMap"),
+ ("read_only_space", 0x025f9): (178, "SharedFunctionInfoMap"),
+ ("read_only_space", 0x02621): (161, "CodeMap"),
+ ("read_only_space", 0x02649): (160, "CellMap"),
+ ("read_only_space", 0x02671): (176, "GlobalPropertyCellMap"),
("read_only_space", 0x02699): (70, "ForeignMap"),
- ("read_only_space", 0x026c1): (158, "TransitionArrayMap"),
+ ("read_only_space", 0x026c1): (159, "TransitionArrayMap"),
("read_only_space", 0x026e9): (45, "ThinOneByteStringMap"),
- ("read_only_space", 0x02711): (165, "FeedbackVectorMap"),
+ ("read_only_space", 0x02711): (166, "FeedbackVectorMap"),
("read_only_space", 0x02749): (67, "ArgumentsMarkerMap"),
("read_only_space", 0x027a9): (67, "ExceptionMap"),
("read_only_space", 0x02805): (67, "TerminationExceptionMap"),
("read_only_space", 0x0286d): (67, "OptimizedOutMap"),
("read_only_space", 0x028cd): (67, "StaleRegisterMap"),
- ("read_only_space", 0x0292d): (129, "ScriptContextTableMap"),
- ("read_only_space", 0x02955): (127, "ClosureFeedbackCellArrayMap"),
- ("read_only_space", 0x0297d): (164, "FeedbackMetadataArrayMap"),
- ("read_only_space", 0x029a5): (117, "ArrayListMap"),
+ ("read_only_space", 0x0292d): (130, "ScriptContextTableMap"),
+ ("read_only_space", 0x02955): (128, "ClosureFeedbackCellArrayMap"),
+ ("read_only_space", 0x0297d): (165, "FeedbackMetadataArrayMap"),
+ ("read_only_space", 0x029a5): (118, "ArrayListMap"),
("read_only_space", 0x029cd): (65, "BigIntMap"),
- ("read_only_space", 0x029f5): (128, "ObjectBoilerplateDescriptionMap"),
- ("read_only_space", 0x02a1d): (131, "BytecodeArrayMap"),
- ("read_only_space", 0x02a45): (161, "CodeDataContainerMap"),
- ("read_only_space", 0x02a6d): (162, "CoverageInfoMap"),
- ("read_only_space", 0x02a95): (132, "FixedDoubleArrayMap"),
- ("read_only_space", 0x02abd): (120, "GlobalDictionaryMap"),
- ("read_only_space", 0x02ae5): (98, "ManyClosuresCellMap"),
- ("read_only_space", 0x02b0d): (117, "ModuleInfoMap"),
- ("read_only_space", 0x02b35): (121, "NameDictionaryMap"),
- ("read_only_space", 0x02b5d): (98, "NoClosuresCellMap"),
- ("read_only_space", 0x02b85): (122, "NumberDictionaryMap"),
- ("read_only_space", 0x02bad): (98, "OneClosureCellMap"),
- ("read_only_space", 0x02bd5): (123, "OrderedHashMapMap"),
- ("read_only_space", 0x02bfd): (124, "OrderedHashSetMap"),
- ("read_only_space", 0x02c25): (125, "OrderedNameDictionaryMap"),
- ("read_only_space", 0x02c4d): (172, "PreparseDataMap"),
- ("read_only_space", 0x02c75): (173, "PropertyArrayMap"),
- ("read_only_space", 0x02c9d): (94, "SideEffectCallHandlerInfoMap"),
- ("read_only_space", 0x02cc5): (94, "SideEffectFreeCallHandlerInfoMap"),
- ("read_only_space", 0x02ced): (94, "NextCallSideEffectFreeCallHandlerInfoMap"),
- ("read_only_space", 0x02d15): (126, "SimpleNumberDictionaryMap"),
- ("read_only_space", 0x02d3d): (148, "SmallOrderedHashMapMap"),
- ("read_only_space", 0x02d65): (149, "SmallOrderedHashSetMap"),
- ("read_only_space", 0x02d8d): (150, "SmallOrderedNameDictionaryMap"),
- ("read_only_space", 0x02db5): (153, "SourceTextModuleMap"),
- ("read_only_space", 0x02ddd): (180, "SwissNameDictionaryMap"),
- ("read_only_space", 0x02e05): (154, "SyntheticModuleMap"),
- ("read_only_space", 0x02e2d): (71, "WasmTypeInfoMap"),
- ("read_only_space", 0x02e55): (184, "WeakArrayListMap"),
- ("read_only_space", 0x02e7d): (119, "EphemeronHashTableMap"),
- ("read_only_space", 0x02ea5): (163, "EmbedderDataArrayMap"),
- ("read_only_space", 0x02ecd): (185, "WeakCellMap"),
- ("read_only_space", 0x02ef5): (32, "StringMap"),
- ("read_only_space", 0x02f1d): (41, "ConsOneByteStringMap"),
- ("read_only_space", 0x02f45): (33, "ConsStringMap"),
- ("read_only_space", 0x02f6d): (37, "ThinStringMap"),
- ("read_only_space", 0x02f95): (35, "SlicedStringMap"),
- ("read_only_space", 0x02fbd): (43, "SlicedOneByteStringMap"),
- ("read_only_space", 0x02fe5): (34, "ExternalStringMap"),
- ("read_only_space", 0x0300d): (42, "ExternalOneByteStringMap"),
- ("read_only_space", 0x03035): (50, "UncachedExternalStringMap"),
- ("read_only_space", 0x0305d): (0, "InternalizedStringMap"),
- ("read_only_space", 0x03085): (2, "ExternalInternalizedStringMap"),
- ("read_only_space", 0x030ad): (10, "ExternalOneByteInternalizedStringMap"),
- ("read_only_space", 0x030d5): (18, "UncachedExternalInternalizedStringMap"),
- ("read_only_space", 0x030fd): (26, "UncachedExternalOneByteInternalizedStringMap"),
- ("read_only_space", 0x03125): (58, "UncachedExternalOneByteStringMap"),
- ("read_only_space", 0x0314d): (67, "SelfReferenceMarkerMap"),
- ("read_only_space", 0x03175): (67, "BasicBlockCountersMarkerMap"),
- ("read_only_space", 0x031b9): (87, "ArrayBoilerplateDescriptionMap"),
- ("read_only_space", 0x032b9): (100, "InterceptorInfoMap"),
- ("read_only_space", 0x05401): (72, "PromiseFulfillReactionJobTaskMap"),
- ("read_only_space", 0x05429): (73, "PromiseRejectReactionJobTaskMap"),
- ("read_only_space", 0x05451): (74, "CallableTaskMap"),
- ("read_only_space", 0x05479): (75, "CallbackTaskMap"),
- ("read_only_space", 0x054a1): (76, "PromiseResolveThenableJobTaskMap"),
- ("read_only_space", 0x054c9): (79, "FunctionTemplateInfoMap"),
- ("read_only_space", 0x054f1): (80, "ObjectTemplateInfoMap"),
- ("read_only_space", 0x05519): (81, "AccessCheckInfoMap"),
- ("read_only_space", 0x05541): (82, "AccessorInfoMap"),
- ("read_only_space", 0x05569): (83, "AccessorPairMap"),
- ("read_only_space", 0x05591): (84, "AliasedArgumentsEntryMap"),
- ("read_only_space", 0x055b9): (85, "AllocationMementoMap"),
- ("read_only_space", 0x055e1): (88, "AsmWasmDataMap"),
- ("read_only_space", 0x05609): (89, "AsyncGeneratorRequestMap"),
- ("read_only_space", 0x05631): (90, "BaselineDataMap"),
- ("read_only_space", 0x05659): (91, "BreakPointMap"),
- ("read_only_space", 0x05681): (92, "BreakPointInfoMap"),
- ("read_only_space", 0x056a9): (93, "CachedTemplateObjectMap"),
- ("read_only_space", 0x056d1): (95, "ClassPositionsMap"),
- ("read_only_space", 0x056f9): (96, "DebugInfoMap"),
- ("read_only_space", 0x05721): (99, "FunctionTemplateRareDataMap"),
- ("read_only_space", 0x05749): (101, "InterpreterDataMap"),
- ("read_only_space", 0x05771): (102, "ModuleRequestMap"),
- ("read_only_space", 0x05799): (103, "PromiseCapabilityMap"),
- ("read_only_space", 0x057c1): (104, "PromiseReactionMap"),
- ("read_only_space", 0x057e9): (105, "PropertyDescriptorObjectMap"),
- ("read_only_space", 0x05811): (106, "PrototypeInfoMap"),
- ("read_only_space", 0x05839): (107, "RegExpBoilerplateDescriptionMap"),
- ("read_only_space", 0x05861): (108, "ScriptMap"),
- ("read_only_space", 0x05889): (109, "SourceTextModuleInfoEntryMap"),
- ("read_only_space", 0x058b1): (110, "StackFrameInfoMap"),
- ("read_only_space", 0x058d9): (111, "TemplateObjectDescriptionMap"),
- ("read_only_space", 0x05901): (112, "Tuple2Map"),
- ("read_only_space", 0x05929): (113, "WasmExceptionTagMap"),
- ("read_only_space", 0x05951): (114, "WasmExportedFunctionDataMap"),
- ("read_only_space", 0x05979): (115, "WasmIndirectFunctionTableMap"),
- ("read_only_space", 0x059a1): (116, "WasmJSFunctionDataMap"),
- ("read_only_space", 0x059c9): (134, "SloppyArgumentsElementsMap"),
- ("read_only_space", 0x059f1): (151, "DescriptorArrayMap"),
- ("read_only_space", 0x05a19): (156, "UncompiledDataWithoutPreparseDataMap"),
- ("read_only_space", 0x05a41): (155, "UncompiledDataWithPreparseDataMap"),
- ("read_only_space", 0x05a69): (171, "OnHeapBasicBlockProfilerDataMap"),
- ("read_only_space", 0x05a91): (168, "InternalClassMap"),
- ("read_only_space", 0x05ab9): (178, "SmiPairMap"),
- ("read_only_space", 0x05ae1): (177, "SmiBoxMap"),
- ("read_only_space", 0x05b09): (145, "ExportedSubClassBaseMap"),
- ("read_only_space", 0x05b31): (146, "ExportedSubClassMap"),
- ("read_only_space", 0x05b59): (68, "AbstractInternalClassSubclass1Map"),
- ("read_only_space", 0x05b81): (69, "AbstractInternalClassSubclass2Map"),
- ("read_only_space", 0x05ba9): (133, "InternalClassWithSmiElementsMap"),
- ("read_only_space", 0x05bd1): (169, "InternalClassWithStructElementsMap"),
- ("read_only_space", 0x05bf9): (147, "ExportedSubClass2Map"),
- ("read_only_space", 0x05c21): (179, "SortStateMap"),
- ("read_only_space", 0x05c49): (182, "WasmCapiFunctionDataMap"),
- ("read_only_space", 0x05c71): (86, "AllocationSiteWithWeakNextMap"),
- ("read_only_space", 0x05c99): (86, "AllocationSiteWithoutWeakNextMap"),
- ("read_only_space", 0x05cc1): (77, "LoadHandler1Map"),
- ("read_only_space", 0x05ce9): (77, "LoadHandler2Map"),
- ("read_only_space", 0x05d11): (77, "LoadHandler3Map"),
- ("read_only_space", 0x05d39): (78, "StoreHandler0Map"),
- ("read_only_space", 0x05d61): (78, "StoreHandler1Map"),
- ("read_only_space", 0x05d89): (78, "StoreHandler2Map"),
- ("read_only_space", 0x05db1): (78, "StoreHandler3Map"),
+ ("read_only_space", 0x029f5): (129, "ObjectBoilerplateDescriptionMap"),
+ ("read_only_space", 0x02a1d): (132, "BytecodeArrayMap"),
+ ("read_only_space", 0x02a45): (162, "CodeDataContainerMap"),
+ ("read_only_space", 0x02a6d): (163, "CoverageInfoMap"),
+ ("read_only_space", 0x02a95): (133, "FixedDoubleArrayMap"),
+ ("read_only_space", 0x02abd): (121, "GlobalDictionaryMap"),
+ ("read_only_space", 0x02ae5): (101, "ManyClosuresCellMap"),
+ ("read_only_space", 0x02b0d): (172, "MegaDomHandlerMap"),
+ ("read_only_space", 0x02b35): (118, "ModuleInfoMap"),
+ ("read_only_space", 0x02b5d): (122, "NameDictionaryMap"),
+ ("read_only_space", 0x02b85): (101, "NoClosuresCellMap"),
+ ("read_only_space", 0x02bad): (123, "NumberDictionaryMap"),
+ ("read_only_space", 0x02bd5): (101, "OneClosureCellMap"),
+ ("read_only_space", 0x02bfd): (124, "OrderedHashMapMap"),
+ ("read_only_space", 0x02c25): (125, "OrderedHashSetMap"),
+ ("read_only_space", 0x02c4d): (126, "OrderedNameDictionaryMap"),
+ ("read_only_space", 0x02c75): (174, "PreparseDataMap"),
+ ("read_only_space", 0x02c9d): (175, "PropertyArrayMap"),
+ ("read_only_space", 0x02cc5): (97, "SideEffectCallHandlerInfoMap"),
+ ("read_only_space", 0x02ced): (97, "SideEffectFreeCallHandlerInfoMap"),
+ ("read_only_space", 0x02d15): (97, "NextCallSideEffectFreeCallHandlerInfoMap"),
+ ("read_only_space", 0x02d3d): (127, "SimpleNumberDictionaryMap"),
+ ("read_only_space", 0x02d65): (149, "SmallOrderedHashMapMap"),
+ ("read_only_space", 0x02d8d): (150, "SmallOrderedHashSetMap"),
+ ("read_only_space", 0x02db5): (151, "SmallOrderedNameDictionaryMap"),
+ ("read_only_space", 0x02ddd): (154, "SourceTextModuleMap"),
+ ("read_only_space", 0x02e05): (182, "SwissNameDictionaryMap"),
+ ("read_only_space", 0x02e2d): (155, "SyntheticModuleMap"),
+ ("read_only_space", 0x02e55): (72, "WasmExportedFunctionDataMap"),
+ ("read_only_space", 0x02e7d): (73, "WasmJSFunctionDataMap"),
+ ("read_only_space", 0x02ea5): (74, "WasmTypeInfoMap"),
+ ("read_only_space", 0x02ecd): (186, "WeakArrayListMap"),
+ ("read_only_space", 0x02ef5): (120, "EphemeronHashTableMap"),
+ ("read_only_space", 0x02f1d): (164, "EmbedderDataArrayMap"),
+ ("read_only_space", 0x02f45): (187, "WeakCellMap"),
+ ("read_only_space", 0x02f6d): (32, "StringMap"),
+ ("read_only_space", 0x02f95): (41, "ConsOneByteStringMap"),
+ ("read_only_space", 0x02fbd): (33, "ConsStringMap"),
+ ("read_only_space", 0x02fe5): (37, "ThinStringMap"),
+ ("read_only_space", 0x0300d): (35, "SlicedStringMap"),
+ ("read_only_space", 0x03035): (43, "SlicedOneByteStringMap"),
+ ("read_only_space", 0x0305d): (34, "ExternalStringMap"),
+ ("read_only_space", 0x03085): (42, "ExternalOneByteStringMap"),
+ ("read_only_space", 0x030ad): (50, "UncachedExternalStringMap"),
+ ("read_only_space", 0x030d5): (0, "InternalizedStringMap"),
+ ("read_only_space", 0x030fd): (2, "ExternalInternalizedStringMap"),
+ ("read_only_space", 0x03125): (10, "ExternalOneByteInternalizedStringMap"),
+ ("read_only_space", 0x0314d): (18, "UncachedExternalInternalizedStringMap"),
+ ("read_only_space", 0x03175): (26, "UncachedExternalOneByteInternalizedStringMap"),
+ ("read_only_space", 0x0319d): (58, "UncachedExternalOneByteStringMap"),
+ ("read_only_space", 0x031c5): (67, "SelfReferenceMarkerMap"),
+ ("read_only_space", 0x031ed): (67, "BasicBlockCountersMarkerMap"),
+ ("read_only_space", 0x03231): (90, "ArrayBoilerplateDescriptionMap"),
+ ("read_only_space", 0x03331): (103, "InterceptorInfoMap"),
+ ("read_only_space", 0x05639): (75, "PromiseFulfillReactionJobTaskMap"),
+ ("read_only_space", 0x05661): (76, "PromiseRejectReactionJobTaskMap"),
+ ("read_only_space", 0x05689): (77, "CallableTaskMap"),
+ ("read_only_space", 0x056b1): (78, "CallbackTaskMap"),
+ ("read_only_space", 0x056d9): (79, "PromiseResolveThenableJobTaskMap"),
+ ("read_only_space", 0x05701): (82, "FunctionTemplateInfoMap"),
+ ("read_only_space", 0x05729): (83, "ObjectTemplateInfoMap"),
+ ("read_only_space", 0x05751): (84, "AccessCheckInfoMap"),
+ ("read_only_space", 0x05779): (85, "AccessorInfoMap"),
+ ("read_only_space", 0x057a1): (86, "AccessorPairMap"),
+ ("read_only_space", 0x057c9): (87, "AliasedArgumentsEntryMap"),
+ ("read_only_space", 0x057f1): (88, "AllocationMementoMap"),
+ ("read_only_space", 0x05819): (91, "AsmWasmDataMap"),
+ ("read_only_space", 0x05841): (92, "AsyncGeneratorRequestMap"),
+ ("read_only_space", 0x05869): (93, "BaselineDataMap"),
+ ("read_only_space", 0x05891): (94, "BreakPointMap"),
+ ("read_only_space", 0x058b9): (95, "BreakPointInfoMap"),
+ ("read_only_space", 0x058e1): (96, "CachedTemplateObjectMap"),
+ ("read_only_space", 0x05909): (98, "ClassPositionsMap"),
+ ("read_only_space", 0x05931): (99, "DebugInfoMap"),
+ ("read_only_space", 0x05959): (102, "FunctionTemplateRareDataMap"),
+ ("read_only_space", 0x05981): (104, "InterpreterDataMap"),
+ ("read_only_space", 0x059a9): (105, "ModuleRequestMap"),
+ ("read_only_space", 0x059d1): (106, "PromiseCapabilityMap"),
+ ("read_only_space", 0x059f9): (107, "PromiseReactionMap"),
+ ("read_only_space", 0x05a21): (108, "PropertyDescriptorObjectMap"),
+ ("read_only_space", 0x05a49): (109, "PrototypeInfoMap"),
+ ("read_only_space", 0x05a71): (110, "RegExpBoilerplateDescriptionMap"),
+ ("read_only_space", 0x05a99): (111, "ScriptMap"),
+ ("read_only_space", 0x05ac1): (112, "SourceTextModuleInfoEntryMap"),
+ ("read_only_space", 0x05ae9): (113, "StackFrameInfoMap"),
+ ("read_only_space", 0x05b11): (114, "TemplateObjectDescriptionMap"),
+ ("read_only_space", 0x05b39): (115, "Tuple2Map"),
+ ("read_only_space", 0x05b61): (116, "WasmExceptionTagMap"),
+ ("read_only_space", 0x05b89): (117, "WasmIndirectFunctionTableMap"),
+ ("read_only_space", 0x05bb1): (135, "SloppyArgumentsElementsMap"),
+ ("read_only_space", 0x05bd9): (152, "DescriptorArrayMap"),
+ ("read_only_space", 0x05c01): (157, "UncompiledDataWithoutPreparseDataMap"),
+ ("read_only_space", 0x05c29): (156, "UncompiledDataWithPreparseDataMap"),
+ ("read_only_space", 0x05c51): (173, "OnHeapBasicBlockProfilerDataMap"),
+ ("read_only_space", 0x05c79): (169, "InternalClassMap"),
+ ("read_only_space", 0x05ca1): (180, "SmiPairMap"),
+ ("read_only_space", 0x05cc9): (179, "SmiBoxMap"),
+ ("read_only_space", 0x05cf1): (146, "ExportedSubClassBaseMap"),
+ ("read_only_space", 0x05d19): (147, "ExportedSubClassMap"),
+ ("read_only_space", 0x05d41): (68, "AbstractInternalClassSubclass1Map"),
+ ("read_only_space", 0x05d69): (69, "AbstractInternalClassSubclass2Map"),
+ ("read_only_space", 0x05d91): (134, "InternalClassWithSmiElementsMap"),
+ ("read_only_space", 0x05db9): (170, "InternalClassWithStructElementsMap"),
+ ("read_only_space", 0x05de1): (148, "ExportedSubClass2Map"),
+ ("read_only_space", 0x05e09): (181, "SortStateMap"),
+ ("read_only_space", 0x05e31): (184, "WasmCapiFunctionDataMap"),
+ ("read_only_space", 0x05e59): (89, "AllocationSiteWithWeakNextMap"),
+ ("read_only_space", 0x05e81): (89, "AllocationSiteWithoutWeakNextMap"),
+ ("read_only_space", 0x05ea9): (80, "LoadHandler1Map"),
+ ("read_only_space", 0x05ed1): (80, "LoadHandler2Map"),
+ ("read_only_space", 0x05ef9): (80, "LoadHandler3Map"),
+ ("read_only_space", 0x05f21): (81, "StoreHandler0Map"),
+ ("read_only_space", 0x05f49): (81, "StoreHandler1Map"),
+ ("read_only_space", 0x05f71): (81, "StoreHandler2Map"),
+ ("read_only_space", 0x05f99): (81, "StoreHandler3Map"),
("map_space", 0x02119): (1057, "ExternalMap"),
("map_space", 0x02141): (1098, "JSMessageObjectMap"),
}
@@ -411,32 +414,32 @@ KNOWN_OBJECTS = {
("read_only_space", 0x0282d): "TerminationException",
("read_only_space", 0x02895): "OptimizedOut",
("read_only_space", 0x028f5): "StaleRegister",
- ("read_only_space", 0x0319d): "EmptyPropertyArray",
- ("read_only_space", 0x031a5): "EmptyByteArray",
- ("read_only_space", 0x031ad): "EmptyObjectBoilerplateDescription",
- ("read_only_space", 0x031e1): "EmptyArrayBoilerplateDescription",
- ("read_only_space", 0x031ed): "EmptyClosureFeedbackCellArray",
- ("read_only_space", 0x031f5): "EmptySlowElementDictionary",
- ("read_only_space", 0x03219): "EmptyOrderedHashMap",
- ("read_only_space", 0x0322d): "EmptyOrderedHashSet",
- ("read_only_space", 0x03241): "EmptyFeedbackMetadata",
- ("read_only_space", 0x0324d): "EmptyPropertyDictionary",
- ("read_only_space", 0x03275): "EmptyOrderedPropertyDictionary",
- ("read_only_space", 0x0328d): "EmptySwissPropertyDictionary",
- ("read_only_space", 0x032e1): "NoOpInterceptorInfo",
- ("read_only_space", 0x03309): "EmptyWeakArrayList",
- ("read_only_space", 0x03315): "InfinityValue",
- ("read_only_space", 0x03321): "MinusZeroValue",
- ("read_only_space", 0x0332d): "MinusInfinityValue",
- ("read_only_space", 0x03339): "SelfReferenceMarker",
- ("read_only_space", 0x03379): "BasicBlockCountersMarker",
- ("read_only_space", 0x033bd): "OffHeapTrampolineRelocationInfo",
- ("read_only_space", 0x033c9): "TrampolineTrivialCodeDataContainer",
- ("read_only_space", 0x033d5): "TrampolinePromiseRejectionCodeDataContainer",
- ("read_only_space", 0x033e1): "GlobalThisBindingScopeInfo",
- ("read_only_space", 0x03415): "EmptyFunctionScopeInfo",
- ("read_only_space", 0x03439): "NativeScopeInfo",
- ("read_only_space", 0x03451): "HashSeed",
+ ("read_only_space", 0x03215): "EmptyPropertyArray",
+ ("read_only_space", 0x0321d): "EmptyByteArray",
+ ("read_only_space", 0x03225): "EmptyObjectBoilerplateDescription",
+ ("read_only_space", 0x03259): "EmptyArrayBoilerplateDescription",
+ ("read_only_space", 0x03265): "EmptyClosureFeedbackCellArray",
+ ("read_only_space", 0x0326d): "EmptySlowElementDictionary",
+ ("read_only_space", 0x03291): "EmptyOrderedHashMap",
+ ("read_only_space", 0x032a5): "EmptyOrderedHashSet",
+ ("read_only_space", 0x032b9): "EmptyFeedbackMetadata",
+ ("read_only_space", 0x032c5): "EmptyPropertyDictionary",
+ ("read_only_space", 0x032ed): "EmptyOrderedPropertyDictionary",
+ ("read_only_space", 0x03305): "EmptySwissPropertyDictionary",
+ ("read_only_space", 0x03359): "NoOpInterceptorInfo",
+ ("read_only_space", 0x03381): "EmptyWeakArrayList",
+ ("read_only_space", 0x0338d): "InfinityValue",
+ ("read_only_space", 0x03399): "MinusZeroValue",
+ ("read_only_space", 0x033a5): "MinusInfinityValue",
+ ("read_only_space", 0x033b1): "SelfReferenceMarker",
+ ("read_only_space", 0x033f1): "BasicBlockCountersMarker",
+ ("read_only_space", 0x03435): "OffHeapTrampolineRelocationInfo",
+ ("read_only_space", 0x03441): "TrampolineTrivialCodeDataContainer",
+ ("read_only_space", 0x0344d): "TrampolinePromiseRejectionCodeDataContainer",
+ ("read_only_space", 0x03459): "GlobalThisBindingScopeInfo",
+ ("read_only_space", 0x0348d): "EmptyFunctionScopeInfo",
+ ("read_only_space", 0x034b1): "NativeScopeInfo",
+ ("read_only_space", 0x034c9): "HashSeed",
("old_space", 0x02119): "ArgumentsIteratorAccessor",
("old_space", 0x0215d): "ArrayLengthAccessor",
("old_space", 0x021a1): "BoundFunctionLengthAccessor",
@@ -453,52 +456,53 @@ KNOWN_OBJECTS = {
("old_space", 0x0244d): "ManyClosuresCell",
("old_space", 0x02459): "ArrayConstructorProtector",
("old_space", 0x0246d): "NoElementsProtector",
- ("old_space", 0x02481): "IsConcatSpreadableProtector",
- ("old_space", 0x02495): "ArraySpeciesProtector",
- ("old_space", 0x024a9): "TypedArraySpeciesProtector",
- ("old_space", 0x024bd): "PromiseSpeciesProtector",
- ("old_space", 0x024d1): "RegExpSpeciesProtector",
- ("old_space", 0x024e5): "StringLengthProtector",
- ("old_space", 0x024f9): "ArrayIteratorProtector",
- ("old_space", 0x0250d): "ArrayBufferDetachingProtector",
- ("old_space", 0x02521): "PromiseHookProtector",
- ("old_space", 0x02535): "PromiseResolveProtector",
- ("old_space", 0x02549): "MapIteratorProtector",
- ("old_space", 0x0255d): "PromiseThenProtector",
- ("old_space", 0x02571): "SetIteratorProtector",
- ("old_space", 0x02585): "StringIteratorProtector",
- ("old_space", 0x02599): "SingleCharacterStringCache",
- ("old_space", 0x029a1): "StringSplitCache",
- ("old_space", 0x02da9): "RegExpMultipleCache",
- ("old_space", 0x031b1): "BuiltinsConstantsTable",
- ("old_space", 0x035b1): "AsyncFunctionAwaitRejectSharedFun",
- ("old_space", 0x035d5): "AsyncFunctionAwaitResolveSharedFun",
- ("old_space", 0x035f9): "AsyncGeneratorAwaitRejectSharedFun",
- ("old_space", 0x0361d): "AsyncGeneratorAwaitResolveSharedFun",
- ("old_space", 0x03641): "AsyncGeneratorYieldResolveSharedFun",
- ("old_space", 0x03665): "AsyncGeneratorReturnResolveSharedFun",
- ("old_space", 0x03689): "AsyncGeneratorReturnClosedRejectSharedFun",
- ("old_space", 0x036ad): "AsyncGeneratorReturnClosedResolveSharedFun",
- ("old_space", 0x036d1): "AsyncIteratorValueUnwrapSharedFun",
- ("old_space", 0x036f5): "PromiseAllResolveElementSharedFun",
- ("old_space", 0x03719): "PromiseAllSettledResolveElementSharedFun",
- ("old_space", 0x0373d): "PromiseAllSettledRejectElementSharedFun",
- ("old_space", 0x03761): "PromiseAnyRejectElementSharedFun",
- ("old_space", 0x03785): "PromiseCapabilityDefaultRejectSharedFun",
- ("old_space", 0x037a9): "PromiseCapabilityDefaultResolveSharedFun",
- ("old_space", 0x037cd): "PromiseCatchFinallySharedFun",
- ("old_space", 0x037f1): "PromiseGetCapabilitiesExecutorSharedFun",
- ("old_space", 0x03815): "PromiseThenFinallySharedFun",
- ("old_space", 0x03839): "PromiseThrowerFinallySharedFun",
- ("old_space", 0x0385d): "PromiseValueThunkFinallySharedFun",
- ("old_space", 0x03881): "ProxyRevokeSharedFun",
+ ("old_space", 0x02481): "MegaDOMProtector",
+ ("old_space", 0x02495): "IsConcatSpreadableProtector",
+ ("old_space", 0x024a9): "ArraySpeciesProtector",
+ ("old_space", 0x024bd): "TypedArraySpeciesProtector",
+ ("old_space", 0x024d1): "PromiseSpeciesProtector",
+ ("old_space", 0x024e5): "RegExpSpeciesProtector",
+ ("old_space", 0x024f9): "StringLengthProtector",
+ ("old_space", 0x0250d): "ArrayIteratorProtector",
+ ("old_space", 0x02521): "ArrayBufferDetachingProtector",
+ ("old_space", 0x02535): "PromiseHookProtector",
+ ("old_space", 0x02549): "PromiseResolveProtector",
+ ("old_space", 0x0255d): "MapIteratorProtector",
+ ("old_space", 0x02571): "PromiseThenProtector",
+ ("old_space", 0x02585): "SetIteratorProtector",
+ ("old_space", 0x02599): "StringIteratorProtector",
+ ("old_space", 0x025ad): "SingleCharacterStringCache",
+ ("old_space", 0x029b5): "StringSplitCache",
+ ("old_space", 0x02dbd): "RegExpMultipleCache",
+ ("old_space", 0x031c5): "BuiltinsConstantsTable",
+ ("old_space", 0x035d1): "AsyncFunctionAwaitRejectSharedFun",
+ ("old_space", 0x035f5): "AsyncFunctionAwaitResolveSharedFun",
+ ("old_space", 0x03619): "AsyncGeneratorAwaitRejectSharedFun",
+ ("old_space", 0x0363d): "AsyncGeneratorAwaitResolveSharedFun",
+ ("old_space", 0x03661): "AsyncGeneratorYieldResolveSharedFun",
+ ("old_space", 0x03685): "AsyncGeneratorReturnResolveSharedFun",
+ ("old_space", 0x036a9): "AsyncGeneratorReturnClosedRejectSharedFun",
+ ("old_space", 0x036cd): "AsyncGeneratorReturnClosedResolveSharedFun",
+ ("old_space", 0x036f1): "AsyncIteratorValueUnwrapSharedFun",
+ ("old_space", 0x03715): "PromiseAllResolveElementSharedFun",
+ ("old_space", 0x03739): "PromiseAllSettledResolveElementSharedFun",
+ ("old_space", 0x0375d): "PromiseAllSettledRejectElementSharedFun",
+ ("old_space", 0x03781): "PromiseAnyRejectElementSharedFun",
+ ("old_space", 0x037a5): "PromiseCapabilityDefaultRejectSharedFun",
+ ("old_space", 0x037c9): "PromiseCapabilityDefaultResolveSharedFun",
+ ("old_space", 0x037ed): "PromiseCatchFinallySharedFun",
+ ("old_space", 0x03811): "PromiseGetCapabilitiesExecutorSharedFun",
+ ("old_space", 0x03835): "PromiseThenFinallySharedFun",
+ ("old_space", 0x03859): "PromiseThrowerFinallySharedFun",
+ ("old_space", 0x0387d): "PromiseValueThunkFinallySharedFun",
+ ("old_space", 0x038a1): "ProxyRevokeSharedFun",
}
# Lower 32 bits of first page addresses for various heap spaces.
HEAP_FIRST_PAGES = {
- 0x08100000: "old_space",
- 0x08140000: "map_space",
- 0x08040000: "read_only_space",
+ 0x080c0000: "old_space",
+ 0x08100000: "map_space",
+ 0x08000000: "read_only_space",
}
# List of known V8 Frame Markers.
diff --git a/chromium/v8/tools/v8windbg/BUILD.gn b/chromium/v8/tools/v8windbg/BUILD.gn
index 5618d2d9455..e30b826b0f4 100644
--- a/chromium/v8/tools/v8windbg/BUILD.gn
+++ b/chromium/v8/tools/v8windbg/BUILD.gn
@@ -42,8 +42,6 @@ v8_shared_library("v8windbg") {
"src/cur-isolate.h",
"src/js-stack.cc",
"src/js-stack.h",
- "src/list-chunks.cc",
- "src/list-chunks.h",
"src/local-variables.cc",
"src/local-variables.h",
"src/object-inspection.cc",
@@ -56,6 +54,9 @@ v8_shared_library("v8windbg") {
deps = [
":v8windbg_base",
+ "../../:v8_flags",
+ "../../:v8_libbase",
+ "../../:v8_shared_internal_headers",
"../debug_helper:v8_debug_helper",
]
}
diff --git a/chromium/v8/tools/v8windbg/README.md b/chromium/v8/tools/v8windbg/README.md
index de6638e4d43..887f437b0de 100644
--- a/chromium/v8/tools/v8windbg/README.md
+++ b/chromium/v8/tools/v8windbg/README.md
@@ -41,8 +41,6 @@ functions that can be called from within `dx` commands:
like `dx @$v8object(0x34f49880471, "v8::internal::JSArray")`.
- `@$curisolate()` gets the Isolate pointer for the current thread, if the
current thread has a JavaScript Isolate associated.
-- `@$listchunks()` returns a list of the memory chunks in the Heap for the
- current Isolate.
- `@$jsstack()` returns a list of the JS stack frames, including information
about script and function.
@@ -67,8 +65,6 @@ functions declared in `dbgext.h` to create and destroy the extension instance.
`./src` file index:
- `cur-isolate.{cc,h}` implements the `IModelMethod` for `@$curisolate()`.
-- `list-chunks.{cc,h}` implements the `IModelMethod` for `@$listchunks()`. Its
- result is a custom object that supports iteration and indexing.
- `js-stack.{cc,h}` implements the `IModelMethod` for `@$jsstack()`. Its
result is a custom object that supports iteration and indexing.
- `local-variables.{cc,h}` implements the `IModelPropertyAccessor` that provides
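
Editorial reminder (not part of the patch): with the @$listchunks() alias removed, the aliases that remain are invoked from the debugger prompt as in the README's own examples, e.g.:

    dx @$curisolate()
    dx @$jsstack()
    dx @$v8object(0x34f49880471, "v8::internal::JSArray")
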
diff --git a/chromium/v8/tools/v8windbg/src/list-chunks.cc b/chromium/v8/tools/v8windbg/src/list-chunks.cc
deleted file mode 100644
index 90b3ff6af69..00000000000
--- a/chromium/v8/tools/v8windbg/src/list-chunks.cc
+++ /dev/null
@@ -1,238 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "tools/v8windbg/src/list-chunks.h"
-
-#include "tools/v8windbg/src/cur-isolate.h"
-
-// v8windbg!ListChunksAlias::Call
-IFACEMETHODIMP ListChunksAlias::Call(IModelObject* p_context_object,
- ULONG64 arg_count,
- _In_reads_(arg_count)
- IModelObject** pp_arguments,
- IModelObject** pp_result,
- IKeyStore** pp_metadata) noexcept {
- WRL::ComPtr<IDebugHostContext> sp_ctx;
- RETURN_IF_FAIL(sp_debug_host->GetCurrentContext(&sp_ctx));
-
- WRL::ComPtr<IModelObject> result;
- RETURN_IF_FAIL(
- sp_data_model_manager->CreateSyntheticObject(sp_ctx.Get(), &result));
-
- auto sp_iterator{WRL::Make<MemoryChunks>()};
-
- RETURN_IF_FAIL(result->SetConcept(
- __uuidof(IIndexableConcept),
- static_cast<IIndexableConcept*>(sp_iterator.Get()), nullptr));
- RETURN_IF_FAIL(result->SetConcept(
- __uuidof(IIterableConcept),
- static_cast<IIterableConcept*>(sp_iterator.Get()), nullptr));
-
- *pp_result = result.Detach();
- if (pp_metadata) {
- *pp_metadata = nullptr;
- }
- return S_OK;
-}
-
-ChunkData::ChunkData() = default;
-ChunkData::~ChunkData() = default;
-ChunkData::ChunkData(const ChunkData&) = default;
-ChunkData::ChunkData(ChunkData&&) = default;
-ChunkData& ChunkData::operator=(const ChunkData&) = default;
-ChunkData& ChunkData::operator=(ChunkData&&) = default;
-
-MemoryChunkIterator::MemoryChunkIterator(
- WRL::ComPtr<IDebugHostContext>& host_context)
- : sp_ctx_(host_context) {}
-MemoryChunkIterator::~MemoryChunkIterator() = default;
-
-HRESULT MemoryChunkIterator::PopulateChunkData() {
- WRL::ComPtr<IModelObject> sp_isolate, sp_heap, sp_space;
- chunks_.clear();
-
- RETURN_IF_FAIL(GetCurrentIsolate(sp_isolate));
-
- RETURN_IF_FAIL(
- sp_isolate->GetRawValue(SymbolField, L"heap_", RawSearchNone, &sp_heap));
- RETURN_IF_FAIL(
- sp_heap->GetRawValue(SymbolField, L"space_", RawSearchNone, &sp_space));
-
- WRL::ComPtr<IDebugHostType> sp_space_type;
- RETURN_IF_FAIL(sp_space->GetTypeInfo(&sp_space_type));
-
- // Iterate over the array of Space pointers
- WRL::ComPtr<IIterableConcept> sp_iterable;
- RETURN_IF_FAIL(
- sp_space->GetConcept(__uuidof(IIterableConcept), &sp_iterable, nullptr));
-
- WRL::ComPtr<IModelIterator> sp_space_iterator;
- RETURN_IF_FAIL(sp_iterable->GetIterator(sp_space.Get(), &sp_space_iterator));
-
- // Loop through all the spaces in the array
- WRL::ComPtr<IModelObject> sp_space_ptr;
- while (sp_space_iterator->GetNext(&sp_space_ptr, 0, nullptr, nullptr) !=
- E_BOUNDS) {
- // Should have gotten a "v8::internal::Space *". Dereference, then get field
- // "memory_chunk_list_" [Type: v8::base::List<v8::internal::MemoryChunk>]
- WRL::ComPtr<IModelObject> sp_space, sp_chunk_list, sp_mem_chunk_ptr,
- sp_mem_chunk;
- RETURN_IF_FAIL(sp_space_ptr->Dereference(&sp_space));
- RETURN_IF_FAIL(sp_space->GetRawValue(SymbolField, L"memory_chunk_list_",
- RawSearchNone, &sp_chunk_list));
-
- // Then get field "front_" [Type: v8::internal::MemoryChunk *]
- RETURN_IF_FAIL(sp_chunk_list->GetRawValue(
- SymbolField, L"front_", RawSearchNone, &sp_mem_chunk_ptr));
-
- // Loop here on the list of MemoryChunks for the space
- while (true) {
- // See if it is a nullptr (i.e. no chunks in this space)
- uint64_t front_val;
- RETURN_IF_FAIL(
- UnboxULong64(sp_mem_chunk_ptr.Get(), &front_val, true /*convert*/));
- if (front_val == 0) {
- break;
- }
-
- // Dereference and get fields "area_start_" and "area_end_" (both uint64)
- RETURN_IF_FAIL(sp_mem_chunk_ptr->Dereference(&sp_mem_chunk));
-
- WRL::ComPtr<IModelObject> sp_start, sp_end;
- RETURN_IF_FAIL(sp_mem_chunk->GetRawValue(SymbolField, L"area_start_",
- RawSearchNone, &sp_start));
- RETURN_IF_FAIL(sp_mem_chunk->GetRawValue(SymbolField, L"area_end_",
- RawSearchNone, &sp_end));
-
- ChunkData chunk_entry;
- chunk_entry.area_start = sp_start;
- chunk_entry.area_end = sp_end;
- chunk_entry.space = sp_space;
- chunks_.push_back(chunk_entry);
-
- // Follow the list_node_.next_ to the next memory chunk
- WRL::ComPtr<IModelObject> sp_list_node;
- RETURN_IF_FAIL(sp_mem_chunk->GetRawValue(SymbolField, L"list_node_",
- RawSearchNone, &sp_list_node));
-
- sp_mem_chunk_ptr = nullptr;
- sp_mem_chunk = nullptr;
- RETURN_IF_FAIL(sp_list_node->GetRawValue(
- SymbolField, L"next_", RawSearchNone, &sp_mem_chunk_ptr));
- // Top of the loop will check if this is a nullptr and exit if so
- }
- sp_space_ptr = nullptr;
- }
-
- return S_OK;
-}
-
-IFACEMETHODIMP MemoryChunkIterator::Reset() noexcept {
- position_ = 0;
- return S_OK;
-}
-
-IFACEMETHODIMP MemoryChunkIterator::GetNext(IModelObject** object,
- ULONG64 dimensions,
- IModelObject** indexers,
- IKeyStore** metadata) noexcept {
- if (dimensions > 1) return E_INVALIDARG;
-
- if (position_ == 0) {
- RETURN_IF_FAIL(PopulateChunkData());
- }
-
- if (metadata != nullptr) *metadata = nullptr;
-
- WRL::ComPtr<IModelObject> sp_index, sp_value;
-
- if (dimensions == 1) {
- RETURN_IF_FAIL(CreateULong64(position_, &sp_index));
- }
-
- RETURN_IF_FAIL(GetAt(position_, &sp_value));
-
- // Now update counter and transfer ownership of results, because nothing can
- // fail from this point onward.
- ++position_;
- if (dimensions == 1) {
- *indexers = sp_index.Detach();
- }
- *object = sp_value.Detach();
- return S_OK;
-}
-
-HRESULT MemoryChunkIterator::GetAt(uint64_t index,
- IModelObject** result) const {
- if (index >= chunks_.size()) return E_BOUNDS;
-
- // Create the synthetic object representing the chunk here
- const ChunkData& curr_chunk = chunks_.at(index);
- WRL::ComPtr<IModelObject> sp_value;
- RETURN_IF_FAIL(
- sp_data_model_manager->CreateSyntheticObject(sp_ctx_.Get(), &sp_value));
- RETURN_IF_FAIL(
- sp_value->SetKey(L"area_start", curr_chunk.area_start.Get(), nullptr));
- RETURN_IF_FAIL(
- sp_value->SetKey(L"area_end", curr_chunk.area_end.Get(), nullptr));
- RETURN_IF_FAIL(sp_value->SetKey(L"space", curr_chunk.space.Get(), nullptr));
-
- *result = sp_value.Detach();
- return S_OK;
-}
-
-MemoryChunks::MemoryChunks() = default;
-MemoryChunks::~MemoryChunks() = default;
-
-IFACEMETHODIMP MemoryChunks::GetDimensionality(
- IModelObject* context_object, ULONG64* dimensionality) noexcept {
- *dimensionality = 1;
- return S_OK;
-}
-
-IFACEMETHODIMP MemoryChunks::GetAt(IModelObject* context_object,
- ULONG64 indexer_count,
- IModelObject** indexers,
- IModelObject** object,
- IKeyStore** metadata) noexcept {
- if (indexer_count != 1) return E_INVALIDARG;
- if (metadata != nullptr) *metadata = nullptr;
- WRL::ComPtr<IDebugHostContext> sp_ctx;
- RETURN_IF_FAIL(context_object->GetContext(&sp_ctx));
-
- // This should be instantiated once for each synthetic object returned,
- // so should be able to cache/reuse an iterator
- if (opt_chunks_ == nullptr) {
- opt_chunks_ = WRL::Make<MemoryChunkIterator>(sp_ctx);
- _ASSERT(opt_chunks_ != nullptr);
- RETURN_IF_FAIL(opt_chunks_->PopulateChunkData());
- }
-
- uint64_t index;
- RETURN_IF_FAIL(UnboxULong64(indexers[0], &index, true /*convert*/));
-
- return opt_chunks_->GetAt(index, object);
-}
-
-IFACEMETHODIMP MemoryChunks::SetAt(IModelObject* context_object,
- ULONG64 indexer_count,
- IModelObject** indexers,
- IModelObject* value) noexcept {
- return E_NOTIMPL;
-}
-
-IFACEMETHODIMP MemoryChunks::GetDefaultIndexDimensionality(
- IModelObject* context_object, ULONG64* dimensionality) noexcept {
- *dimensionality = 1;
- return S_OK;
-}
-
-IFACEMETHODIMP MemoryChunks::GetIterator(IModelObject* context_object,
- IModelIterator** iterator) noexcept {
- WRL::ComPtr<IDebugHostContext> sp_ctx;
- RETURN_IF_FAIL(context_object->GetContext(&sp_ctx));
- auto sp_memory_iterator{WRL::Make<MemoryChunkIterator>(sp_ctx)};
- *iterator = sp_memory_iterator.Detach();
- return S_OK;
-}
diff --git a/chromium/v8/tools/v8windbg/src/list-chunks.h b/chromium/v8/tools/v8windbg/src/list-chunks.h
deleted file mode 100644
index 10eec100d17..00000000000
--- a/chromium/v8/tools/v8windbg/src/list-chunks.h
+++ /dev/null
@@ -1,100 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_TOOLS_V8WINDBG_SRC_LIST_CHUNKS_H_
-#define V8_TOOLS_V8WINDBG_SRC_LIST_CHUNKS_H_
-
-#include <crtdbg.h>
-#include <wrl/implements.h>
-
-#include <optional>
-#include <string>
-#include <vector>
-
-#include "src/base/optional.h"
-#include "tools/v8windbg/base/utilities.h"
-#include "tools/v8windbg/src/v8-debug-helper-interop.h"
-#include "tools/v8windbg/src/v8windbg-extension.h"
-
-class ListChunksAlias
- : public WRL::RuntimeClass<
- WRL::RuntimeClassFlags<WRL::RuntimeClassType::ClassicCom>,
- IModelMethod> {
- public:
- IFACEMETHOD(Call)
- (IModelObject* p_context_object, ULONG64 arg_count,
- _In_reads_(arg_count) IModelObject** pp_arguments, IModelObject** pp_result,
- IKeyStore** pp_metadata);
-};
-
-struct ChunkData {
- ChunkData();
- ~ChunkData();
- ChunkData(const ChunkData&);
- ChunkData(ChunkData&&);
- ChunkData& operator=(const ChunkData&);
- ChunkData& operator=(ChunkData&&);
- WRL::ComPtr<IModelObject> area_start;
- WRL::ComPtr<IModelObject> area_end;
- WRL::ComPtr<IModelObject> space;
-};
-
-class MemoryChunkIterator
- : public WRL::RuntimeClass<
- WRL::RuntimeClassFlags<WRL::RuntimeClassType::ClassicCom>,
- IModelIterator> {
- public:
- MemoryChunkIterator(WRL::ComPtr<IDebugHostContext>& host_context);
- ~MemoryChunkIterator() override;
-
- HRESULT PopulateChunkData();
-
- IFACEMETHOD(Reset)();
-
- IFACEMETHOD(GetNext)
- (IModelObject** object, ULONG64 dimensions, IModelObject** indexers,
- IKeyStore** metadata);
-
- const std::vector<ChunkData>& GetChunks() const { return chunks_; }
-
- HRESULT GetAt(uint64_t index, IModelObject** result) const;
-
- private:
- ULONG position_ = 0;
- std::vector<ChunkData> chunks_;
- WRL::ComPtr<IDebugHostContext> sp_ctx_;
-};
-
-class MemoryChunks
- : public WRL::RuntimeClass<
- WRL::RuntimeClassFlags<WRL::RuntimeClassType::ClassicCom>,
- IIndexableConcept, IIterableConcept> {
- public:
- MemoryChunks();
- ~MemoryChunks() override;
-
- // IIndexableConcept members
- IFACEMETHOD(GetDimensionality)
- (IModelObject* context_object, ULONG64* dimensionality);
-
- IFACEMETHOD(GetAt)
- (IModelObject* context_object, ULONG64 indexer_count, IModelObject** indexers,
- IModelObject** object, IKeyStore** metadata);
-
- IFACEMETHOD(SetAt)
- (IModelObject* context_object, ULONG64 indexer_count, IModelObject** indexers,
- IModelObject* value);
-
- // IIterableConcept
- IFACEMETHOD(GetDefaultIndexDimensionality)
- (IModelObject* context_object, ULONG64* dimensionality);
-
- IFACEMETHOD(GetIterator)
- (IModelObject* context_object, IModelIterator** iterator);
-
- private:
- WRL::ComPtr<MemoryChunkIterator> opt_chunks_;
-};
-
-#endif // V8_TOOLS_V8WINDBG_SRC_LIST_CHUNKS_H_
diff --git a/chromium/v8/tools/v8windbg/src/v8windbg-extension.cc b/chromium/v8/tools/v8windbg/src/v8windbg-extension.cc
index 7fbe39d1920..55014cfc8e3 100644
--- a/chromium/v8/tools/v8windbg/src/v8windbg-extension.cc
+++ b/chromium/v8/tools/v8windbg/src/v8windbg-extension.cc
@@ -9,14 +9,12 @@
#include "tools/v8windbg/base/utilities.h"
#include "tools/v8windbg/src/cur-isolate.h"
#include "tools/v8windbg/src/js-stack.h"
-#include "tools/v8windbg/src/list-chunks.h"
#include "tools/v8windbg/src/local-variables.h"
#include "tools/v8windbg/src/object-inspection.h"
std::unique_ptr<Extension> Extension::current_extension_ = nullptr;
const wchar_t* pcur_isolate = L"curisolate";
const wchar_t* pjs_stack = L"jsstack";
-const wchar_t* plist_chunks = L"listchunks";
const wchar_t* pv8_object = L"v8object";
HRESULT CreateExtension() {
@@ -263,7 +261,6 @@ HRESULT Extension::Initialize() {
std::vector<std::pair<const wchar_t*, WRL::ComPtr<IModelMethod>>> functions =
{{pcur_isolate, WRL::Make<CurrIsolateAlias>()},
{pjs_stack, WRL::Make<JSStackAlias>()},
- {plist_chunks, WRL::Make<ListChunksAlias>()},
{pv8_object, WRL::Make<InspectV8ObjectMethod>()}};
for (const auto& function : functions) {
WRL::ComPtr<IModelObject> method;
@@ -375,7 +372,6 @@ Extension::RegistrationType& Extension::RegistrationType::operator=(
Extension::~Extension() {
sp_debug_host_extensibility->DestroyFunctionAlias(pcur_isolate);
sp_debug_host_extensibility->DestroyFunctionAlias(pjs_stack);
- sp_debug_host_extensibility->DestroyFunctionAlias(plist_chunks);
sp_debug_host_extensibility->DestroyFunctionAlias(pv8_object);
for (const auto& registered : registered_types_) {
diff --git a/chromium/v8/tools/whitespace.txt b/chromium/v8/tools/whitespace.txt
index 61fd2e94864..8542464212c 100644
--- a/chromium/v8/tools/whitespace.txt
+++ b/chromium/v8/tools/whitespace.txt
@@ -6,9 +6,9 @@ A Smi balks into a war and says:
"I'm so deoptimized today!"
The doubles heard this and started to unbox.
The Smi looked at them when a crazy v8-autoroll account showed up...
-The autoroller bought a round of Himbeerbrause. Suddenly.....
+The autoroller bought a round of Himbeerbrause. Suddenly........
The bartender starts to shake the bottles...........................
-I can't add trailing whitespaces, so I'm adding this line...........
+I can't add trailing whitespaces, so I'm adding this line............
I'm starting to think that just adding trailing whitespaces might not be bad.
Because whitespaces are not that funny......